xref: /openbmc/qemu/linux-user/syscall.c (revision e1723999)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "qapi/error.h"
140 #include "fd-trans.h"
141 #include "tcg/tcg.h"
142 
143 #ifndef CLONE_IO
144 #define CLONE_IO                0x80000000      /* Clone io context */
145 #endif
146 
147 /* We can't directly call the host clone syscall, because this will
148  * badly confuse libc (breaking mutexes, for example). So we must
149  * divide clone flags into:
150  *  * flag combinations that look like pthread_create()
151  *  * flag combinations that look like fork()
152  *  * flags we can implement within QEMU itself
153  *  * flags we can't support and will return an error for
154  */
155 /* For thread creation, all these flags must be present; for
156  * fork, none must be present.
157  */
158 #define CLONE_THREAD_FLAGS                              \
159     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
160      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
161 
162 /* These flags are ignored:
163  * CLONE_DETACHED is now ignored by the kernel;
164  * CLONE_IO is just an optimisation hint to the I/O scheduler
165  */
166 #define CLONE_IGNORED_FLAGS                     \
167     (CLONE_DETACHED | CLONE_IO)
168 
169 /* Flags for fork which we can implement within QEMU itself */
170 #define CLONE_OPTIONAL_FORK_FLAGS               \
171     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
172      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
173 
174 /* Flags for thread creation which we can implement within QEMU itself */
175 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
176     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
177      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
178 
179 #define CLONE_INVALID_FORK_FLAGS                                        \
180     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
181 
182 #define CLONE_INVALID_THREAD_FLAGS                                      \
183     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
184        CLONE_IGNORED_FLAGS))
185 
186 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
187  * have almost all been allocated. We cannot support any of
188  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
189  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
190  * The checks against the invalid thread masks above will catch these.
191  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
192  */
193 
194 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
195  * once. This exercises the codepaths for restart.
196  */
197 //#define DEBUG_ERESTARTSYS
198 
199 //#include <linux/msdos_fs.h>
200 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
201 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
202 
203 #undef _syscall0
204 #undef _syscall1
205 #undef _syscall2
206 #undef _syscall3
207 #undef _syscall4
208 #undef _syscall5
209 #undef _syscall6
210 
211 #define _syscall0(type,name)		\
212 static type name (void)			\
213 {					\
214 	return syscall(__NR_##name);	\
215 }
216 
217 #define _syscall1(type,name,type1,arg1)		\
218 static type name (type1 arg1)			\
219 {						\
220 	return syscall(__NR_##name, arg1);	\
221 }
222 
223 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
224 static type name (type1 arg1,type2 arg2)		\
225 {							\
226 	return syscall(__NR_##name, arg1, arg2);	\
227 }
228 
229 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3)		\
231 {								\
232 	return syscall(__NR_##name, arg1, arg2, arg3);		\
233 }
234 
235 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
236 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
237 {										\
238 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
239 }
240 
241 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
242 		  type5,arg5)							\
243 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
244 {										\
245 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
246 }
247 
248 
249 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
250 		  type5,arg5,type6,arg6)					\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
252                   type6 arg6)							\
253 {										\
254 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
255 }
256 
257 
258 #define __NR_sys_uname __NR_uname
259 #define __NR_sys_getcwd1 __NR_getcwd
260 #define __NR_sys_getdents __NR_getdents
261 #define __NR_sys_getdents64 __NR_getdents64
262 #define __NR_sys_getpriority __NR_getpriority
263 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
264 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
265 #define __NR_sys_syslog __NR_syslog
266 #if defined(__NR_futex)
267 # define __NR_sys_futex __NR_futex
268 #endif
269 #if defined(__NR_futex_time64)
270 # define __NR_sys_futex_time64 __NR_futex_time64
271 #endif
272 #define __NR_sys_inotify_init __NR_inotify_init
273 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
274 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate getdents with getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_getcpu __NR_getcpu
341 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
342 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
343           void *, arg);
344 _syscall2(int, capget, struct __user_cap_header_struct *, header,
345           struct __user_cap_data_struct *, data);
346 _syscall2(int, capset, struct __user_cap_header_struct *, header,
347           struct __user_cap_data_struct *, data);
348 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
349 _syscall2(int, ioprio_get, int, which, int, who)
350 #endif
351 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
352 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
353 #endif
354 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
355 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
356 #endif
357 
358 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
359 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
360           unsigned long, idx1, unsigned long, idx2)
361 #endif
362 
363 /*
364  * It is assumed that struct statx is architecture independent.
365  */
366 #if defined(TARGET_NR_statx) && defined(__NR_statx)
367 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
368           unsigned int, mask, struct target_statx *, statxbuf)
369 #endif
370 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
371 _syscall2(int, membarrier, int, cmd, int, flags)
372 #endif
373 
374 static const bitmask_transtbl fcntl_flags_tbl[] = {
375   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
376   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
377   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
378   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
379   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
380   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
381   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
382   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
383   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
384   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
385   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
386   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
387   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
388 #if defined(O_DIRECT)
389   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
390 #endif
391 #if defined(O_NOATIME)
392   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
393 #endif
394 #if defined(O_CLOEXEC)
395   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
396 #endif
397 #if defined(O_PATH)
398   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
399 #endif
400 #if defined(O_TMPFILE)
401   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
402 #endif
403   /* Don't terminate the list prematurely on 64-bit host+guest.  */
404 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
405   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
406 #endif
407   { 0, 0, 0, 0 }
408 };
409 
410 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
411 
412 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
413 #if defined(__NR_utimensat)
414 #define __NR_sys_utimensat __NR_utimensat
415 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
416           const struct timespec *,tsp,int,flags)
417 #else
418 static int sys_utimensat(int dirfd, const char *pathname,
419                          const struct timespec times[2], int flags)
420 {
421     errno = ENOSYS;
422     return -1;
423 }
424 #endif
425 #endif /* TARGET_NR_utimensat */
426 
427 #ifdef TARGET_NR_renameat2
428 #if defined(__NR_renameat2)
429 #define __NR_sys_renameat2 __NR_renameat2
430 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
431           const char *, new, unsigned int, flags)
432 #else
433 static int sys_renameat2(int oldfd, const char *old,
434                          int newfd, const char *new, int flags)
435 {
436     if (flags == 0) {
437         return renameat(oldfd, old, newfd, new);
438     }
439     errno = ENOSYS;
440     return -1;
441 }
442 #endif
443 #endif /* TARGET_NR_renameat2 */
444 
445 #ifdef CONFIG_INOTIFY
446 #include <sys/inotify.h>
447 
448 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
449 static int sys_inotify_init(void)
450 {
451   return (inotify_init());
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
455 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
456 {
457   return (inotify_add_watch(fd, pathname, mask));
458 }
459 #endif
460 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
461 static int sys_inotify_rm_watch(int fd, int32_t wd)
462 {
463   return (inotify_rm_watch(fd, wd));
464 }
465 #endif
466 #ifdef CONFIG_INOTIFY1
467 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
468 static int sys_inotify_init1(int flags)
469 {
470   return (inotify_init1(flags));
471 }
472 #endif
473 #endif
474 #else
475 /* Userspace can usually survive runtime without inotify */
476 #undef TARGET_NR_inotify_init
477 #undef TARGET_NR_inotify_init1
478 #undef TARGET_NR_inotify_add_watch
479 #undef TARGET_NR_inotify_rm_watch
480 #endif /* CONFIG_INOTIFY  */
481 
482 #if defined(TARGET_NR_prlimit64)
483 #ifndef __NR_prlimit64
484 # define __NR_prlimit64 -1
485 #endif
486 #define __NR_sys_prlimit64 __NR_prlimit64
487 /* The glibc rlimit structure may not be that used by the underlying syscall */
488 struct host_rlimit64 {
489     uint64_t rlim_cur;
490     uint64_t rlim_max;
491 };
492 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
493           const struct host_rlimit64 *, new_limit,
494           struct host_rlimit64 *, old_limit)
495 #endif
496 
497 
498 #if defined(TARGET_NR_timer_create)
499 /* Maximum of 32 active POSIX timers allowed at any one time. */
500 static timer_t g_posix_timers[32] = { 0, } ;
501 
502 static inline int next_free_host_timer(void)
503 {
504     int k ;
505     /* FIXME: Does finding the next free slot require a lock? */
506     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
507         if (g_posix_timers[k] == 0) {
508             g_posix_timers[k] = (timer_t) 1;
509             return k;
510         }
511     }
512     return -1;
513 }
514 #endif
515 
516 static inline int host_to_target_errno(int host_errno)
517 {
518     switch (host_errno) {
519 #define E(X)  case X: return TARGET_##X;
520 #include "errnos.c.inc"
521 #undef E
522     default:
523         return host_errno;
524     }
525 }
526 
527 static inline int target_to_host_errno(int target_errno)
528 {
529     switch (target_errno) {
530 #define E(X)  case TARGET_##X: return X;
531 #include "errnos.c.inc"
532 #undef E
533     default:
534         return target_errno;
535     }
536 }
537 
538 static inline abi_long get_errno(abi_long ret)
539 {
540     if (ret == -1)
541         return -host_to_target_errno(errno);
542     else
543         return ret;
544 }
545 
546 const char *target_strerror(int err)
547 {
548     if (err == TARGET_ERESTARTSYS) {
549         return "To be restarted";
550     }
551     if (err == TARGET_QEMU_ESIGRETURN) {
552         return "Successful exit from sigreturn";
553     }
554 
555     return strerror(target_to_host_errno(err));
556 }
557 
558 #define safe_syscall0(type, name) \
559 static type safe_##name(void) \
560 { \
561     return safe_syscall(__NR_##name); \
562 }
563 
564 #define safe_syscall1(type, name, type1, arg1) \
565 static type safe_##name(type1 arg1) \
566 { \
567     return safe_syscall(__NR_##name, arg1); \
568 }
569 
570 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
571 static type safe_##name(type1 arg1, type2 arg2) \
572 { \
573     return safe_syscall(__NR_##name, arg1, arg2); \
574 }
575 
576 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
577 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
578 { \
579     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
580 }
581 
582 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
583     type4, arg4) \
584 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
585 { \
586     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
587 }
588 
589 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
590     type4, arg4, type5, arg5) \
591 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
592     type5 arg5) \
593 { \
594     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
595 }
596 
597 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
598     type4, arg4, type5, arg5, type6, arg6) \
599 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
600     type5 arg5, type6 arg6) \
601 { \
602     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
603 }
604 
605 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
606 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
607 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
608               int, flags, mode_t, mode)
609 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
610 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
611               struct rusage *, rusage)
612 #endif
613 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
614               int, options, struct rusage *, rusage)
615 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
616 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
617     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
618 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
619               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
620 #endif
621 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
622 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
623               struct timespec *, tsp, const sigset_t *, sigmask,
624               size_t, sigsetsize)
625 #endif
626 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
627               int, maxevents, int, timeout, const sigset_t *, sigmask,
628               size_t, sigsetsize)
629 #if defined(__NR_futex)
630 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
631               const struct timespec *,timeout,int *,uaddr2,int,val3)
632 #endif
633 #if defined(__NR_futex_time64)
634 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
635               const struct timespec *,timeout,int *,uaddr2,int,val3)
636 #endif
637 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
638 safe_syscall2(int, kill, pid_t, pid, int, sig)
639 safe_syscall2(int, tkill, int, tid, int, sig)
640 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
641 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
642 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
643 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
644               unsigned long, pos_l, unsigned long, pos_h)
645 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
646               unsigned long, pos_l, unsigned long, pos_h)
647 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
648               socklen_t, addrlen)
649 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
650               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
651 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
652               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
653 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
654 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
655 safe_syscall2(int, flock, int, fd, int, operation)
656 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
657 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
658               const struct timespec *, uts, size_t, sigsetsize)
659 #endif
660 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
661               int, flags)
662 #if defined(TARGET_NR_nanosleep)
663 safe_syscall2(int, nanosleep, const struct timespec *, req,
664               struct timespec *, rem)
665 #endif
666 #if defined(TARGET_NR_clock_nanosleep) || \
667     defined(TARGET_NR_clock_nanosleep_time64)
668 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
669               const struct timespec *, req, struct timespec *, rem)
670 #endif
671 #ifdef __NR_ipc
672 #ifdef __s390x__
673 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
674               void *, ptr)
675 #else
676 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
677               void *, ptr, long, fifth)
678 #endif
679 #endif
680 #ifdef __NR_msgsnd
681 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
682               int, flags)
683 #endif
684 #ifdef __NR_msgrcv
685 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
686               long, msgtype, int, flags)
687 #endif
688 #ifdef __NR_semtimedop
689 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
690               unsigned, nsops, const struct timespec *, timeout)
691 #endif
692 #if defined(TARGET_NR_mq_timedsend) || \
693     defined(TARGET_NR_mq_timedsend_time64)
694 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
695               size_t, len, unsigned, prio, const struct timespec *, timeout)
696 #endif
697 #if defined(TARGET_NR_mq_timedreceive) || \
698     defined(TARGET_NR_mq_timedreceive_time64)
699 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
700               size_t, len, unsigned *, prio, const struct timespec *, timeout)
701 #endif
702 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
703 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
704               int, outfd, loff_t *, poutoff, size_t, length,
705               unsigned int, flags)
706 #endif
707 
708 /* We do ioctl like this rather than via safe_syscall3 to preserve the
709  * "third argument might be integer or pointer or not present" behaviour of
710  * the libc function.
711  */
712 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
713 /* Similarly for fcntl. Note that callers must always:
714  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
715  *  use the flock64 struct rather than unsuffixed flock
716  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
717  */
718 #ifdef __NR_fcntl64
719 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
720 #else
721 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
722 #endif
723 
724 static inline int host_to_target_sock_type(int host_type)
725 {
726     int target_type;
727 
728     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
729     case SOCK_DGRAM:
730         target_type = TARGET_SOCK_DGRAM;
731         break;
732     case SOCK_STREAM:
733         target_type = TARGET_SOCK_STREAM;
734         break;
735     default:
736         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
737         break;
738     }
739 
740 #if defined(SOCK_CLOEXEC)
741     if (host_type & SOCK_CLOEXEC) {
742         target_type |= TARGET_SOCK_CLOEXEC;
743     }
744 #endif
745 
746 #if defined(SOCK_NONBLOCK)
747     if (host_type & SOCK_NONBLOCK) {
748         target_type |= TARGET_SOCK_NONBLOCK;
749     }
750 #endif
751 
752     return target_type;
753 }
754 
755 static abi_ulong target_brk;
756 static abi_ulong target_original_brk;
757 static abi_ulong brk_page;
758 
759 void target_set_brk(abi_ulong new_brk)
760 {
761     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
762     brk_page = HOST_PAGE_ALIGN(target_brk);
763 }
764 
765 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
766 #define DEBUGF_BRK(message, args...)
767 
768 /* do_brk() must return target values and target errnos. */
769 abi_long do_brk(abi_ulong new_brk)
770 {
771     abi_long mapped_addr;
772     abi_ulong new_alloc_size;
773 
774     /* brk pointers are always untagged */
775 
776     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
777 
778     if (!new_brk) {
779         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
780         return target_brk;
781     }
782     if (new_brk < target_original_brk) {
783         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
784                    target_brk);
785         return target_brk;
786     }
787 
788     /* If the new brk is less than the highest page reserved to the
789      * target heap allocation, set it and we're almost done...  */
790     if (new_brk <= brk_page) {
791         /* Heap contents are initialized to zero, as for anonymous
792          * mapped pages.  */
793         if (new_brk > target_brk) {
794             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
795         }
796 	target_brk = new_brk;
797         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
798 	return target_brk;
799     }
800 
801     /* We need to allocate more memory after the brk... Note that
802      * we don't use MAP_FIXED because that will map over the top of
803      * any existing mapping (like the one with the host libc or qemu
804      * itself); instead we treat "mapped but at wrong address" as
805      * a failure and unmap again.
806      */
807     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809                                         PROT_READ|PROT_WRITE,
810                                         MAP_ANON|MAP_PRIVATE, 0, 0));
811 
812     if (mapped_addr == brk_page) {
813         /* Heap contents are initialized to zero, as for anonymous
814          * mapped pages.  Technically the new pages are already
815          * initialized to zero since they *are* anonymous mapped
816          * pages, however we have to take care with the contents that
817          * come from the remaining part of the previous page: it may
818          * contains garbage data due to a previous heap usage (grown
819          * then shrunken).  */
820         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
821 
822         target_brk = new_brk;
823         brk_page = HOST_PAGE_ALIGN(target_brk);
824         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
825             target_brk);
826         return target_brk;
827     } else if (mapped_addr != -1) {
828         /* Mapped but at wrong address, meaning there wasn't actually
829          * enough space for this brk.
830          */
831         target_munmap(mapped_addr, new_alloc_size);
832         mapped_addr = -1;
833         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
834     }
835     else {
836         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
837     }
838 
839 #if defined(TARGET_ALPHA)
840     /* We (partially) emulate OSF/1 on Alpha, which requires we
841        return a proper errno, not an unchanged brk value.  */
842     return -TARGET_ENOMEM;
843 #endif
844     /* For everything else, return the previous break. */
845     return target_brk;
846 }
847 
848 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
849     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
850 static inline abi_long copy_from_user_fdset(fd_set *fds,
851                                             abi_ulong target_fds_addr,
852                                             int n)
853 {
854     int i, nw, j, k;
855     abi_ulong b, *target_fds;
856 
857     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
858     if (!(target_fds = lock_user(VERIFY_READ,
859                                  target_fds_addr,
860                                  sizeof(abi_ulong) * nw,
861                                  1)))
862         return -TARGET_EFAULT;
863 
864     FD_ZERO(fds);
865     k = 0;
866     for (i = 0; i < nw; i++) {
867         /* grab the abi_ulong */
868         __get_user(b, &target_fds[i]);
869         for (j = 0; j < TARGET_ABI_BITS; j++) {
870             /* check the bit inside the abi_ulong */
871             if ((b >> j) & 1)
872                 FD_SET(k, fds);
873             k++;
874         }
875     }
876 
877     unlock_user(target_fds, target_fds_addr, 0);
878 
879     return 0;
880 }
881 
882 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
883                                                  abi_ulong target_fds_addr,
884                                                  int n)
885 {
886     if (target_fds_addr) {
887         if (copy_from_user_fdset(fds, target_fds_addr, n))
888             return -TARGET_EFAULT;
889         *fds_ptr = fds;
890     } else {
891         *fds_ptr = NULL;
892     }
893     return 0;
894 }
895 
896 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
897                                           const fd_set *fds,
898                                           int n)
899 {
900     int i, nw, j, k;
901     abi_long v;
902     abi_ulong *target_fds;
903 
904     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
905     if (!(target_fds = lock_user(VERIFY_WRITE,
906                                  target_fds_addr,
907                                  sizeof(abi_ulong) * nw,
908                                  0)))
909         return -TARGET_EFAULT;
910 
911     k = 0;
912     for (i = 0; i < nw; i++) {
913         v = 0;
914         for (j = 0; j < TARGET_ABI_BITS; j++) {
915             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
916             k++;
917         }
918         __put_user(v, &target_fds[i]);
919     }
920 
921     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
922 
923     return 0;
924 }
925 #endif
926 
927 #if defined(__alpha__)
928 #define HOST_HZ 1024
929 #else
930 #define HOST_HZ 100
931 #endif
932 
933 static inline abi_long host_to_target_clock_t(long ticks)
934 {
935 #if HOST_HZ == TARGET_HZ
936     return ticks;
937 #else
938     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
939 #endif
940 }
941 
942 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
943                                              const struct rusage *rusage)
944 {
945     struct target_rusage *target_rusage;
946 
947     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
948         return -TARGET_EFAULT;
949     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
950     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
951     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
952     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
953     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
954     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
955     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
956     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
957     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
958     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
959     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
960     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
961     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
962     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
963     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
964     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
965     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
966     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
967     unlock_user_struct(target_rusage, target_addr, 1);
968 
969     return 0;
970 }
971 
972 #ifdef TARGET_NR_setrlimit
973 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
974 {
975     abi_ulong target_rlim_swap;
976     rlim_t result;
977 
978     target_rlim_swap = tswapal(target_rlim);
979     if (target_rlim_swap == TARGET_RLIM_INFINITY)
980         return RLIM_INFINITY;
981 
982     result = target_rlim_swap;
983     if (target_rlim_swap != (rlim_t)result)
984         return RLIM_INFINITY;
985 
986     return result;
987 }
988 #endif
989 
990 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
991 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
992 {
993     abi_ulong target_rlim_swap;
994     abi_ulong result;
995 
996     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
997         target_rlim_swap = TARGET_RLIM_INFINITY;
998     else
999         target_rlim_swap = rlim;
1000     result = tswapal(target_rlim_swap);
1001 
1002     return result;
1003 }
1004 #endif
1005 
1006 static inline int target_to_host_resource(int code)
1007 {
1008     switch (code) {
1009     case TARGET_RLIMIT_AS:
1010         return RLIMIT_AS;
1011     case TARGET_RLIMIT_CORE:
1012         return RLIMIT_CORE;
1013     case TARGET_RLIMIT_CPU:
1014         return RLIMIT_CPU;
1015     case TARGET_RLIMIT_DATA:
1016         return RLIMIT_DATA;
1017     case TARGET_RLIMIT_FSIZE:
1018         return RLIMIT_FSIZE;
1019     case TARGET_RLIMIT_LOCKS:
1020         return RLIMIT_LOCKS;
1021     case TARGET_RLIMIT_MEMLOCK:
1022         return RLIMIT_MEMLOCK;
1023     case TARGET_RLIMIT_MSGQUEUE:
1024         return RLIMIT_MSGQUEUE;
1025     case TARGET_RLIMIT_NICE:
1026         return RLIMIT_NICE;
1027     case TARGET_RLIMIT_NOFILE:
1028         return RLIMIT_NOFILE;
1029     case TARGET_RLIMIT_NPROC:
1030         return RLIMIT_NPROC;
1031     case TARGET_RLIMIT_RSS:
1032         return RLIMIT_RSS;
1033     case TARGET_RLIMIT_RTPRIO:
1034         return RLIMIT_RTPRIO;
1035     case TARGET_RLIMIT_SIGPENDING:
1036         return RLIMIT_SIGPENDING;
1037     case TARGET_RLIMIT_STACK:
1038         return RLIMIT_STACK;
1039     default:
1040         return code;
1041     }
1042 }
1043 
1044 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1045                                               abi_ulong target_tv_addr)
1046 {
1047     struct target_timeval *target_tv;
1048 
1049     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1050         return -TARGET_EFAULT;
1051     }
1052 
1053     __get_user(tv->tv_sec, &target_tv->tv_sec);
1054     __get_user(tv->tv_usec, &target_tv->tv_usec);
1055 
1056     unlock_user_struct(target_tv, target_tv_addr, 0);
1057 
1058     return 0;
1059 }
1060 
1061 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1062                                             const struct timeval *tv)
1063 {
1064     struct target_timeval *target_tv;
1065 
1066     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1067         return -TARGET_EFAULT;
1068     }
1069 
1070     __put_user(tv->tv_sec, &target_tv->tv_sec);
1071     __put_user(tv->tv_usec, &target_tv->tv_usec);
1072 
1073     unlock_user_struct(target_tv, target_tv_addr, 1);
1074 
1075     return 0;
1076 }
1077 
1078 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1079 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1080                                                 abi_ulong target_tv_addr)
1081 {
1082     struct target__kernel_sock_timeval *target_tv;
1083 
1084     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1085         return -TARGET_EFAULT;
1086     }
1087 
1088     __get_user(tv->tv_sec, &target_tv->tv_sec);
1089     __get_user(tv->tv_usec, &target_tv->tv_usec);
1090 
1091     unlock_user_struct(target_tv, target_tv_addr, 0);
1092 
1093     return 0;
1094 }
1095 #endif
1096 
1097 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1098                                               const struct timeval *tv)
1099 {
1100     struct target__kernel_sock_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __put_user(tv->tv_sec, &target_tv->tv_sec);
1107     __put_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 1);
1110 
1111     return 0;
1112 }
1113 
1114 #if defined(TARGET_NR_futex) || \
1115     defined(TARGET_NR_rt_sigtimedwait) || \
1116     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1117     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1118     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1119     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1120     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1121     defined(TARGET_NR_timer_settime) || \
1122     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1123 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1124                                                abi_ulong target_addr)
1125 {
1126     struct target_timespec *target_ts;
1127 
1128     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1129         return -TARGET_EFAULT;
1130     }
1131     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1132     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1133     unlock_user_struct(target_ts, target_addr, 0);
1134     return 0;
1135 }
1136 #endif
1137 
1138 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1139     defined(TARGET_NR_timer_settime64) || \
1140     defined(TARGET_NR_mq_timedsend_time64) || \
1141     defined(TARGET_NR_mq_timedreceive_time64) || \
1142     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1143     defined(TARGET_NR_clock_nanosleep_time64) || \
1144     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1145     defined(TARGET_NR_utimensat) || \
1146     defined(TARGET_NR_utimensat_time64) || \
1147     defined(TARGET_NR_semtimedop_time64) || \
1148     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1149 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1150                                                  abi_ulong target_addr)
1151 {
1152     struct target__kernel_timespec *target_ts;
1153 
1154     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1155         return -TARGET_EFAULT;
1156     }
1157     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1158     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1159     /* in 32bit mode, this drops the padding */
1160     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1161     unlock_user_struct(target_ts, target_addr, 0);
1162     return 0;
1163 }
1164 #endif
1165 
1166 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1167                                                struct timespec *host_ts)
1168 {
1169     struct target_timespec *target_ts;
1170 
1171     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1172         return -TARGET_EFAULT;
1173     }
1174     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1175     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1176     unlock_user_struct(target_ts, target_addr, 1);
1177     return 0;
1178 }
1179 
1180 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1181                                                  struct timespec *host_ts)
1182 {
1183     struct target__kernel_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 1);
1191     return 0;
1192 }
1193 
1194 #if defined(TARGET_NR_gettimeofday)
1195 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1196                                              struct timezone *tz)
1197 {
1198     struct target_timezone *target_tz;
1199 
1200     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203 
1204     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1205     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1206 
1207     unlock_user_struct(target_tz, target_tz_addr, 1);
1208 
1209     return 0;
1210 }
1211 #endif
1212 
1213 #if defined(TARGET_NR_settimeofday)
1214 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1215                                                abi_ulong target_tz_addr)
1216 {
1217     struct target_timezone *target_tz;
1218 
1219     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1220         return -TARGET_EFAULT;
1221     }
1222 
1223     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1224     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1225 
1226     unlock_user_struct(target_tz, target_tz_addr, 0);
1227 
1228     return 0;
1229 }
1230 #endif
1231 
1232 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1233 #include <mqueue.h>
1234 
1235 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1236                                               abi_ulong target_mq_attr_addr)
1237 {
1238     struct target_mq_attr *target_mq_attr;
1239 
1240     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1241                           target_mq_attr_addr, 1))
1242         return -TARGET_EFAULT;
1243 
1244     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1245     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1246     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1247     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1248 
1249     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1250 
1251     return 0;
1252 }
1253 
1254 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1255                                             const struct mq_attr *attr)
1256 {
1257     struct target_mq_attr *target_mq_attr;
1258 
1259     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1260                           target_mq_attr_addr, 0))
1261         return -TARGET_EFAULT;
1262 
1263     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1264     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1265     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1266     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1267 
1268     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1269 
1270     return 0;
1271 }
1272 #endif
1273 
1274 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1275 /* do_select() must return target values and target errnos. */
1276 static abi_long do_select(int n,
1277                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1278                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1279 {
1280     fd_set rfds, wfds, efds;
1281     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1282     struct timeval tv;
1283     struct timespec ts, *ts_ptr;
1284     abi_long ret;
1285 
1286     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1287     if (ret) {
1288         return ret;
1289     }
1290     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1291     if (ret) {
1292         return ret;
1293     }
1294     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1295     if (ret) {
1296         return ret;
1297     }
1298 
1299     if (target_tv_addr) {
1300         if (copy_from_user_timeval(&tv, target_tv_addr))
1301             return -TARGET_EFAULT;
1302         ts.tv_sec = tv.tv_sec;
1303         ts.tv_nsec = tv.tv_usec * 1000;
1304         ts_ptr = &ts;
1305     } else {
1306         ts_ptr = NULL;
1307     }
1308 
1309     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1310                                   ts_ptr, NULL));
1311 
1312     if (!is_error(ret)) {
1313         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1314             return -TARGET_EFAULT;
1315         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1316             return -TARGET_EFAULT;
1317         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1318             return -TARGET_EFAULT;
1319 
1320         if (target_tv_addr) {
1321             tv.tv_sec = ts.tv_sec;
1322             tv.tv_usec = ts.tv_nsec / 1000;
1323             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1324                 return -TARGET_EFAULT;
1325             }
1326         }
1327     }
1328 
1329     return ret;
1330 }
1331 
1332 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1333 static abi_long do_old_select(abi_ulong arg1)
1334 {
1335     struct target_sel_arg_struct *sel;
1336     abi_ulong inp, outp, exp, tvp;
1337     long nsel;
1338 
1339     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1340         return -TARGET_EFAULT;
1341     }
1342 
1343     nsel = tswapal(sel->n);
1344     inp = tswapal(sel->inp);
1345     outp = tswapal(sel->outp);
1346     exp = tswapal(sel->exp);
1347     tvp = tswapal(sel->tvp);
1348 
1349     unlock_user_struct(sel, arg1, 0);
1350 
1351     return do_select(nsel, inp, outp, exp, tvp);
1352 }
1353 #endif
1354 #endif
1355 
1356 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1357 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1358                             abi_long arg4, abi_long arg5, abi_long arg6,
1359                             bool time64)
1360 {
1361     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1362     fd_set rfds, wfds, efds;
1363     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1364     struct timespec ts, *ts_ptr;
1365     abi_long ret;
1366 
1367     /*
1368      * The 6th arg is actually two args smashed together,
1369      * so we cannot use the C library.
1370      */
1371     sigset_t set;
1372     struct {
1373         sigset_t *set;
1374         size_t size;
1375     } sig, *sig_ptr;
1376 
1377     abi_ulong arg_sigset, arg_sigsize, *arg7;
1378     target_sigset_t *target_sigset;
1379 
1380     n = arg1;
1381     rfd_addr = arg2;
1382     wfd_addr = arg3;
1383     efd_addr = arg4;
1384     ts_addr = arg5;
1385 
1386     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1387     if (ret) {
1388         return ret;
1389     }
1390     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1391     if (ret) {
1392         return ret;
1393     }
1394     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1395     if (ret) {
1396         return ret;
1397     }
1398 
1399     /*
1400      * This takes a timespec, and not a timeval, so we cannot
1401      * use the do_select() helper ...
1402      */
1403     if (ts_addr) {
1404         if (time64) {
1405             if (target_to_host_timespec64(&ts, ts_addr)) {
1406                 return -TARGET_EFAULT;
1407             }
1408         } else {
1409             if (target_to_host_timespec(&ts, ts_addr)) {
1410                 return -TARGET_EFAULT;
1411             }
1412         }
1413             ts_ptr = &ts;
1414     } else {
1415         ts_ptr = NULL;
1416     }
1417 
1418     /* Extract the two packed args for the sigset */
1419     if (arg6) {
1420         sig_ptr = &sig;
1421         sig.size = SIGSET_T_SIZE;
1422 
1423         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1424         if (!arg7) {
1425             return -TARGET_EFAULT;
1426         }
1427         arg_sigset = tswapal(arg7[0]);
1428         arg_sigsize = tswapal(arg7[1]);
1429         unlock_user(arg7, arg6, 0);
1430 
1431         if (arg_sigset) {
1432             sig.set = &set;
1433             if (arg_sigsize != sizeof(*target_sigset)) {
1434                 /* Like the kernel, we enforce correct size sigsets */
1435                 return -TARGET_EINVAL;
1436             }
1437             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1438                                       sizeof(*target_sigset), 1);
1439             if (!target_sigset) {
1440                 return -TARGET_EFAULT;
1441             }
1442             target_to_host_sigset(&set, target_sigset);
1443             unlock_user(target_sigset, arg_sigset, 0);
1444         } else {
1445             sig.set = NULL;
1446         }
1447     } else {
1448         sig_ptr = NULL;
1449     }
1450 
1451     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1452                                   ts_ptr, sig_ptr));
1453 
1454     if (!is_error(ret)) {
1455         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1456             return -TARGET_EFAULT;
1457         }
1458         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1459             return -TARGET_EFAULT;
1460         }
1461         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1462             return -TARGET_EFAULT;
1463         }
1464         if (time64) {
1465             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1466                 return -TARGET_EFAULT;
1467             }
1468         } else {
1469             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1470                 return -TARGET_EFAULT;
1471             }
1472         }
1473     }
1474     return ret;
1475 }
1476 #endif
1477 
1478 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1479     defined(TARGET_NR_ppoll_time64)
1480 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1481                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1482 {
1483     struct target_pollfd *target_pfd;
1484     unsigned int nfds = arg2;
1485     struct pollfd *pfd;
1486     unsigned int i;
1487     abi_long ret;
1488 
1489     pfd = NULL;
1490     target_pfd = NULL;
1491     if (nfds) {
1492         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1493             return -TARGET_EINVAL;
1494         }
1495         target_pfd = lock_user(VERIFY_WRITE, arg1,
1496                                sizeof(struct target_pollfd) * nfds, 1);
1497         if (!target_pfd) {
1498             return -TARGET_EFAULT;
1499         }
1500 
1501         pfd = alloca(sizeof(struct pollfd) * nfds);
1502         for (i = 0; i < nfds; i++) {
1503             pfd[i].fd = tswap32(target_pfd[i].fd);
1504             pfd[i].events = tswap16(target_pfd[i].events);
1505         }
1506     }
1507     if (ppoll) {
1508         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1509         target_sigset_t *target_set;
1510         sigset_t _set, *set = &_set;
1511 
1512         if (arg3) {
1513             if (time64) {
1514                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1515                     unlock_user(target_pfd, arg1, 0);
1516                     return -TARGET_EFAULT;
1517                 }
1518             } else {
1519                 if (target_to_host_timespec(timeout_ts, arg3)) {
1520                     unlock_user(target_pfd, arg1, 0);
1521                     return -TARGET_EFAULT;
1522                 }
1523             }
1524         } else {
1525             timeout_ts = NULL;
1526         }
1527 
1528         if (arg4) {
1529             if (arg5 != sizeof(target_sigset_t)) {
1530                 unlock_user(target_pfd, arg1, 0);
1531                 return -TARGET_EINVAL;
1532             }
1533 
1534             target_set = lock_user(VERIFY_READ, arg4,
1535                                    sizeof(target_sigset_t), 1);
1536             if (!target_set) {
1537                 unlock_user(target_pfd, arg1, 0);
1538                 return -TARGET_EFAULT;
1539             }
1540             target_to_host_sigset(set, target_set);
1541         } else {
1542             set = NULL;
1543         }
1544 
1545         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1546                                    set, SIGSET_T_SIZE));
1547 
1548         if (!is_error(ret) && arg3) {
1549             if (time64) {
1550                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (host_to_target_timespec(arg3, timeout_ts)) {
1555                     return -TARGET_EFAULT;
1556                 }
1557             }
1558         }
1559         if (arg4) {
1560             unlock_user(target_set, arg4, 0);
1561         }
1562     } else {
1563           struct timespec ts, *pts;
1564 
1565           if (arg3 >= 0) {
1566               /* Convert ms to secs, ns */
1567               ts.tv_sec = arg3 / 1000;
1568               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1569               pts = &ts;
1570           } else {
1571               /* -ve poll() timeout means "infinite" */
1572               pts = NULL;
1573           }
1574           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1575     }
1576 
1577     if (!is_error(ret)) {
1578         for (i = 0; i < nfds; i++) {
1579             target_pfd[i].revents = tswap16(pfd[i].revents);
1580         }
1581     }
1582     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1583     return ret;
1584 }
1585 #endif
1586 
1587 static abi_long do_pipe2(int host_pipe[], int flags)
1588 {
1589 #ifdef CONFIG_PIPE2
1590     return pipe2(host_pipe, flags);
1591 #else
1592     return -ENOSYS;
1593 #endif
1594 }
1595 
1596 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1597                         int flags, int is_pipe2)
1598 {
1599     int host_pipe[2];
1600     abi_long ret;
1601     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1602 
1603     if (is_error(ret))
1604         return get_errno(ret);
1605 
1606     /* Several targets have special calling conventions for the original
1607        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1608     if (!is_pipe2) {
1609 #if defined(TARGET_ALPHA)
1610         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1611         return host_pipe[0];
1612 #elif defined(TARGET_MIPS)
1613         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_SH4)
1616         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1617         return host_pipe[0];
1618 #elif defined(TARGET_SPARC)
1619         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1620         return host_pipe[0];
1621 #endif
1622     }
1623 
1624     if (put_user_s32(host_pipe[0], pipedes)
1625         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1626         return -TARGET_EFAULT;
1627     return get_errno(ret);
1628 }
1629 
1630 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1631                                               abi_ulong target_addr,
1632                                               socklen_t len)
1633 {
1634     struct target_ip_mreqn *target_smreqn;
1635 
1636     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1637     if (!target_smreqn)
1638         return -TARGET_EFAULT;
1639     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1640     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1641     if (len == sizeof(struct target_ip_mreqn))
1642         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1643     unlock_user(target_smreqn, target_addr, 0);
1644 
1645     return 0;
1646 }
1647 
1648 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1649                                                abi_ulong target_addr,
1650                                                socklen_t len)
1651 {
1652     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1653     sa_family_t sa_family;
1654     struct target_sockaddr *target_saddr;
1655 
1656     if (fd_trans_target_to_host_addr(fd)) {
1657         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1658     }
1659 
1660     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1661     if (!target_saddr)
1662         return -TARGET_EFAULT;
1663 
1664     sa_family = tswap16(target_saddr->sa_family);
1665 
1666     /* Oops. The caller might send a incomplete sun_path; sun_path
1667      * must be terminated by \0 (see the manual page), but
1668      * unfortunately it is quite common to specify sockaddr_un
1669      * length as "strlen(x->sun_path)" while it should be
1670      * "strlen(...) + 1". We'll fix that here if needed.
1671      * Linux kernel has a similar feature.
1672      */
1673 
1674     if (sa_family == AF_UNIX) {
1675         if (len < unix_maxlen && len > 0) {
1676             char *cp = (char*)target_saddr;
1677 
1678             if ( cp[len-1] && !cp[len] )
1679                 len++;
1680         }
1681         if (len > unix_maxlen)
1682             len = unix_maxlen;
1683     }
1684 
1685     memcpy(addr, target_saddr, len);
1686     addr->sa_family = sa_family;
1687     if (sa_family == AF_NETLINK) {
1688         struct sockaddr_nl *nladdr;
1689 
1690         nladdr = (struct sockaddr_nl *)addr;
1691         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1692         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1693     } else if (sa_family == AF_PACKET) {
1694 	struct target_sockaddr_ll *lladdr;
1695 
1696 	lladdr = (struct target_sockaddr_ll *)addr;
1697 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1698 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1699     }
1700     unlock_user(target_saddr, target_addr, 0);
1701 
1702     return 0;
1703 }
1704 
1705 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1706                                                struct sockaddr *addr,
1707                                                socklen_t len)
1708 {
1709     struct target_sockaddr *target_saddr;
1710 
1711     if (len == 0) {
1712         return 0;
1713     }
1714     assert(addr);
1715 
1716     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1717     if (!target_saddr)
1718         return -TARGET_EFAULT;
1719     memcpy(target_saddr, addr, len);
1720     if (len >= offsetof(struct target_sockaddr, sa_family) +
1721         sizeof(target_saddr->sa_family)) {
1722         target_saddr->sa_family = tswap16(addr->sa_family);
1723     }
1724     if (addr->sa_family == AF_NETLINK &&
1725         len >= sizeof(struct target_sockaddr_nl)) {
1726         struct target_sockaddr_nl *target_nl =
1727                (struct target_sockaddr_nl *)target_saddr;
1728         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1729         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1730     } else if (addr->sa_family == AF_PACKET) {
1731         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1732         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1733         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1734     } else if (addr->sa_family == AF_INET6 &&
1735                len >= sizeof(struct target_sockaddr_in6)) {
1736         struct target_sockaddr_in6 *target_in6 =
1737                (struct target_sockaddr_in6 *)target_saddr;
1738         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1739     }
1740     unlock_user(target_saddr, target_addr, len);
1741 
1742     return 0;
1743 }
1744 
1745 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1746                                            struct target_msghdr *target_msgh)
1747 {
1748     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1749     abi_long msg_controllen;
1750     abi_ulong target_cmsg_addr;
1751     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1752     socklen_t space = 0;
1753 
1754     msg_controllen = tswapal(target_msgh->msg_controllen);
1755     if (msg_controllen < sizeof (struct target_cmsghdr))
1756         goto the_end;
1757     target_cmsg_addr = tswapal(target_msgh->msg_control);
1758     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1759     target_cmsg_start = target_cmsg;
1760     if (!target_cmsg)
1761         return -TARGET_EFAULT;
1762 
1763     while (cmsg && target_cmsg) {
1764         void *data = CMSG_DATA(cmsg);
1765         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1766 
1767         int len = tswapal(target_cmsg->cmsg_len)
1768             - sizeof(struct target_cmsghdr);
1769 
1770         space += CMSG_SPACE(len);
1771         if (space > msgh->msg_controllen) {
1772             space -= CMSG_SPACE(len);
1773             /* This is a QEMU bug, since we allocated the payload
1774              * area ourselves (unlike overflow in host-to-target
1775              * conversion, which is just the guest giving us a buffer
1776              * that's too small). It can't happen for the payload types
1777              * we currently support; if it becomes an issue in future
1778              * we would need to improve our allocation strategy to
1779              * something more intelligent than "twice the size of the
1780              * target buffer we're reading from".
1781              */
1782             qemu_log_mask(LOG_UNIMP,
1783                           ("Unsupported ancillary data %d/%d: "
1784                            "unhandled msg size\n"),
1785                           tswap32(target_cmsg->cmsg_level),
1786                           tswap32(target_cmsg->cmsg_type));
1787             break;
1788         }
1789 
1790         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1791             cmsg->cmsg_level = SOL_SOCKET;
1792         } else {
1793             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1794         }
1795         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1796         cmsg->cmsg_len = CMSG_LEN(len);
1797 
1798         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1799             int *fd = (int *)data;
1800             int *target_fd = (int *)target_data;
1801             int i, numfds = len / sizeof(int);
1802 
1803             for (i = 0; i < numfds; i++) {
1804                 __get_user(fd[i], target_fd + i);
1805             }
1806         } else if (cmsg->cmsg_level == SOL_SOCKET
1807                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1808             struct ucred *cred = (struct ucred *)data;
1809             struct target_ucred *target_cred =
1810                 (struct target_ucred *)target_data;
1811 
1812             __get_user(cred->pid, &target_cred->pid);
1813             __get_user(cred->uid, &target_cred->uid);
1814             __get_user(cred->gid, &target_cred->gid);
1815         } else {
1816             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1817                           cmsg->cmsg_level, cmsg->cmsg_type);
1818             memcpy(data, target_data, len);
1819         }
1820 
1821         cmsg = CMSG_NXTHDR(msgh, cmsg);
1822         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1823                                          target_cmsg_start);
1824     }
1825     unlock_user(target_cmsg, target_cmsg_addr, 0);
1826  the_end:
1827     msgh->msg_controllen = space;
1828     return 0;
1829 }
1830 
1831 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1832                                            struct msghdr *msgh)
1833 {
1834     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1835     abi_long msg_controllen;
1836     abi_ulong target_cmsg_addr;
1837     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1838     socklen_t space = 0;
1839 
1840     msg_controllen = tswapal(target_msgh->msg_controllen);
1841     if (msg_controllen < sizeof (struct target_cmsghdr))
1842         goto the_end;
1843     target_cmsg_addr = tswapal(target_msgh->msg_control);
1844     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1845     target_cmsg_start = target_cmsg;
1846     if (!target_cmsg)
1847         return -TARGET_EFAULT;
1848 
1849     while (cmsg && target_cmsg) {
1850         void *data = CMSG_DATA(cmsg);
1851         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1852 
1853         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1854         int tgt_len, tgt_space;
1855 
1856         /* We never copy a half-header but may copy half-data;
1857          * this is Linux's behaviour in put_cmsg(). Note that
1858          * truncation here is a guest problem (which we report
1859          * to the guest via the CTRUNC bit), unlike truncation
1860          * in target_to_host_cmsg, which is a QEMU bug.
1861          */
1862         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1863             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1864             break;
1865         }
1866 
1867         if (cmsg->cmsg_level == SOL_SOCKET) {
1868             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1869         } else {
1870             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1871         }
1872         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1873 
1874         /* Payload types which need a different size of payload on
1875          * the target must adjust tgt_len here.
1876          */
1877         tgt_len = len;
1878         switch (cmsg->cmsg_level) {
1879         case SOL_SOCKET:
1880             switch (cmsg->cmsg_type) {
1881             case SO_TIMESTAMP:
1882                 tgt_len = sizeof(struct target_timeval);
1883                 break;
1884             default:
1885                 break;
1886             }
1887             break;
1888         default:
1889             break;
1890         }
1891 
1892         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1893             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1894             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1895         }
1896 
1897         /* We must now copy-and-convert len bytes of payload
1898          * into tgt_len bytes of destination space. Bear in mind
1899          * that in both source and destination we may be dealing
1900          * with a truncated value!
1901          */
1902         switch (cmsg->cmsg_level) {
1903         case SOL_SOCKET:
1904             switch (cmsg->cmsg_type) {
1905             case SCM_RIGHTS:
1906             {
1907                 int *fd = (int *)data;
1908                 int *target_fd = (int *)target_data;
1909                 int i, numfds = tgt_len / sizeof(int);
1910 
1911                 for (i = 0; i < numfds; i++) {
1912                     __put_user(fd[i], target_fd + i);
1913                 }
1914                 break;
1915             }
1916             case SO_TIMESTAMP:
1917             {
1918                 struct timeval *tv = (struct timeval *)data;
1919                 struct target_timeval *target_tv =
1920                     (struct target_timeval *)target_data;
1921 
1922                 if (len != sizeof(struct timeval) ||
1923                     tgt_len != sizeof(struct target_timeval)) {
1924                     goto unimplemented;
1925                 }
1926 
1927                 /* copy struct timeval to target */
1928                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1929                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1930                 break;
1931             }
1932             case SCM_CREDENTIALS:
1933             {
1934                 struct ucred *cred = (struct ucred *)data;
1935                 struct target_ucred *target_cred =
1936                     (struct target_ucred *)target_data;
1937 
1938                 __put_user(cred->pid, &target_cred->pid);
1939                 __put_user(cred->uid, &target_cred->uid);
1940                 __put_user(cred->gid, &target_cred->gid);
1941                 break;
1942             }
1943             default:
1944                 goto unimplemented;
1945             }
1946             break;
1947 
1948         case SOL_IP:
1949             switch (cmsg->cmsg_type) {
1950             case IP_TTL:
1951             {
1952                 uint32_t *v = (uint32_t *)data;
1953                 uint32_t *t_int = (uint32_t *)target_data;
1954 
1955                 if (len != sizeof(uint32_t) ||
1956                     tgt_len != sizeof(uint32_t)) {
1957                     goto unimplemented;
1958                 }
1959                 __put_user(*v, t_int);
1960                 break;
1961             }
1962             case IP_RECVERR:
1963             {
1964                 struct errhdr_t {
1965                    struct sock_extended_err ee;
1966                    struct sockaddr_in offender;
1967                 };
1968                 struct errhdr_t *errh = (struct errhdr_t *)data;
1969                 struct errhdr_t *target_errh =
1970                     (struct errhdr_t *)target_data;
1971 
1972                 if (len != sizeof(struct errhdr_t) ||
1973                     tgt_len != sizeof(struct errhdr_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1977                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1978                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1979                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1980                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1981                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1982                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1983                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1984                     (void *) &errh->offender, sizeof(errh->offender));
1985                 break;
1986             }
1987             default:
1988                 goto unimplemented;
1989             }
1990             break;
1991 
1992         case SOL_IPV6:
1993             switch (cmsg->cmsg_type) {
1994             case IPV6_HOPLIMIT:
1995             {
1996                 uint32_t *v = (uint32_t *)data;
1997                 uint32_t *t_int = (uint32_t *)target_data;
1998 
1999                 if (len != sizeof(uint32_t) ||
2000                     tgt_len != sizeof(uint32_t)) {
2001                     goto unimplemented;
2002                 }
2003                 __put_user(*v, t_int);
2004                 break;
2005             }
2006             case IPV6_RECVERR:
2007             {
2008                 struct errhdr6_t {
2009                    struct sock_extended_err ee;
2010                    struct sockaddr_in6 offender;
2011                 };
2012                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2013                 struct errhdr6_t *target_errh =
2014                     (struct errhdr6_t *)target_data;
2015 
2016                 if (len != sizeof(struct errhdr6_t) ||
2017                     tgt_len != sizeof(struct errhdr6_t)) {
2018                     goto unimplemented;
2019                 }
2020                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2021                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2022                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2023                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2024                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2025                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2026                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2027                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2028                     (void *) &errh->offender, sizeof(errh->offender));
2029                 break;
2030             }
2031             default:
2032                 goto unimplemented;
2033             }
2034             break;
2035 
2036         default:
2037         unimplemented:
2038             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2039                           cmsg->cmsg_level, cmsg->cmsg_type);
2040             memcpy(target_data, data, MIN(len, tgt_len));
2041             if (tgt_len > len) {
2042                 memset(target_data + len, 0, tgt_len - len);
2043             }
2044         }
2045 
2046         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2047         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2048         if (msg_controllen < tgt_space) {
2049             tgt_space = msg_controllen;
2050         }
2051         msg_controllen -= tgt_space;
2052         space += tgt_space;
2053         cmsg = CMSG_NXTHDR(msgh, cmsg);
2054         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2055                                          target_cmsg_start);
2056     }
2057     unlock_user(target_cmsg, target_cmsg_addr, space);
2058  the_end:
2059     target_msgh->msg_controllen = tswapal(space);
2060     return 0;
2061 }
2062 
2063 /* do_setsockopt() Must return target values and target errnos. */
2064 static abi_long do_setsockopt(int sockfd, int level, int optname,
2065                               abi_ulong optval_addr, socklen_t optlen)
2066 {
2067     abi_long ret;
2068     int val;
2069     struct ip_mreqn *ip_mreq;
2070     struct ip_mreq_source *ip_mreq_source;
2071 
2072     switch(level) {
2073     case SOL_TCP:
2074     case SOL_UDP:
2075         /* TCP and UDP options all take an 'int' value.  */
2076         if (optlen < sizeof(uint32_t))
2077             return -TARGET_EINVAL;
2078 
2079         if (get_user_u32(val, optval_addr))
2080             return -TARGET_EFAULT;
2081         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2082         break;
2083     case SOL_IP:
2084         switch(optname) {
2085         case IP_TOS:
2086         case IP_TTL:
2087         case IP_HDRINCL:
2088         case IP_ROUTER_ALERT:
2089         case IP_RECVOPTS:
2090         case IP_RETOPTS:
2091         case IP_PKTINFO:
2092         case IP_MTU_DISCOVER:
2093         case IP_RECVERR:
2094         case IP_RECVTTL:
2095         case IP_RECVTOS:
2096 #ifdef IP_FREEBIND
2097         case IP_FREEBIND:
2098 #endif
2099         case IP_MULTICAST_TTL:
2100         case IP_MULTICAST_LOOP:
2101             val = 0;
2102             if (optlen >= sizeof(uint32_t)) {
2103                 if (get_user_u32(val, optval_addr))
2104                     return -TARGET_EFAULT;
2105             } else if (optlen >= 1) {
2106                 if (get_user_u8(val, optval_addr))
2107                     return -TARGET_EFAULT;
2108             }
2109             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2110             break;
2111         case IP_ADD_MEMBERSHIP:
2112         case IP_DROP_MEMBERSHIP:
2113             if (optlen < sizeof (struct target_ip_mreq) ||
2114                 optlen > sizeof (struct target_ip_mreqn))
2115                 return -TARGET_EINVAL;
2116 
2117             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2118             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2119             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2120             break;
2121 
2122         case IP_BLOCK_SOURCE:
2123         case IP_UNBLOCK_SOURCE:
2124         case IP_ADD_SOURCE_MEMBERSHIP:
2125         case IP_DROP_SOURCE_MEMBERSHIP:
2126             if (optlen != sizeof (struct target_ip_mreq_source))
2127                 return -TARGET_EINVAL;
2128 
2129             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2130             if (!ip_mreq_source) {
2131                 return -TARGET_EFAULT;
2132             }
2133             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2134             unlock_user (ip_mreq_source, optval_addr, 0);
2135             break;
2136 
2137         default:
2138             goto unimplemented;
2139         }
2140         break;
2141     case SOL_IPV6:
2142         switch (optname) {
2143         case IPV6_MTU_DISCOVER:
2144         case IPV6_MTU:
2145         case IPV6_V6ONLY:
2146         case IPV6_RECVPKTINFO:
2147         case IPV6_UNICAST_HOPS:
2148         case IPV6_MULTICAST_HOPS:
2149         case IPV6_MULTICAST_LOOP:
2150         case IPV6_RECVERR:
2151         case IPV6_RECVHOPLIMIT:
2152         case IPV6_2292HOPLIMIT:
2153         case IPV6_CHECKSUM:
2154         case IPV6_ADDRFORM:
2155         case IPV6_2292PKTINFO:
2156         case IPV6_RECVTCLASS:
2157         case IPV6_RECVRTHDR:
2158         case IPV6_2292RTHDR:
2159         case IPV6_RECVHOPOPTS:
2160         case IPV6_2292HOPOPTS:
2161         case IPV6_RECVDSTOPTS:
2162         case IPV6_2292DSTOPTS:
2163         case IPV6_TCLASS:
2164         case IPV6_ADDR_PREFERENCES:
2165 #ifdef IPV6_RECVPATHMTU
2166         case IPV6_RECVPATHMTU:
2167 #endif
2168 #ifdef IPV6_TRANSPARENT
2169         case IPV6_TRANSPARENT:
2170 #endif
2171 #ifdef IPV6_FREEBIND
2172         case IPV6_FREEBIND:
2173 #endif
2174 #ifdef IPV6_RECVORIGDSTADDR
2175         case IPV6_RECVORIGDSTADDR:
2176 #endif
2177             val = 0;
2178             if (optlen < sizeof(uint32_t)) {
2179                 return -TARGET_EINVAL;
2180             }
2181             if (get_user_u32(val, optval_addr)) {
2182                 return -TARGET_EFAULT;
2183             }
2184             ret = get_errno(setsockopt(sockfd, level, optname,
2185                                        &val, sizeof(val)));
2186             break;
2187         case IPV6_PKTINFO:
2188         {
2189             struct in6_pktinfo pki;
2190 
2191             if (optlen < sizeof(pki)) {
2192                 return -TARGET_EINVAL;
2193             }
2194 
2195             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2196                 return -TARGET_EFAULT;
2197             }
2198 
2199             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2200 
2201             ret = get_errno(setsockopt(sockfd, level, optname,
2202                                        &pki, sizeof(pki)));
2203             break;
2204         }
2205         case IPV6_ADD_MEMBERSHIP:
2206         case IPV6_DROP_MEMBERSHIP:
2207         {
2208             struct ipv6_mreq ipv6mreq;
2209 
2210             if (optlen < sizeof(ipv6mreq)) {
2211                 return -TARGET_EINVAL;
2212             }
2213 
2214             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2215                 return -TARGET_EFAULT;
2216             }
2217 
2218             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2219 
2220             ret = get_errno(setsockopt(sockfd, level, optname,
2221                                        &ipv6mreq, sizeof(ipv6mreq)));
2222             break;
2223         }
2224         default:
2225             goto unimplemented;
2226         }
2227         break;
2228     case SOL_ICMPV6:
2229         switch (optname) {
2230         case ICMPV6_FILTER:
2231         {
2232             struct icmp6_filter icmp6f;
2233 
2234             if (optlen > sizeof(icmp6f)) {
2235                 optlen = sizeof(icmp6f);
2236             }
2237 
2238             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2239                 return -TARGET_EFAULT;
2240             }
2241 
2242             for (val = 0; val < 8; val++) {
2243                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2244             }
2245 
2246             ret = get_errno(setsockopt(sockfd, level, optname,
2247                                        &icmp6f, optlen));
2248             break;
2249         }
2250         default:
2251             goto unimplemented;
2252         }
2253         break;
2254     case SOL_RAW:
2255         switch (optname) {
2256         case ICMP_FILTER:
2257         case IPV6_CHECKSUM:
2258             /* those take an u32 value */
2259             if (optlen < sizeof(uint32_t)) {
2260                 return -TARGET_EINVAL;
2261             }
2262 
2263             if (get_user_u32(val, optval_addr)) {
2264                 return -TARGET_EFAULT;
2265             }
2266             ret = get_errno(setsockopt(sockfd, level, optname,
2267                                        &val, sizeof(val)));
2268             break;
2269 
2270         default:
2271             goto unimplemented;
2272         }
2273         break;
2274 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2275     case SOL_ALG:
2276         switch (optname) {
2277         case ALG_SET_KEY:
2278         {
2279             char *alg_key = g_malloc(optlen);
2280 
2281             if (!alg_key) {
2282                 return -TARGET_ENOMEM;
2283             }
2284             if (copy_from_user(alg_key, optval_addr, optlen)) {
2285                 g_free(alg_key);
2286                 return -TARGET_EFAULT;
2287             }
2288             ret = get_errno(setsockopt(sockfd, level, optname,
2289                                        alg_key, optlen));
2290             g_free(alg_key);
2291             break;
2292         }
2293         case ALG_SET_AEAD_AUTHSIZE:
2294         {
2295             ret = get_errno(setsockopt(sockfd, level, optname,
2296                                        NULL, optlen));
2297             break;
2298         }
2299         default:
2300             goto unimplemented;
2301         }
2302         break;
2303 #endif
2304     case TARGET_SOL_SOCKET:
2305         switch (optname) {
2306         case TARGET_SO_RCVTIMEO:
2307         {
2308                 struct timeval tv;
2309 
2310                 optname = SO_RCVTIMEO;
2311 
2312 set_timeout:
2313                 if (optlen != sizeof(struct target_timeval)) {
2314                     return -TARGET_EINVAL;
2315                 }
2316 
2317                 if (copy_from_user_timeval(&tv, optval_addr)) {
2318                     return -TARGET_EFAULT;
2319                 }
2320 
2321                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2322                                 &tv, sizeof(tv)));
2323                 return ret;
2324         }
2325         case TARGET_SO_SNDTIMEO:
2326                 optname = SO_SNDTIMEO;
2327                 goto set_timeout;
2328         case TARGET_SO_ATTACH_FILTER:
2329         {
2330                 struct target_sock_fprog *tfprog;
2331                 struct target_sock_filter *tfilter;
2332                 struct sock_fprog fprog;
2333                 struct sock_filter *filter;
2334                 int i;
2335 
2336                 if (optlen != sizeof(*tfprog)) {
2337                     return -TARGET_EINVAL;
2338                 }
2339                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2340                     return -TARGET_EFAULT;
2341                 }
2342                 if (!lock_user_struct(VERIFY_READ, tfilter,
2343                                       tswapal(tfprog->filter), 0)) {
2344                     unlock_user_struct(tfprog, optval_addr, 1);
2345                     return -TARGET_EFAULT;
2346                 }
2347 
2348                 fprog.len = tswap16(tfprog->len);
2349                 filter = g_try_new(struct sock_filter, fprog.len);
2350                 if (filter == NULL) {
2351                     unlock_user_struct(tfilter, tfprog->filter, 1);
2352                     unlock_user_struct(tfprog, optval_addr, 1);
2353                     return -TARGET_ENOMEM;
2354                 }
2355                 for (i = 0; i < fprog.len; i++) {
2356                     filter[i].code = tswap16(tfilter[i].code);
2357                     filter[i].jt = tfilter[i].jt;
2358                     filter[i].jf = tfilter[i].jf;
2359                     filter[i].k = tswap32(tfilter[i].k);
2360                 }
2361                 fprog.filter = filter;
2362 
2363                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2364                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2365                 g_free(filter);
2366 
2367                 unlock_user_struct(tfilter, tfprog->filter, 1);
2368                 unlock_user_struct(tfprog, optval_addr, 1);
2369                 return ret;
2370         }
2371 	case TARGET_SO_BINDTODEVICE:
2372 	{
2373 		char *dev_ifname, *addr_ifname;
2374 
2375 		if (optlen > IFNAMSIZ - 1) {
2376 		    optlen = IFNAMSIZ - 1;
2377 		}
2378 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2379 		if (!dev_ifname) {
2380 		    return -TARGET_EFAULT;
2381 		}
2382 		optname = SO_BINDTODEVICE;
2383 		addr_ifname = alloca(IFNAMSIZ);
2384 		memcpy(addr_ifname, dev_ifname, optlen);
2385 		addr_ifname[optlen] = 0;
2386 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2387                                            addr_ifname, optlen));
2388 		unlock_user (dev_ifname, optval_addr, 0);
2389 		return ret;
2390 	}
2391         case TARGET_SO_LINGER:
2392         {
2393                 struct linger lg;
2394                 struct target_linger *tlg;
2395 
2396                 if (optlen != sizeof(struct target_linger)) {
2397                     return -TARGET_EINVAL;
2398                 }
2399                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2400                     return -TARGET_EFAULT;
2401                 }
2402                 __get_user(lg.l_onoff, &tlg->l_onoff);
2403                 __get_user(lg.l_linger, &tlg->l_linger);
2404                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2405                                 &lg, sizeof(lg)));
2406                 unlock_user_struct(tlg, optval_addr, 0);
2407                 return ret;
2408         }
2409             /* Options with 'int' argument.  */
2410         case TARGET_SO_DEBUG:
2411 		optname = SO_DEBUG;
2412 		break;
2413         case TARGET_SO_REUSEADDR:
2414 		optname = SO_REUSEADDR;
2415 		break;
2416 #ifdef SO_REUSEPORT
2417         case TARGET_SO_REUSEPORT:
2418                 optname = SO_REUSEPORT;
2419                 break;
2420 #endif
2421         case TARGET_SO_TYPE:
2422 		optname = SO_TYPE;
2423 		break;
2424         case TARGET_SO_ERROR:
2425 		optname = SO_ERROR;
2426 		break;
2427         case TARGET_SO_DONTROUTE:
2428 		optname = SO_DONTROUTE;
2429 		break;
2430         case TARGET_SO_BROADCAST:
2431 		optname = SO_BROADCAST;
2432 		break;
2433         case TARGET_SO_SNDBUF:
2434 		optname = SO_SNDBUF;
2435 		break;
2436         case TARGET_SO_SNDBUFFORCE:
2437                 optname = SO_SNDBUFFORCE;
2438                 break;
2439         case TARGET_SO_RCVBUF:
2440 		optname = SO_RCVBUF;
2441 		break;
2442         case TARGET_SO_RCVBUFFORCE:
2443                 optname = SO_RCVBUFFORCE;
2444                 break;
2445         case TARGET_SO_KEEPALIVE:
2446 		optname = SO_KEEPALIVE;
2447 		break;
2448         case TARGET_SO_OOBINLINE:
2449 		optname = SO_OOBINLINE;
2450 		break;
2451         case TARGET_SO_NO_CHECK:
2452 		optname = SO_NO_CHECK;
2453 		break;
2454         case TARGET_SO_PRIORITY:
2455 		optname = SO_PRIORITY;
2456 		break;
2457 #ifdef SO_BSDCOMPAT
2458         case TARGET_SO_BSDCOMPAT:
2459 		optname = SO_BSDCOMPAT;
2460 		break;
2461 #endif
2462         case TARGET_SO_PASSCRED:
2463 		optname = SO_PASSCRED;
2464 		break;
2465         case TARGET_SO_PASSSEC:
2466                 optname = SO_PASSSEC;
2467                 break;
2468         case TARGET_SO_TIMESTAMP:
2469 		optname = SO_TIMESTAMP;
2470 		break;
2471         case TARGET_SO_RCVLOWAT:
2472 		optname = SO_RCVLOWAT;
2473 		break;
2474         default:
2475             goto unimplemented;
2476         }
2477 	if (optlen < sizeof(uint32_t))
2478             return -TARGET_EINVAL;
2479 
2480 	if (get_user_u32(val, optval_addr))
2481             return -TARGET_EFAULT;
2482 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2483         break;
2484 #ifdef SOL_NETLINK
2485     case SOL_NETLINK:
2486         switch (optname) {
2487         case NETLINK_PKTINFO:
2488         case NETLINK_ADD_MEMBERSHIP:
2489         case NETLINK_DROP_MEMBERSHIP:
2490         case NETLINK_BROADCAST_ERROR:
2491         case NETLINK_NO_ENOBUFS:
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2493         case NETLINK_LISTEN_ALL_NSID:
2494         case NETLINK_CAP_ACK:
2495 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2496 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2497         case NETLINK_EXT_ACK:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2500         case NETLINK_GET_STRICT_CHK:
2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2502             break;
2503         default:
2504             goto unimplemented;
2505         }
2506         val = 0;
2507         if (optlen < sizeof(uint32_t)) {
2508             return -TARGET_EINVAL;
2509         }
2510         if (get_user_u32(val, optval_addr)) {
2511             return -TARGET_EFAULT;
2512         }
2513         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2514                                    sizeof(val)));
2515         break;
2516 #endif /* SOL_NETLINK */
2517     default:
2518     unimplemented:
2519         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2520                       level, optname);
2521         ret = -TARGET_ENOPROTOOPT;
2522     }
2523     return ret;
2524 }
2525 
2526 /* do_getsockopt() Must return target values and target errnos. */
2527 static abi_long do_getsockopt(int sockfd, int level, int optname,
2528                               abi_ulong optval_addr, abi_ulong optlen)
2529 {
2530     abi_long ret;
2531     int len, val;
2532     socklen_t lv;
2533 
2534     switch(level) {
2535     case TARGET_SOL_SOCKET:
2536         level = SOL_SOCKET;
2537         switch (optname) {
2538         /* These don't just return a single integer */
2539         case TARGET_SO_PEERNAME:
2540             goto unimplemented;
2541         case TARGET_SO_RCVTIMEO: {
2542             struct timeval tv;
2543             socklen_t tvlen;
2544 
2545             optname = SO_RCVTIMEO;
2546 
2547 get_timeout:
2548             if (get_user_u32(len, optlen)) {
2549                 return -TARGET_EFAULT;
2550             }
2551             if (len < 0) {
2552                 return -TARGET_EINVAL;
2553             }
2554 
2555             tvlen = sizeof(tv);
2556             ret = get_errno(getsockopt(sockfd, level, optname,
2557                                        &tv, &tvlen));
2558             if (ret < 0) {
2559                 return ret;
2560             }
2561             if (len > sizeof(struct target_timeval)) {
2562                 len = sizeof(struct target_timeval);
2563             }
2564             if (copy_to_user_timeval(optval_addr, &tv)) {
2565                 return -TARGET_EFAULT;
2566             }
2567             if (put_user_u32(len, optlen)) {
2568                 return -TARGET_EFAULT;
2569             }
2570             break;
2571         }
2572         case TARGET_SO_SNDTIMEO:
2573             optname = SO_SNDTIMEO;
2574             goto get_timeout;
2575         case TARGET_SO_PEERCRED: {
2576             struct ucred cr;
2577             socklen_t crlen;
2578             struct target_ucred *tcr;
2579 
2580             if (get_user_u32(len, optlen)) {
2581                 return -TARGET_EFAULT;
2582             }
2583             if (len < 0) {
2584                 return -TARGET_EINVAL;
2585             }
2586 
2587             crlen = sizeof(cr);
2588             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2589                                        &cr, &crlen));
2590             if (ret < 0) {
2591                 return ret;
2592             }
2593             if (len > crlen) {
2594                 len = crlen;
2595             }
2596             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             __put_user(cr.pid, &tcr->pid);
2600             __put_user(cr.uid, &tcr->uid);
2601             __put_user(cr.gid, &tcr->gid);
2602             unlock_user_struct(tcr, optval_addr, 1);
2603             if (put_user_u32(len, optlen)) {
2604                 return -TARGET_EFAULT;
2605             }
2606             break;
2607         }
2608         case TARGET_SO_PEERSEC: {
2609             char *name;
2610 
2611             if (get_user_u32(len, optlen)) {
2612                 return -TARGET_EFAULT;
2613             }
2614             if (len < 0) {
2615                 return -TARGET_EINVAL;
2616             }
2617             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2618             if (!name) {
2619                 return -TARGET_EFAULT;
2620             }
2621             lv = len;
2622             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2623                                        name, &lv));
2624             if (put_user_u32(lv, optlen)) {
2625                 ret = -TARGET_EFAULT;
2626             }
2627             unlock_user(name, optval_addr, lv);
2628             break;
2629         }
2630         case TARGET_SO_LINGER:
2631         {
2632             struct linger lg;
2633             socklen_t lglen;
2634             struct target_linger *tlg;
2635 
2636             if (get_user_u32(len, optlen)) {
2637                 return -TARGET_EFAULT;
2638             }
2639             if (len < 0) {
2640                 return -TARGET_EINVAL;
2641             }
2642 
2643             lglen = sizeof(lg);
2644             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2645                                        &lg, &lglen));
2646             if (ret < 0) {
2647                 return ret;
2648             }
2649             if (len > lglen) {
2650                 len = lglen;
2651             }
2652             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2653                 return -TARGET_EFAULT;
2654             }
2655             __put_user(lg.l_onoff, &tlg->l_onoff);
2656             __put_user(lg.l_linger, &tlg->l_linger);
2657             unlock_user_struct(tlg, optval_addr, 1);
2658             if (put_user_u32(len, optlen)) {
2659                 return -TARGET_EFAULT;
2660             }
2661             break;
2662         }
2663         /* Options with 'int' argument.  */
2664         case TARGET_SO_DEBUG:
2665             optname = SO_DEBUG;
2666             goto int_case;
2667         case TARGET_SO_REUSEADDR:
2668             optname = SO_REUSEADDR;
2669             goto int_case;
2670 #ifdef SO_REUSEPORT
2671         case TARGET_SO_REUSEPORT:
2672             optname = SO_REUSEPORT;
2673             goto int_case;
2674 #endif
2675         case TARGET_SO_TYPE:
2676             optname = SO_TYPE;
2677             goto int_case;
2678         case TARGET_SO_ERROR:
2679             optname = SO_ERROR;
2680             goto int_case;
2681         case TARGET_SO_DONTROUTE:
2682             optname = SO_DONTROUTE;
2683             goto int_case;
2684         case TARGET_SO_BROADCAST:
2685             optname = SO_BROADCAST;
2686             goto int_case;
2687         case TARGET_SO_SNDBUF:
2688             optname = SO_SNDBUF;
2689             goto int_case;
2690         case TARGET_SO_RCVBUF:
2691             optname = SO_RCVBUF;
2692             goto int_case;
2693         case TARGET_SO_KEEPALIVE:
2694             optname = SO_KEEPALIVE;
2695             goto int_case;
2696         case TARGET_SO_OOBINLINE:
2697             optname = SO_OOBINLINE;
2698             goto int_case;
2699         case TARGET_SO_NO_CHECK:
2700             optname = SO_NO_CHECK;
2701             goto int_case;
2702         case TARGET_SO_PRIORITY:
2703             optname = SO_PRIORITY;
2704             goto int_case;
2705 #ifdef SO_BSDCOMPAT
2706         case TARGET_SO_BSDCOMPAT:
2707             optname = SO_BSDCOMPAT;
2708             goto int_case;
2709 #endif
2710         case TARGET_SO_PASSCRED:
2711             optname = SO_PASSCRED;
2712             goto int_case;
2713         case TARGET_SO_TIMESTAMP:
2714             optname = SO_TIMESTAMP;
2715             goto int_case;
2716         case TARGET_SO_RCVLOWAT:
2717             optname = SO_RCVLOWAT;
2718             goto int_case;
2719         case TARGET_SO_ACCEPTCONN:
2720             optname = SO_ACCEPTCONN;
2721             goto int_case;
2722         case TARGET_SO_PROTOCOL:
2723             optname = SO_PROTOCOL;
2724             goto int_case;
2725         case TARGET_SO_DOMAIN:
2726             optname = SO_DOMAIN;
2727             goto int_case;
2728         default:
2729             goto int_case;
2730         }
2731         break;
2732     case SOL_TCP:
2733     case SOL_UDP:
2734         /* TCP and UDP options all take an 'int' value.  */
2735     int_case:
2736         if (get_user_u32(len, optlen))
2737             return -TARGET_EFAULT;
2738         if (len < 0)
2739             return -TARGET_EINVAL;
2740         lv = sizeof(lv);
2741         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2742         if (ret < 0)
2743             return ret;
2744         if (optname == SO_TYPE) {
2745             val = host_to_target_sock_type(val);
2746         }
2747         if (len > lv)
2748             len = lv;
2749         if (len == 4) {
2750             if (put_user_u32(val, optval_addr))
2751                 return -TARGET_EFAULT;
2752         } else {
2753             if (put_user_u8(val, optval_addr))
2754                 return -TARGET_EFAULT;
2755         }
2756         if (put_user_u32(len, optlen))
2757             return -TARGET_EFAULT;
2758         break;
2759     case SOL_IP:
2760         switch(optname) {
2761         case IP_TOS:
2762         case IP_TTL:
2763         case IP_HDRINCL:
2764         case IP_ROUTER_ALERT:
2765         case IP_RECVOPTS:
2766         case IP_RETOPTS:
2767         case IP_PKTINFO:
2768         case IP_MTU_DISCOVER:
2769         case IP_RECVERR:
2770         case IP_RECVTOS:
2771 #ifdef IP_FREEBIND
2772         case IP_FREEBIND:
2773 #endif
2774         case IP_MULTICAST_TTL:
2775         case IP_MULTICAST_LOOP:
2776             if (get_user_u32(len, optlen))
2777                 return -TARGET_EFAULT;
2778             if (len < 0)
2779                 return -TARGET_EINVAL;
2780             lv = sizeof(lv);
2781             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2782             if (ret < 0)
2783                 return ret;
2784             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2785                 len = 1;
2786                 if (put_user_u32(len, optlen)
2787                     || put_user_u8(val, optval_addr))
2788                     return -TARGET_EFAULT;
2789             } else {
2790                 if (len > sizeof(int))
2791                     len = sizeof(int);
2792                 if (put_user_u32(len, optlen)
2793                     || put_user_u32(val, optval_addr))
2794                     return -TARGET_EFAULT;
2795             }
2796             break;
2797         default:
2798             ret = -TARGET_ENOPROTOOPT;
2799             break;
2800         }
2801         break;
2802     case SOL_IPV6:
2803         switch (optname) {
2804         case IPV6_MTU_DISCOVER:
2805         case IPV6_MTU:
2806         case IPV6_V6ONLY:
2807         case IPV6_RECVPKTINFO:
2808         case IPV6_UNICAST_HOPS:
2809         case IPV6_MULTICAST_HOPS:
2810         case IPV6_MULTICAST_LOOP:
2811         case IPV6_RECVERR:
2812         case IPV6_RECVHOPLIMIT:
2813         case IPV6_2292HOPLIMIT:
2814         case IPV6_CHECKSUM:
2815         case IPV6_ADDRFORM:
2816         case IPV6_2292PKTINFO:
2817         case IPV6_RECVTCLASS:
2818         case IPV6_RECVRTHDR:
2819         case IPV6_2292RTHDR:
2820         case IPV6_RECVHOPOPTS:
2821         case IPV6_2292HOPOPTS:
2822         case IPV6_RECVDSTOPTS:
2823         case IPV6_2292DSTOPTS:
2824         case IPV6_TCLASS:
2825         case IPV6_ADDR_PREFERENCES:
2826 #ifdef IPV6_RECVPATHMTU
2827         case IPV6_RECVPATHMTU:
2828 #endif
2829 #ifdef IPV6_TRANSPARENT
2830         case IPV6_TRANSPARENT:
2831 #endif
2832 #ifdef IPV6_FREEBIND
2833         case IPV6_FREEBIND:
2834 #endif
2835 #ifdef IPV6_RECVORIGDSTADDR
2836         case IPV6_RECVORIGDSTADDR:
2837 #endif
2838             if (get_user_u32(len, optlen))
2839                 return -TARGET_EFAULT;
2840             if (len < 0)
2841                 return -TARGET_EINVAL;
2842             lv = sizeof(lv);
2843             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2844             if (ret < 0)
2845                 return ret;
2846             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2847                 len = 1;
2848                 if (put_user_u32(len, optlen)
2849                     || put_user_u8(val, optval_addr))
2850                     return -TARGET_EFAULT;
2851             } else {
2852                 if (len > sizeof(int))
2853                     len = sizeof(int);
2854                 if (put_user_u32(len, optlen)
2855                     || put_user_u32(val, optval_addr))
2856                     return -TARGET_EFAULT;
2857             }
2858             break;
2859         default:
2860             ret = -TARGET_ENOPROTOOPT;
2861             break;
2862         }
2863         break;
2864 #ifdef SOL_NETLINK
2865     case SOL_NETLINK:
2866         switch (optname) {
2867         case NETLINK_PKTINFO:
2868         case NETLINK_BROADCAST_ERROR:
2869         case NETLINK_NO_ENOBUFS:
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2871         case NETLINK_LISTEN_ALL_NSID:
2872         case NETLINK_CAP_ACK:
2873 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2874 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2875         case NETLINK_EXT_ACK:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2878         case NETLINK_GET_STRICT_CHK:
2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2880             if (get_user_u32(len, optlen)) {
2881                 return -TARGET_EFAULT;
2882             }
2883             if (len != sizeof(val)) {
2884                 return -TARGET_EINVAL;
2885             }
2886             lv = len;
2887             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2888             if (ret < 0) {
2889                 return ret;
2890             }
2891             if (put_user_u32(lv, optlen)
2892                 || put_user_u32(val, optval_addr)) {
2893                 return -TARGET_EFAULT;
2894             }
2895             break;
2896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2897         case NETLINK_LIST_MEMBERSHIPS:
2898         {
2899             uint32_t *results;
2900             int i;
2901             if (get_user_u32(len, optlen)) {
2902                 return -TARGET_EFAULT;
2903             }
2904             if (len < 0) {
2905                 return -TARGET_EINVAL;
2906             }
2907             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2908             if (!results && len > 0) {
2909                 return -TARGET_EFAULT;
2910             }
2911             lv = len;
2912             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2913             if (ret < 0) {
2914                 unlock_user(results, optval_addr, 0);
2915                 return ret;
2916             }
2917             /* swap host endianess to target endianess. */
2918             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2919                 results[i] = tswap32(results[i]);
2920             }
2921             if (put_user_u32(lv, optlen)) {
2922                 return -TARGET_EFAULT;
2923             }
2924             unlock_user(results, optval_addr, 0);
2925             break;
2926         }
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2928         default:
2929             goto unimplemented;
2930         }
2931         break;
2932 #endif /* SOL_NETLINK */
2933     default:
2934     unimplemented:
2935         qemu_log_mask(LOG_UNIMP,
2936                       "getsockopt level=%d optname=%d not yet supported\n",
2937                       level, optname);
2938         ret = -TARGET_EOPNOTSUPP;
2939         break;
2940     }
2941     return ret;
2942 }
2943 
2944 /* Convert target low/high pair representing file offset into the host
2945  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2946  * as the kernel doesn't handle them either.
2947  */
2948 static void target_to_host_low_high(abi_ulong tlow,
2949                                     abi_ulong thigh,
2950                                     unsigned long *hlow,
2951                                     unsigned long *hhigh)
2952 {
2953     uint64_t off = tlow |
2954         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2955         TARGET_LONG_BITS / 2;
2956 
2957     *hlow = off;
2958     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2959 }
2960 
2961 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2962                                 abi_ulong count, int copy)
2963 {
2964     struct target_iovec *target_vec;
2965     struct iovec *vec;
2966     abi_ulong total_len, max_len;
2967     int i;
2968     int err = 0;
2969     bool bad_address = false;
2970 
2971     if (count == 0) {
2972         errno = 0;
2973         return NULL;
2974     }
2975     if (count > IOV_MAX) {
2976         errno = EINVAL;
2977         return NULL;
2978     }
2979 
2980     vec = g_try_new0(struct iovec, count);
2981     if (vec == NULL) {
2982         errno = ENOMEM;
2983         return NULL;
2984     }
2985 
2986     target_vec = lock_user(VERIFY_READ, target_addr,
2987                            count * sizeof(struct target_iovec), 1);
2988     if (target_vec == NULL) {
2989         err = EFAULT;
2990         goto fail2;
2991     }
2992 
2993     /* ??? If host page size > target page size, this will result in a
2994        value larger than what we can actually support.  */
2995     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2996     total_len = 0;
2997 
2998     for (i = 0; i < count; i++) {
2999         abi_ulong base = tswapal(target_vec[i].iov_base);
3000         abi_long len = tswapal(target_vec[i].iov_len);
3001 
3002         if (len < 0) {
3003             err = EINVAL;
3004             goto fail;
3005         } else if (len == 0) {
3006             /* Zero length pointer is ignored.  */
3007             vec[i].iov_base = 0;
3008         } else {
3009             vec[i].iov_base = lock_user(type, base, len, copy);
3010             /* If the first buffer pointer is bad, this is a fault.  But
3011              * subsequent bad buffers will result in a partial write; this
3012              * is realized by filling the vector with null pointers and
3013              * zero lengths. */
3014             if (!vec[i].iov_base) {
3015                 if (i == 0) {
3016                     err = EFAULT;
3017                     goto fail;
3018                 } else {
3019                     bad_address = true;
3020                 }
3021             }
3022             if (bad_address) {
3023                 len = 0;
3024             }
3025             if (len > max_len - total_len) {
3026                 len = max_len - total_len;
3027             }
3028         }
3029         vec[i].iov_len = len;
3030         total_len += len;
3031     }
3032 
3033     unlock_user(target_vec, target_addr, 0);
3034     return vec;
3035 
3036  fail:
3037     while (--i >= 0) {
3038         if (tswapal(target_vec[i].iov_len) > 0) {
3039             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3040         }
3041     }
3042     unlock_user(target_vec, target_addr, 0);
3043  fail2:
3044     g_free(vec);
3045     errno = err;
3046     return NULL;
3047 }
3048 
3049 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3050                          abi_ulong count, int copy)
3051 {
3052     struct target_iovec *target_vec;
3053     int i;
3054 
3055     target_vec = lock_user(VERIFY_READ, target_addr,
3056                            count * sizeof(struct target_iovec), 1);
3057     if (target_vec) {
3058         for (i = 0; i < count; i++) {
3059             abi_ulong base = tswapal(target_vec[i].iov_base);
3060             abi_long len = tswapal(target_vec[i].iov_len);
3061             if (len < 0) {
3062                 break;
3063             }
3064             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3065         }
3066         unlock_user(target_vec, target_addr, 0);
3067     }
3068 
3069     g_free(vec);
3070 }
3071 
3072 static inline int target_to_host_sock_type(int *type)
3073 {
3074     int host_type = 0;
3075     int target_type = *type;
3076 
3077     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3078     case TARGET_SOCK_DGRAM:
3079         host_type = SOCK_DGRAM;
3080         break;
3081     case TARGET_SOCK_STREAM:
3082         host_type = SOCK_STREAM;
3083         break;
3084     default:
3085         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3086         break;
3087     }
3088     if (target_type & TARGET_SOCK_CLOEXEC) {
3089 #if defined(SOCK_CLOEXEC)
3090         host_type |= SOCK_CLOEXEC;
3091 #else
3092         return -TARGET_EINVAL;
3093 #endif
3094     }
3095     if (target_type & TARGET_SOCK_NONBLOCK) {
3096 #if defined(SOCK_NONBLOCK)
3097         host_type |= SOCK_NONBLOCK;
3098 #elif !defined(O_NONBLOCK)
3099         return -TARGET_EINVAL;
3100 #endif
3101     }
3102     *type = host_type;
3103     return 0;
3104 }
3105 
3106 /* Try to emulate socket type flags after socket creation.  */
3107 static int sock_flags_fixup(int fd, int target_type)
3108 {
3109 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3110     if (target_type & TARGET_SOCK_NONBLOCK) {
3111         int flags = fcntl(fd, F_GETFL);
3112         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3113             close(fd);
3114             return -TARGET_EINVAL;
3115         }
3116     }
3117 #endif
3118     return fd;
3119 }
3120 
3121 /* do_socket() Must return target values and target errnos. */
3122 static abi_long do_socket(int domain, int type, int protocol)
3123 {
3124     int target_type = type;
3125     int ret;
3126 
3127     ret = target_to_host_sock_type(&type);
3128     if (ret) {
3129         return ret;
3130     }
3131 
3132     if (domain == PF_NETLINK && !(
3133 #ifdef CONFIG_RTNETLINK
3134          protocol == NETLINK_ROUTE ||
3135 #endif
3136          protocol == NETLINK_KOBJECT_UEVENT ||
3137          protocol == NETLINK_AUDIT)) {
3138         return -TARGET_EPROTONOSUPPORT;
3139     }
3140 
3141     if (domain == AF_PACKET ||
3142         (domain == AF_INET && type == SOCK_PACKET)) {
3143         protocol = tswap16(protocol);
3144     }
3145 
3146     ret = get_errno(socket(domain, type, protocol));
3147     if (ret >= 0) {
3148         ret = sock_flags_fixup(ret, target_type);
3149         if (type == SOCK_PACKET) {
3150             /* Manage an obsolete case :
3151              * if socket type is SOCK_PACKET, bind by name
3152              */
3153             fd_trans_register(ret, &target_packet_trans);
3154         } else if (domain == PF_NETLINK) {
3155             switch (protocol) {
3156 #ifdef CONFIG_RTNETLINK
3157             case NETLINK_ROUTE:
3158                 fd_trans_register(ret, &target_netlink_route_trans);
3159                 break;
3160 #endif
3161             case NETLINK_KOBJECT_UEVENT:
3162                 /* nothing to do: messages are strings */
3163                 break;
3164             case NETLINK_AUDIT:
3165                 fd_trans_register(ret, &target_netlink_audit_trans);
3166                 break;
3167             default:
3168                 g_assert_not_reached();
3169             }
3170         }
3171     }
3172     return ret;
3173 }
3174 
3175 /* do_bind() Must return target values and target errnos. */
3176 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3177                         socklen_t addrlen)
3178 {
3179     void *addr;
3180     abi_long ret;
3181 
3182     if ((int)addrlen < 0) {
3183         return -TARGET_EINVAL;
3184     }
3185 
3186     addr = alloca(addrlen+1);
3187 
3188     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3189     if (ret)
3190         return ret;
3191 
3192     return get_errno(bind(sockfd, addr, addrlen));
3193 }
3194 
3195 /* do_connect() Must return target values and target errnos. */
3196 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3197                            socklen_t addrlen)
3198 {
3199     void *addr;
3200     abi_long ret;
3201 
3202     if ((int)addrlen < 0) {
3203         return -TARGET_EINVAL;
3204     }
3205 
3206     addr = alloca(addrlen+1);
3207 
3208     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3209     if (ret)
3210         return ret;
3211 
3212     return get_errno(safe_connect(sockfd, addr, addrlen));
3213 }
3214 
3215 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3216 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3217                                       int flags, int send)
3218 {
3219     abi_long ret, len;
3220     struct msghdr msg;
3221     abi_ulong count;
3222     struct iovec *vec;
3223     abi_ulong target_vec;
3224 
3225     if (msgp->msg_name) {
3226         msg.msg_namelen = tswap32(msgp->msg_namelen);
3227         msg.msg_name = alloca(msg.msg_namelen+1);
3228         ret = target_to_host_sockaddr(fd, msg.msg_name,
3229                                       tswapal(msgp->msg_name),
3230                                       msg.msg_namelen);
3231         if (ret == -TARGET_EFAULT) {
3232             /* For connected sockets msg_name and msg_namelen must
3233              * be ignored, so returning EFAULT immediately is wrong.
3234              * Instead, pass a bad msg_name to the host kernel, and
3235              * let it decide whether to return EFAULT or not.
3236              */
3237             msg.msg_name = (void *)-1;
3238         } else if (ret) {
3239             goto out2;
3240         }
3241     } else {
3242         msg.msg_name = NULL;
3243         msg.msg_namelen = 0;
3244     }
3245     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3246     msg.msg_control = alloca(msg.msg_controllen);
3247     memset(msg.msg_control, 0, msg.msg_controllen);
3248 
3249     msg.msg_flags = tswap32(msgp->msg_flags);
3250 
3251     count = tswapal(msgp->msg_iovlen);
3252     target_vec = tswapal(msgp->msg_iov);
3253 
3254     if (count > IOV_MAX) {
3255         /* sendrcvmsg returns a different errno for this condition than
3256          * readv/writev, so we must catch it here before lock_iovec() does.
3257          */
3258         ret = -TARGET_EMSGSIZE;
3259         goto out2;
3260     }
3261 
3262     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3263                      target_vec, count, send);
3264     if (vec == NULL) {
3265         ret = -host_to_target_errno(errno);
3266         goto out2;
3267     }
3268     msg.msg_iovlen = count;
3269     msg.msg_iov = vec;
3270 
3271     if (send) {
3272         if (fd_trans_target_to_host_data(fd)) {
3273             void *host_msg;
3274 
3275             host_msg = g_malloc(msg.msg_iov->iov_len);
3276             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3277             ret = fd_trans_target_to_host_data(fd)(host_msg,
3278                                                    msg.msg_iov->iov_len);
3279             if (ret >= 0) {
3280                 msg.msg_iov->iov_base = host_msg;
3281                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3282             }
3283             g_free(host_msg);
3284         } else {
3285             ret = target_to_host_cmsg(&msg, msgp);
3286             if (ret == 0) {
3287                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3288             }
3289         }
3290     } else {
3291         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3292         if (!is_error(ret)) {
3293             len = ret;
3294             if (fd_trans_host_to_target_data(fd)) {
3295                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3296                                                MIN(msg.msg_iov->iov_len, len));
3297             } else {
3298                 ret = host_to_target_cmsg(msgp, &msg);
3299             }
3300             if (!is_error(ret)) {
3301                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3302                 msgp->msg_flags = tswap32(msg.msg_flags);
3303                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3304                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3305                                     msg.msg_name, msg.msg_namelen);
3306                     if (ret) {
3307                         goto out;
3308                     }
3309                 }
3310 
3311                 ret = len;
3312             }
3313         }
3314     }
3315 
3316 out:
3317     unlock_iovec(vec, target_vec, count, !send);
3318 out2:
3319     return ret;
3320 }
3321 
3322 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3323                                int flags, int send)
3324 {
3325     abi_long ret;
3326     struct target_msghdr *msgp;
3327 
3328     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3329                           msgp,
3330                           target_msg,
3331                           send ? 1 : 0)) {
3332         return -TARGET_EFAULT;
3333     }
3334     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3335     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3336     return ret;
3337 }
3338 
3339 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3340  * so it might not have this *mmsg-specific flag either.
3341  */
3342 #ifndef MSG_WAITFORONE
3343 #define MSG_WAITFORONE 0x10000
3344 #endif
3345 
3346 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3347                                 unsigned int vlen, unsigned int flags,
3348                                 int send)
3349 {
3350     struct target_mmsghdr *mmsgp;
3351     abi_long ret = 0;
3352     int i;
3353 
3354     if (vlen > UIO_MAXIOV) {
3355         vlen = UIO_MAXIOV;
3356     }
3357 
3358     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3359     if (!mmsgp) {
3360         return -TARGET_EFAULT;
3361     }
3362 
3363     for (i = 0; i < vlen; i++) {
3364         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3365         if (is_error(ret)) {
3366             break;
3367         }
3368         mmsgp[i].msg_len = tswap32(ret);
3369         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3370         if (flags & MSG_WAITFORONE) {
3371             flags |= MSG_DONTWAIT;
3372         }
3373     }
3374 
3375     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3376 
3377     /* Return number of datagrams sent if we sent any at all;
3378      * otherwise return the error.
3379      */
3380     if (i) {
3381         return i;
3382     }
3383     return ret;
3384 }
3385 
3386 /* do_accept4() Must return target values and target errnos. */
3387 static abi_long do_accept4(int fd, abi_ulong target_addr,
3388                            abi_ulong target_addrlen_addr, int flags)
3389 {
3390     socklen_t addrlen, ret_addrlen;
3391     void *addr;
3392     abi_long ret;
3393     int host_flags;
3394 
3395     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3396 
3397     if (target_addr == 0) {
3398         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3399     }
3400 
3401     /* linux returns EFAULT if addrlen pointer is invalid */
3402     if (get_user_u32(addrlen, target_addrlen_addr))
3403         return -TARGET_EFAULT;
3404 
3405     if ((int)addrlen < 0) {
3406         return -TARGET_EINVAL;
3407     }
3408 
3409     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3410         return -TARGET_EFAULT;
3411     }
3412 
3413     addr = alloca(addrlen);
3414 
3415     ret_addrlen = addrlen;
3416     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3417     if (!is_error(ret)) {
3418         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3419         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3420             ret = -TARGET_EFAULT;
3421         }
3422     }
3423     return ret;
3424 }
3425 
3426 /* do_getpeername() Must return target values and target errnos. */
3427 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3428                                abi_ulong target_addrlen_addr)
3429 {
3430     socklen_t addrlen, ret_addrlen;
3431     void *addr;
3432     abi_long ret;
3433 
3434     if (get_user_u32(addrlen, target_addrlen_addr))
3435         return -TARGET_EFAULT;
3436 
3437     if ((int)addrlen < 0) {
3438         return -TARGET_EINVAL;
3439     }
3440 
3441     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3442         return -TARGET_EFAULT;
3443     }
3444 
3445     addr = alloca(addrlen);
3446 
3447     ret_addrlen = addrlen;
3448     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3449     if (!is_error(ret)) {
3450         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3451         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3452             ret = -TARGET_EFAULT;
3453         }
3454     }
3455     return ret;
3456 }
3457 
3458 /* do_getsockname() Must return target values and target errnos. */
3459 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3460                                abi_ulong target_addrlen_addr)
3461 {
3462     socklen_t addrlen, ret_addrlen;
3463     void *addr;
3464     abi_long ret;
3465 
3466     if (get_user_u32(addrlen, target_addrlen_addr))
3467         return -TARGET_EFAULT;
3468 
3469     if ((int)addrlen < 0) {
3470         return -TARGET_EINVAL;
3471     }
3472 
3473     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3474         return -TARGET_EFAULT;
3475     }
3476 
3477     addr = alloca(addrlen);
3478 
3479     ret_addrlen = addrlen;
3480     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3481     if (!is_error(ret)) {
3482         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3483         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3484             ret = -TARGET_EFAULT;
3485         }
3486     }
3487     return ret;
3488 }
3489 
3490 /* do_socketpair() Must return target values and target errnos. */
3491 static abi_long do_socketpair(int domain, int type, int protocol,
3492                               abi_ulong target_tab_addr)
3493 {
3494     int tab[2];
3495     abi_long ret;
3496 
3497     target_to_host_sock_type(&type);
3498 
3499     ret = get_errno(socketpair(domain, type, protocol, tab));
3500     if (!is_error(ret)) {
3501         if (put_user_s32(tab[0], target_tab_addr)
3502             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3503             ret = -TARGET_EFAULT;
3504     }
3505     return ret;
3506 }
3507 
3508 /* do_sendto() Must return target values and target errnos. */
3509 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3510                           abi_ulong target_addr, socklen_t addrlen)
3511 {
3512     void *addr;
3513     void *host_msg;
3514     void *copy_msg = NULL;
3515     abi_long ret;
3516 
3517     if ((int)addrlen < 0) {
3518         return -TARGET_EINVAL;
3519     }
3520 
3521     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3522     if (!host_msg)
3523         return -TARGET_EFAULT;
3524     if (fd_trans_target_to_host_data(fd)) {
3525         copy_msg = host_msg;
3526         host_msg = g_malloc(len);
3527         memcpy(host_msg, copy_msg, len);
3528         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3529         if (ret < 0) {
3530             goto fail;
3531         }
3532     }
3533     if (target_addr) {
3534         addr = alloca(addrlen+1);
3535         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3536         if (ret) {
3537             goto fail;
3538         }
3539         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3540     } else {
3541         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3542     }
3543 fail:
3544     if (copy_msg) {
3545         g_free(host_msg);
3546         host_msg = copy_msg;
3547     }
3548     unlock_user(host_msg, msg, 0);
3549     return ret;
3550 }
3551 
3552 /* do_recvfrom() Must return target values and target errnos. */
3553 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3554                             abi_ulong target_addr,
3555                             abi_ulong target_addrlen)
3556 {
3557     socklen_t addrlen, ret_addrlen;
3558     void *addr;
3559     void *host_msg;
3560     abi_long ret;
3561 
3562     if (!msg) {
3563         host_msg = NULL;
3564     } else {
3565         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3566         if (!host_msg) {
3567             return -TARGET_EFAULT;
3568         }
3569     }
3570     if (target_addr) {
3571         if (get_user_u32(addrlen, target_addrlen)) {
3572             ret = -TARGET_EFAULT;
3573             goto fail;
3574         }
3575         if ((int)addrlen < 0) {
3576             ret = -TARGET_EINVAL;
3577             goto fail;
3578         }
3579         addr = alloca(addrlen);
3580         ret_addrlen = addrlen;
3581         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3582                                       addr, &ret_addrlen));
3583     } else {
3584         addr = NULL; /* To keep compiler quiet.  */
3585         addrlen = 0; /* To keep compiler quiet.  */
3586         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3587     }
3588     if (!is_error(ret)) {
3589         if (fd_trans_host_to_target_data(fd)) {
3590             abi_long trans;
3591             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3592             if (is_error(trans)) {
3593                 ret = trans;
3594                 goto fail;
3595             }
3596         }
3597         if (target_addr) {
3598             host_to_target_sockaddr(target_addr, addr,
3599                                     MIN(addrlen, ret_addrlen));
3600             if (put_user_u32(ret_addrlen, target_addrlen)) {
3601                 ret = -TARGET_EFAULT;
3602                 goto fail;
3603             }
3604         }
3605         unlock_user(host_msg, msg, len);
3606     } else {
3607 fail:
3608         unlock_user(host_msg, msg, 0);
3609     }
3610     return ret;
3611 }
3612 
3613 #ifdef TARGET_NR_socketcall
3614 /* do_socketcall() must return target values and target errnos. */
3615 static abi_long do_socketcall(int num, abi_ulong vptr)
3616 {
3617     static const unsigned nargs[] = { /* number of arguments per operation */
3618         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3619         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3620         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3621         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3622         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3623         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3624         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3625         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3626         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3627         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3628         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3629         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3630         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3631         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3632         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3633         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3634         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3635         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3636         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3637         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3638     };
3639     abi_long a[6]; /* max 6 args */
3640     unsigned i;
3641 
3642     /* check the range of the first argument num */
3643     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3644     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3645         return -TARGET_EINVAL;
3646     }
3647     /* ensure we have space for args */
3648     if (nargs[num] > ARRAY_SIZE(a)) {
3649         return -TARGET_EINVAL;
3650     }
3651     /* collect the arguments in a[] according to nargs[] */
3652     for (i = 0; i < nargs[num]; ++i) {
3653         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3654             return -TARGET_EFAULT;
3655         }
3656     }
3657     /* now when we have the args, invoke the appropriate underlying function */
3658     switch (num) {
3659     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3660         return do_socket(a[0], a[1], a[2]);
3661     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3662         return do_bind(a[0], a[1], a[2]);
3663     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3664         return do_connect(a[0], a[1], a[2]);
3665     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3666         return get_errno(listen(a[0], a[1]));
3667     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3668         return do_accept4(a[0], a[1], a[2], 0);
3669     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3670         return do_getsockname(a[0], a[1], a[2]);
3671     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3672         return do_getpeername(a[0], a[1], a[2]);
3673     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3674         return do_socketpair(a[0], a[1], a[2], a[3]);
3675     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3676         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3677     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3678         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3679     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3680         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3681     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3682         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3683     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3684         return get_errno(shutdown(a[0], a[1]));
3685     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3686         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3687     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3688         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3689     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3690         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3691     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3692         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3693     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3694         return do_accept4(a[0], a[1], a[2], a[3]);
3695     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3696         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3697     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3698         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3699     default:
3700         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3701         return -TARGET_EINVAL;
3702     }
3703 }
3704 #endif
3705 
3706 #define N_SHM_REGIONS	32
3707 
3708 static struct shm_region {
3709     abi_ulong start;
3710     abi_ulong size;
3711     bool in_use;
3712 } shm_regions[N_SHM_REGIONS];
3713 
3714 #ifndef TARGET_SEMID64_DS
3715 /* asm-generic version of this struct */
3716 struct target_semid64_ds
3717 {
3718   struct target_ipc_perm sem_perm;
3719   abi_ulong sem_otime;
3720 #if TARGET_ABI_BITS == 32
3721   abi_ulong __unused1;
3722 #endif
3723   abi_ulong sem_ctime;
3724 #if TARGET_ABI_BITS == 32
3725   abi_ulong __unused2;
3726 #endif
3727   abi_ulong sem_nsems;
3728   abi_ulong __unused3;
3729   abi_ulong __unused4;
3730 };
3731 #endif
3732 
3733 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3734                                                abi_ulong target_addr)
3735 {
3736     struct target_ipc_perm *target_ip;
3737     struct target_semid64_ds *target_sd;
3738 
3739     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3740         return -TARGET_EFAULT;
3741     target_ip = &(target_sd->sem_perm);
3742     host_ip->__key = tswap32(target_ip->__key);
3743     host_ip->uid = tswap32(target_ip->uid);
3744     host_ip->gid = tswap32(target_ip->gid);
3745     host_ip->cuid = tswap32(target_ip->cuid);
3746     host_ip->cgid = tswap32(target_ip->cgid);
3747 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3748     host_ip->mode = tswap32(target_ip->mode);
3749 #else
3750     host_ip->mode = tswap16(target_ip->mode);
3751 #endif
3752 #if defined(TARGET_PPC)
3753     host_ip->__seq = tswap32(target_ip->__seq);
3754 #else
3755     host_ip->__seq = tswap16(target_ip->__seq);
3756 #endif
3757     unlock_user_struct(target_sd, target_addr, 0);
3758     return 0;
3759 }
3760 
3761 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3762                                                struct ipc_perm *host_ip)
3763 {
3764     struct target_ipc_perm *target_ip;
3765     struct target_semid64_ds *target_sd;
3766 
3767     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3768         return -TARGET_EFAULT;
3769     target_ip = &(target_sd->sem_perm);
3770     target_ip->__key = tswap32(host_ip->__key);
3771     target_ip->uid = tswap32(host_ip->uid);
3772     target_ip->gid = tswap32(host_ip->gid);
3773     target_ip->cuid = tswap32(host_ip->cuid);
3774     target_ip->cgid = tswap32(host_ip->cgid);
3775 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3776     target_ip->mode = tswap32(host_ip->mode);
3777 #else
3778     target_ip->mode = tswap16(host_ip->mode);
3779 #endif
3780 #if defined(TARGET_PPC)
3781     target_ip->__seq = tswap32(host_ip->__seq);
3782 #else
3783     target_ip->__seq = tswap16(host_ip->__seq);
3784 #endif
3785     unlock_user_struct(target_sd, target_addr, 1);
3786     return 0;
3787 }
3788 
3789 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3790                                                abi_ulong target_addr)
3791 {
3792     struct target_semid64_ds *target_sd;
3793 
3794     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3795         return -TARGET_EFAULT;
3796     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3797         return -TARGET_EFAULT;
3798     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3799     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3800     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3801     unlock_user_struct(target_sd, target_addr, 0);
3802     return 0;
3803 }
3804 
3805 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3806                                                struct semid_ds *host_sd)
3807 {
3808     struct target_semid64_ds *target_sd;
3809 
3810     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3811         return -TARGET_EFAULT;
3812     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3813         return -TARGET_EFAULT;
3814     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3815     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3816     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3817     unlock_user_struct(target_sd, target_addr, 1);
3818     return 0;
3819 }
3820 
3821 struct target_seminfo {
3822     int semmap;
3823     int semmni;
3824     int semmns;
3825     int semmnu;
3826     int semmsl;
3827     int semopm;
3828     int semume;
3829     int semusz;
3830     int semvmx;
3831     int semaem;
3832 };
3833 
3834 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3835                                               struct seminfo *host_seminfo)
3836 {
3837     struct target_seminfo *target_seminfo;
3838     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3839         return -TARGET_EFAULT;
3840     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3841     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3842     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3843     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3844     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3845     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3846     __put_user(host_seminfo->semume, &target_seminfo->semume);
3847     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3848     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3849     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3850     unlock_user_struct(target_seminfo, target_addr, 1);
3851     return 0;
3852 }
3853 
3854 union semun {
3855 	int val;
3856 	struct semid_ds *buf;
3857 	unsigned short *array;
3858 	struct seminfo *__buf;
3859 };
3860 
3861 union target_semun {
3862 	int val;
3863 	abi_ulong buf;
3864 	abi_ulong array;
3865 	abi_ulong __buf;
3866 };
3867 
3868 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3869                                                abi_ulong target_addr)
3870 {
3871     int nsems;
3872     unsigned short *array;
3873     union semun semun;
3874     struct semid_ds semid_ds;
3875     int i, ret;
3876 
3877     semun.buf = &semid_ds;
3878 
3879     ret = semctl(semid, 0, IPC_STAT, semun);
3880     if (ret == -1)
3881         return get_errno(ret);
3882 
3883     nsems = semid_ds.sem_nsems;
3884 
3885     *host_array = g_try_new(unsigned short, nsems);
3886     if (!*host_array) {
3887         return -TARGET_ENOMEM;
3888     }
3889     array = lock_user(VERIFY_READ, target_addr,
3890                       nsems*sizeof(unsigned short), 1);
3891     if (!array) {
3892         g_free(*host_array);
3893         return -TARGET_EFAULT;
3894     }
3895 
3896     for(i=0; i<nsems; i++) {
3897         __get_user((*host_array)[i], &array[i]);
3898     }
3899     unlock_user(array, target_addr, 0);
3900 
3901     return 0;
3902 }
3903 
3904 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3905                                                unsigned short **host_array)
3906 {
3907     int nsems;
3908     unsigned short *array;
3909     union semun semun;
3910     struct semid_ds semid_ds;
3911     int i, ret;
3912 
3913     semun.buf = &semid_ds;
3914 
3915     ret = semctl(semid, 0, IPC_STAT, semun);
3916     if (ret == -1)
3917         return get_errno(ret);
3918 
3919     nsems = semid_ds.sem_nsems;
3920 
3921     array = lock_user(VERIFY_WRITE, target_addr,
3922                       nsems*sizeof(unsigned short), 0);
3923     if (!array)
3924         return -TARGET_EFAULT;
3925 
3926     for(i=0; i<nsems; i++) {
3927         __put_user((*host_array)[i], &array[i]);
3928     }
3929     g_free(*host_array);
3930     unlock_user(array, target_addr, 1);
3931 
3932     return 0;
3933 }
3934 
3935 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3936                                  abi_ulong target_arg)
3937 {
3938     union target_semun target_su = { .buf = target_arg };
3939     union semun arg;
3940     struct semid_ds dsarg;
3941     unsigned short *array = NULL;
3942     struct seminfo seminfo;
3943     abi_long ret = -TARGET_EINVAL;
3944     abi_long err;
3945     cmd &= 0xff;
3946 
3947     switch( cmd ) {
3948 	case GETVAL:
3949 	case SETVAL:
3950             /* In 64 bit cross-endian situations, we will erroneously pick up
3951              * the wrong half of the union for the "val" element.  To rectify
3952              * this, the entire 8-byte structure is byteswapped, followed by
3953 	     * a swap of the 4 byte val field. In other cases, the data is
3954 	     * already in proper host byte order. */
3955 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3956 		target_su.buf = tswapal(target_su.buf);
3957 		arg.val = tswap32(target_su.val);
3958 	    } else {
3959 		arg.val = target_su.val;
3960 	    }
3961             ret = get_errno(semctl(semid, semnum, cmd, arg));
3962             break;
3963 	case GETALL:
3964 	case SETALL:
3965             err = target_to_host_semarray(semid, &array, target_su.array);
3966             if (err)
3967                 return err;
3968             arg.array = array;
3969             ret = get_errno(semctl(semid, semnum, cmd, arg));
3970             err = host_to_target_semarray(semid, target_su.array, &array);
3971             if (err)
3972                 return err;
3973             break;
3974 	case IPC_STAT:
3975 	case IPC_SET:
3976 	case SEM_STAT:
3977             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3978             if (err)
3979                 return err;
3980             arg.buf = &dsarg;
3981             ret = get_errno(semctl(semid, semnum, cmd, arg));
3982             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3983             if (err)
3984                 return err;
3985             break;
3986 	case IPC_INFO:
3987 	case SEM_INFO:
3988             arg.__buf = &seminfo;
3989             ret = get_errno(semctl(semid, semnum, cmd, arg));
3990             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3991             if (err)
3992                 return err;
3993             break;
3994 	case IPC_RMID:
3995 	case GETPID:
3996 	case GETNCNT:
3997 	case GETZCNT:
3998             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3999             break;
4000     }
4001 
4002     return ret;
4003 }
4004 
4005 struct target_sembuf {
4006     unsigned short sem_num;
4007     short sem_op;
4008     short sem_flg;
4009 };
4010 
4011 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4012                                              abi_ulong target_addr,
4013                                              unsigned nsops)
4014 {
4015     struct target_sembuf *target_sembuf;
4016     int i;
4017 
4018     target_sembuf = lock_user(VERIFY_READ, target_addr,
4019                               nsops*sizeof(struct target_sembuf), 1);
4020     if (!target_sembuf)
4021         return -TARGET_EFAULT;
4022 
4023     for(i=0; i<nsops; i++) {
4024         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4025         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4026         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4027     }
4028 
4029     unlock_user(target_sembuf, target_addr, 0);
4030 
4031     return 0;
4032 }
4033 
4034 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4035     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4036 
4037 /*
4038  * This macro is required to handle the s390 variants, which passes the
4039  * arguments in a different order than default.
4040  */
4041 #ifdef __s390x__
4042 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4043   (__nsops), (__timeout), (__sops)
4044 #else
4045 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4046   (__nsops), 0, (__sops), (__timeout)
4047 #endif
4048 
4049 static inline abi_long do_semtimedop(int semid,
4050                                      abi_long ptr,
4051                                      unsigned nsops,
4052                                      abi_long timeout, bool time64)
4053 {
4054     struct sembuf *sops;
4055     struct timespec ts, *pts = NULL;
4056     abi_long ret;
4057 
4058     if (timeout) {
4059         pts = &ts;
4060         if (time64) {
4061             if (target_to_host_timespec64(pts, timeout)) {
4062                 return -TARGET_EFAULT;
4063             }
4064         } else {
4065             if (target_to_host_timespec(pts, timeout)) {
4066                 return -TARGET_EFAULT;
4067             }
4068         }
4069     }
4070 
4071     if (nsops > TARGET_SEMOPM) {
4072         return -TARGET_E2BIG;
4073     }
4074 
4075     sops = g_new(struct sembuf, nsops);
4076 
4077     if (target_to_host_sembuf(sops, ptr, nsops)) {
4078         g_free(sops);
4079         return -TARGET_EFAULT;
4080     }
4081 
4082     ret = -TARGET_ENOSYS;
4083 #ifdef __NR_semtimedop
4084     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4085 #endif
4086 #ifdef __NR_ipc
4087     if (ret == -TARGET_ENOSYS) {
4088         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4089                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4090     }
4091 #endif
4092     g_free(sops);
4093     return ret;
4094 }
4095 #endif
4096 
4097 struct target_msqid_ds
4098 {
4099     struct target_ipc_perm msg_perm;
4100     abi_ulong msg_stime;
4101 #if TARGET_ABI_BITS == 32
4102     abi_ulong __unused1;
4103 #endif
4104     abi_ulong msg_rtime;
4105 #if TARGET_ABI_BITS == 32
4106     abi_ulong __unused2;
4107 #endif
4108     abi_ulong msg_ctime;
4109 #if TARGET_ABI_BITS == 32
4110     abi_ulong __unused3;
4111 #endif
4112     abi_ulong __msg_cbytes;
4113     abi_ulong msg_qnum;
4114     abi_ulong msg_qbytes;
4115     abi_ulong msg_lspid;
4116     abi_ulong msg_lrpid;
4117     abi_ulong __unused4;
4118     abi_ulong __unused5;
4119 };
4120 
4121 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4122                                                abi_ulong target_addr)
4123 {
4124     struct target_msqid_ds *target_md;
4125 
4126     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4127         return -TARGET_EFAULT;
4128     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4129         return -TARGET_EFAULT;
4130     host_md->msg_stime = tswapal(target_md->msg_stime);
4131     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4132     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4133     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4134     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4135     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4136     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4137     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4138     unlock_user_struct(target_md, target_addr, 0);
4139     return 0;
4140 }
4141 
4142 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4143                                                struct msqid_ds *host_md)
4144 {
4145     struct target_msqid_ds *target_md;
4146 
4147     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4148         return -TARGET_EFAULT;
4149     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4150         return -TARGET_EFAULT;
4151     target_md->msg_stime = tswapal(host_md->msg_stime);
4152     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4153     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4154     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4155     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4156     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4157     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4158     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4159     unlock_user_struct(target_md, target_addr, 1);
4160     return 0;
4161 }
4162 
4163 struct target_msginfo {
4164     int msgpool;
4165     int msgmap;
4166     int msgmax;
4167     int msgmnb;
4168     int msgmni;
4169     int msgssz;
4170     int msgtql;
4171     unsigned short int msgseg;
4172 };
4173 
4174 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4175                                               struct msginfo *host_msginfo)
4176 {
4177     struct target_msginfo *target_msginfo;
4178     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4179         return -TARGET_EFAULT;
4180     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4181     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4182     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4183     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4184     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4185     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4186     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4187     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4188     unlock_user_struct(target_msginfo, target_addr, 1);
4189     return 0;
4190 }
4191 
4192 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4193 {
4194     struct msqid_ds dsarg;
4195     struct msginfo msginfo;
4196     abi_long ret = -TARGET_EINVAL;
4197 
4198     cmd &= 0xff;
4199 
4200     switch (cmd) {
4201     case IPC_STAT:
4202     case IPC_SET:
4203     case MSG_STAT:
4204         if (target_to_host_msqid_ds(&dsarg,ptr))
4205             return -TARGET_EFAULT;
4206         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4207         if (host_to_target_msqid_ds(ptr,&dsarg))
4208             return -TARGET_EFAULT;
4209         break;
4210     case IPC_RMID:
4211         ret = get_errno(msgctl(msgid, cmd, NULL));
4212         break;
4213     case IPC_INFO:
4214     case MSG_INFO:
4215         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4216         if (host_to_target_msginfo(ptr, &msginfo))
4217             return -TARGET_EFAULT;
4218         break;
4219     }
4220 
4221     return ret;
4222 }
4223 
4224 struct target_msgbuf {
4225     abi_long mtype;
4226     char	mtext[1];
4227 };
4228 
4229 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4230                                  ssize_t msgsz, int msgflg)
4231 {
4232     struct target_msgbuf *target_mb;
4233     struct msgbuf *host_mb;
4234     abi_long ret = 0;
4235 
4236     if (msgsz < 0) {
4237         return -TARGET_EINVAL;
4238     }
4239 
4240     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4241         return -TARGET_EFAULT;
4242     host_mb = g_try_malloc(msgsz + sizeof(long));
4243     if (!host_mb) {
4244         unlock_user_struct(target_mb, msgp, 0);
4245         return -TARGET_ENOMEM;
4246     }
4247     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4248     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4249     ret = -TARGET_ENOSYS;
4250 #ifdef __NR_msgsnd
4251     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4252 #endif
4253 #ifdef __NR_ipc
4254     if (ret == -TARGET_ENOSYS) {
4255 #ifdef __s390x__
4256         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4257                                  host_mb));
4258 #else
4259         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4260                                  host_mb, 0));
4261 #endif
4262     }
4263 #endif
4264     g_free(host_mb);
4265     unlock_user_struct(target_mb, msgp, 0);
4266 
4267     return ret;
4268 }
4269 
4270 #ifdef __NR_ipc
4271 #if defined(__sparc__)
4272 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4273 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4274 #elif defined(__s390x__)
4275 /* The s390 sys_ipc variant has only five parameters.  */
4276 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4277     ((long int[]){(long int)__msgp, __msgtyp})
4278 #else
4279 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4280     ((long int[]){(long int)__msgp, __msgtyp}), 0
4281 #endif
4282 #endif
4283 
4284 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4285                                  ssize_t msgsz, abi_long msgtyp,
4286                                  int msgflg)
4287 {
4288     struct target_msgbuf *target_mb;
4289     char *target_mtext;
4290     struct msgbuf *host_mb;
4291     abi_long ret = 0;
4292 
4293     if (msgsz < 0) {
4294         return -TARGET_EINVAL;
4295     }
4296 
4297     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4298         return -TARGET_EFAULT;
4299 
4300     host_mb = g_try_malloc(msgsz + sizeof(long));
4301     if (!host_mb) {
4302         ret = -TARGET_ENOMEM;
4303         goto end;
4304     }
4305     ret = -TARGET_ENOSYS;
4306 #ifdef __NR_msgrcv
4307     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4308 #endif
4309 #ifdef __NR_ipc
4310     if (ret == -TARGET_ENOSYS) {
4311         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4312                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4313     }
4314 #endif
4315 
4316     if (ret > 0) {
4317         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4318         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4319         if (!target_mtext) {
4320             ret = -TARGET_EFAULT;
4321             goto end;
4322         }
4323         memcpy(target_mb->mtext, host_mb->mtext, ret);
4324         unlock_user(target_mtext, target_mtext_addr, ret);
4325     }
4326 
4327     target_mb->mtype = tswapal(host_mb->mtype);
4328 
4329 end:
4330     if (target_mb)
4331         unlock_user_struct(target_mb, msgp, 1);
4332     g_free(host_mb);
4333     return ret;
4334 }
4335 
4336 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4337                                                abi_ulong target_addr)
4338 {
4339     struct target_shmid_ds *target_sd;
4340 
4341     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4342         return -TARGET_EFAULT;
4343     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4344         return -TARGET_EFAULT;
4345     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4346     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4347     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4348     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4349     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4350     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4351     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4352     unlock_user_struct(target_sd, target_addr, 0);
4353     return 0;
4354 }
4355 
4356 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4357                                                struct shmid_ds *host_sd)
4358 {
4359     struct target_shmid_ds *target_sd;
4360 
4361     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4362         return -TARGET_EFAULT;
4363     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4364         return -TARGET_EFAULT;
4365     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4366     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4367     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4368     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4369     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4370     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4371     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4372     unlock_user_struct(target_sd, target_addr, 1);
4373     return 0;
4374 }
4375 
4376 struct  target_shminfo {
4377     abi_ulong shmmax;
4378     abi_ulong shmmin;
4379     abi_ulong shmmni;
4380     abi_ulong shmseg;
4381     abi_ulong shmall;
4382 };
4383 
4384 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4385                                               struct shminfo *host_shminfo)
4386 {
4387     struct target_shminfo *target_shminfo;
4388     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4389         return -TARGET_EFAULT;
4390     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4391     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4392     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4393     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4394     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4395     unlock_user_struct(target_shminfo, target_addr, 1);
4396     return 0;
4397 }
4398 
4399 struct target_shm_info {
4400     int used_ids;
4401     abi_ulong shm_tot;
4402     abi_ulong shm_rss;
4403     abi_ulong shm_swp;
4404     abi_ulong swap_attempts;
4405     abi_ulong swap_successes;
4406 };
4407 
4408 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4409                                                struct shm_info *host_shm_info)
4410 {
4411     struct target_shm_info *target_shm_info;
4412     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4413         return -TARGET_EFAULT;
4414     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4415     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4416     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4417     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4418     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4419     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4420     unlock_user_struct(target_shm_info, target_addr, 1);
4421     return 0;
4422 }
4423 
4424 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4425 {
4426     struct shmid_ds dsarg;
4427     struct shminfo shminfo;
4428     struct shm_info shm_info;
4429     abi_long ret = -TARGET_EINVAL;
4430 
4431     cmd &= 0xff;
4432 
4433     switch(cmd) {
4434     case IPC_STAT:
4435     case IPC_SET:
4436     case SHM_STAT:
4437         if (target_to_host_shmid_ds(&dsarg, buf))
4438             return -TARGET_EFAULT;
4439         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4440         if (host_to_target_shmid_ds(buf, &dsarg))
4441             return -TARGET_EFAULT;
4442         break;
4443     case IPC_INFO:
4444         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4445         if (host_to_target_shminfo(buf, &shminfo))
4446             return -TARGET_EFAULT;
4447         break;
4448     case SHM_INFO:
4449         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4450         if (host_to_target_shm_info(buf, &shm_info))
4451             return -TARGET_EFAULT;
4452         break;
4453     case IPC_RMID:
4454     case SHM_LOCK:
4455     case SHM_UNLOCK:
4456         ret = get_errno(shmctl(shmid, cmd, NULL));
4457         break;
4458     }
4459 
4460     return ret;
4461 }
4462 
4463 #ifndef TARGET_FORCE_SHMLBA
4464 /* For most architectures, SHMLBA is the same as the page size;
4465  * some architectures have larger values, in which case they should
4466  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4467  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4468  * and defining its own value for SHMLBA.
4469  *
4470  * The kernel also permits SHMLBA to be set by the architecture to a
4471  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4472  * this means that addresses are rounded to the large size if
4473  * SHM_RND is set but addresses not aligned to that size are not rejected
4474  * as long as they are at least page-aligned. Since the only architecture
4475  * which uses this is ia64 this code doesn't provide for that oddity.
4476  */
4477 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4478 {
4479     return TARGET_PAGE_SIZE;
4480 }
4481 #endif
4482 
4483 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4484                                  int shmid, abi_ulong shmaddr, int shmflg)
4485 {
4486     CPUState *cpu = env_cpu(cpu_env);
4487     abi_long raddr;
4488     void *host_raddr;
4489     struct shmid_ds shm_info;
4490     int i,ret;
4491     abi_ulong shmlba;
4492 
4493     /* shmat pointers are always untagged */
4494 
4495     /* find out the length of the shared memory segment */
4496     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4497     if (is_error(ret)) {
4498         /* can't get length, bail out */
4499         return ret;
4500     }
4501 
4502     shmlba = target_shmlba(cpu_env);
4503 
4504     if (shmaddr & (shmlba - 1)) {
4505         if (shmflg & SHM_RND) {
4506             shmaddr &= ~(shmlba - 1);
4507         } else {
4508             return -TARGET_EINVAL;
4509         }
4510     }
4511     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4512         return -TARGET_EINVAL;
4513     }
4514 
4515     mmap_lock();
4516 
4517     /*
4518      * We're mapping shared memory, so ensure we generate code for parallel
4519      * execution and flush old translations.  This will work up to the level
4520      * supported by the host -- anything that requires EXCP_ATOMIC will not
4521      * be atomic with respect to an external process.
4522      */
4523     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4524         cpu->tcg_cflags |= CF_PARALLEL;
4525         tb_flush(cpu);
4526     }
4527 
4528     if (shmaddr)
4529         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4530     else {
4531         abi_ulong mmap_start;
4532 
4533         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4534         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4535 
4536         if (mmap_start == -1) {
4537             errno = ENOMEM;
4538             host_raddr = (void *)-1;
4539         } else
4540             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4541                                shmflg | SHM_REMAP);
4542     }
4543 
4544     if (host_raddr == (void *)-1) {
4545         mmap_unlock();
4546         return get_errno((long)host_raddr);
4547     }
4548     raddr=h2g((unsigned long)host_raddr);
4549 
4550     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4551                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4552                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4553 
4554     for (i = 0; i < N_SHM_REGIONS; i++) {
4555         if (!shm_regions[i].in_use) {
4556             shm_regions[i].in_use = true;
4557             shm_regions[i].start = raddr;
4558             shm_regions[i].size = shm_info.shm_segsz;
4559             break;
4560         }
4561     }
4562 
4563     mmap_unlock();
4564     return raddr;
4565 
4566 }
4567 
4568 static inline abi_long do_shmdt(abi_ulong shmaddr)
4569 {
4570     int i;
4571     abi_long rv;
4572 
4573     /* shmdt pointers are always untagged */
4574 
4575     mmap_lock();
4576 
4577     for (i = 0; i < N_SHM_REGIONS; ++i) {
4578         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4579             shm_regions[i].in_use = false;
4580             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4581             break;
4582         }
4583     }
4584     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4585 
4586     mmap_unlock();
4587 
4588     return rv;
4589 }
4590 
4591 #ifdef TARGET_NR_ipc
4592 /* ??? This only works with linear mappings.  */
4593 /* do_ipc() must return target values and target errnos. */
4594 static abi_long do_ipc(CPUArchState *cpu_env,
4595                        unsigned int call, abi_long first,
4596                        abi_long second, abi_long third,
4597                        abi_long ptr, abi_long fifth)
4598 {
4599     int version;
4600     abi_long ret = 0;
4601 
4602     version = call >> 16;
4603     call &= 0xffff;
4604 
4605     switch (call) {
4606     case IPCOP_semop:
4607         ret = do_semtimedop(first, ptr, second, 0, false);
4608         break;
4609     case IPCOP_semtimedop:
4610     /*
4611      * The s390 sys_ipc variant has only five parameters instead of six
4612      * (as for default variant) and the only difference is the handling of
4613      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4614      * to a struct timespec where the generic variant uses fifth parameter.
4615      */
4616 #if defined(TARGET_S390X)
4617         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4618 #else
4619         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4620 #endif
4621         break;
4622 
4623     case IPCOP_semget:
4624         ret = get_errno(semget(first, second, third));
4625         break;
4626 
4627     case IPCOP_semctl: {
4628         /* The semun argument to semctl is passed by value, so dereference the
4629          * ptr argument. */
4630         abi_ulong atptr;
4631         get_user_ual(atptr, ptr);
4632         ret = do_semctl(first, second, third, atptr);
4633         break;
4634     }
4635 
4636     case IPCOP_msgget:
4637         ret = get_errno(msgget(first, second));
4638         break;
4639 
4640     case IPCOP_msgsnd:
4641         ret = do_msgsnd(first, ptr, second, third);
4642         break;
4643 
4644     case IPCOP_msgctl:
4645         ret = do_msgctl(first, second, ptr);
4646         break;
4647 
4648     case IPCOP_msgrcv:
4649         switch (version) {
4650         case 0:
4651             {
4652                 struct target_ipc_kludge {
4653                     abi_long msgp;
4654                     abi_long msgtyp;
4655                 } *tmp;
4656 
4657                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4658                     ret = -TARGET_EFAULT;
4659                     break;
4660                 }
4661 
4662                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4663 
4664                 unlock_user_struct(tmp, ptr, 0);
4665                 break;
4666             }
4667         default:
4668             ret = do_msgrcv(first, ptr, second, fifth, third);
4669         }
4670         break;
4671 
4672     case IPCOP_shmat:
4673         switch (version) {
4674         default:
4675         {
4676             abi_ulong raddr;
4677             raddr = do_shmat(cpu_env, first, ptr, second);
4678             if (is_error(raddr))
4679                 return get_errno(raddr);
4680             if (put_user_ual(raddr, third))
4681                 return -TARGET_EFAULT;
4682             break;
4683         }
4684         case 1:
4685             ret = -TARGET_EINVAL;
4686             break;
4687         }
4688 	break;
4689     case IPCOP_shmdt:
4690         ret = do_shmdt(ptr);
4691 	break;
4692 
4693     case IPCOP_shmget:
4694 	/* IPC_* flag values are the same on all linux platforms */
4695 	ret = get_errno(shmget(first, second, third));
4696 	break;
4697 
4698 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4699     case IPCOP_shmctl:
4700         ret = do_shmctl(first, second, ptr);
4701         break;
4702     default:
4703         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4704                       call, version);
4705 	ret = -TARGET_ENOSYS;
4706 	break;
4707     }
4708     return ret;
4709 }
4710 #endif
4711 
4712 /* kernel structure types definitions */
4713 
4714 #define STRUCT(name, ...) STRUCT_ ## name,
4715 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4716 enum {
4717 #include "syscall_types.h"
4718 STRUCT_MAX
4719 };
4720 #undef STRUCT
4721 #undef STRUCT_SPECIAL
4722 
4723 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4724 #define STRUCT_SPECIAL(name)
4725 #include "syscall_types.h"
4726 #undef STRUCT
4727 #undef STRUCT_SPECIAL
4728 
4729 #define MAX_STRUCT_SIZE 4096
4730 
4731 #ifdef CONFIG_FIEMAP
4732 /* So fiemap access checks don't overflow on 32 bit systems.
4733  * This is very slightly smaller than the limit imposed by
4734  * the underlying kernel.
4735  */
4736 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4737                             / sizeof(struct fiemap_extent))
4738 
4739 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4740                                        int fd, int cmd, abi_long arg)
4741 {
4742     /* The parameter for this ioctl is a struct fiemap followed
4743      * by an array of struct fiemap_extent whose size is set
4744      * in fiemap->fm_extent_count. The array is filled in by the
4745      * ioctl.
4746      */
4747     int target_size_in, target_size_out;
4748     struct fiemap *fm;
4749     const argtype *arg_type = ie->arg_type;
4750     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4751     void *argptr, *p;
4752     abi_long ret;
4753     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4754     uint32_t outbufsz;
4755     int free_fm = 0;
4756 
4757     assert(arg_type[0] == TYPE_PTR);
4758     assert(ie->access == IOC_RW);
4759     arg_type++;
4760     target_size_in = thunk_type_size(arg_type, 0);
4761     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4762     if (!argptr) {
4763         return -TARGET_EFAULT;
4764     }
4765     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4766     unlock_user(argptr, arg, 0);
4767     fm = (struct fiemap *)buf_temp;
4768     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4769         return -TARGET_EINVAL;
4770     }
4771 
4772     outbufsz = sizeof (*fm) +
4773         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4774 
4775     if (outbufsz > MAX_STRUCT_SIZE) {
4776         /* We can't fit all the extents into the fixed size buffer.
4777          * Allocate one that is large enough and use it instead.
4778          */
4779         fm = g_try_malloc(outbufsz);
4780         if (!fm) {
4781             return -TARGET_ENOMEM;
4782         }
4783         memcpy(fm, buf_temp, sizeof(struct fiemap));
4784         free_fm = 1;
4785     }
4786     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4787     if (!is_error(ret)) {
4788         target_size_out = target_size_in;
4789         /* An extent_count of 0 means we were only counting the extents
4790          * so there are no structs to copy
4791          */
4792         if (fm->fm_extent_count != 0) {
4793             target_size_out += fm->fm_mapped_extents * extent_size;
4794         }
4795         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4796         if (!argptr) {
4797             ret = -TARGET_EFAULT;
4798         } else {
4799             /* Convert the struct fiemap */
4800             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4801             if (fm->fm_extent_count != 0) {
4802                 p = argptr + target_size_in;
4803                 /* ...and then all the struct fiemap_extents */
4804                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4805                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4806                                   THUNK_TARGET);
4807                     p += extent_size;
4808                 }
4809             }
4810             unlock_user(argptr, arg, target_size_out);
4811         }
4812     }
4813     if (free_fm) {
4814         g_free(fm);
4815     }
4816     return ret;
4817 }
4818 #endif
4819 
4820 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4821                                 int fd, int cmd, abi_long arg)
4822 {
4823     const argtype *arg_type = ie->arg_type;
4824     int target_size;
4825     void *argptr;
4826     int ret;
4827     struct ifconf *host_ifconf;
4828     uint32_t outbufsz;
4829     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4830     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4831     int target_ifreq_size;
4832     int nb_ifreq;
4833     int free_buf = 0;
4834     int i;
4835     int target_ifc_len;
4836     abi_long target_ifc_buf;
4837     int host_ifc_len;
4838     char *host_ifc_buf;
4839 
4840     assert(arg_type[0] == TYPE_PTR);
4841     assert(ie->access == IOC_RW);
4842 
4843     arg_type++;
4844     target_size = thunk_type_size(arg_type, 0);
4845 
4846     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4847     if (!argptr)
4848         return -TARGET_EFAULT;
4849     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4850     unlock_user(argptr, arg, 0);
4851 
4852     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4853     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4854     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4855 
4856     if (target_ifc_buf != 0) {
4857         target_ifc_len = host_ifconf->ifc_len;
4858         nb_ifreq = target_ifc_len / target_ifreq_size;
4859         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4860 
4861         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4862         if (outbufsz > MAX_STRUCT_SIZE) {
4863             /*
4864              * We can't fit all the extents into the fixed size buffer.
4865              * Allocate one that is large enough and use it instead.
4866              */
4867             host_ifconf = malloc(outbufsz);
4868             if (!host_ifconf) {
4869                 return -TARGET_ENOMEM;
4870             }
4871             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4872             free_buf = 1;
4873         }
4874         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4875 
4876         host_ifconf->ifc_len = host_ifc_len;
4877     } else {
4878       host_ifc_buf = NULL;
4879     }
4880     host_ifconf->ifc_buf = host_ifc_buf;
4881 
4882     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4883     if (!is_error(ret)) {
4884 	/* convert host ifc_len to target ifc_len */
4885 
4886         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4887         target_ifc_len = nb_ifreq * target_ifreq_size;
4888         host_ifconf->ifc_len = target_ifc_len;
4889 
4890 	/* restore target ifc_buf */
4891 
4892         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4893 
4894 	/* copy struct ifconf to target user */
4895 
4896         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4897         if (!argptr)
4898             return -TARGET_EFAULT;
4899         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4900         unlock_user(argptr, arg, target_size);
4901 
4902         if (target_ifc_buf != 0) {
4903             /* copy ifreq[] to target user */
4904             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4905             for (i = 0; i < nb_ifreq ; i++) {
4906                 thunk_convert(argptr + i * target_ifreq_size,
4907                               host_ifc_buf + i * sizeof(struct ifreq),
4908                               ifreq_arg_type, THUNK_TARGET);
4909             }
4910             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4911         }
4912     }
4913 
4914     if (free_buf) {
4915         free(host_ifconf);
4916     }
4917 
4918     return ret;
4919 }
4920 
4921 #if defined(CONFIG_USBFS)
4922 #if HOST_LONG_BITS > 64
4923 #error USBDEVFS thunks do not support >64 bit hosts yet.
4924 #endif
4925 struct live_urb {
4926     uint64_t target_urb_adr;
4927     uint64_t target_buf_adr;
4928     char *target_buf_ptr;
4929     struct usbdevfs_urb host_urb;
4930 };
4931 
4932 static GHashTable *usbdevfs_urb_hashtable(void)
4933 {
4934     static GHashTable *urb_hashtable;
4935 
4936     if (!urb_hashtable) {
4937         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4938     }
4939     return urb_hashtable;
4940 }
4941 
4942 static void urb_hashtable_insert(struct live_urb *urb)
4943 {
4944     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4945     g_hash_table_insert(urb_hashtable, urb, urb);
4946 }
4947 
4948 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4949 {
4950     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4951     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4952 }
4953 
4954 static void urb_hashtable_remove(struct live_urb *urb)
4955 {
4956     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4957     g_hash_table_remove(urb_hashtable, urb);
4958 }
4959 
4960 static abi_long
4961 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4962                           int fd, int cmd, abi_long arg)
4963 {
4964     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4965     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4966     struct live_urb *lurb;
4967     void *argptr;
4968     uint64_t hurb;
4969     int target_size;
4970     uintptr_t target_urb_adr;
4971     abi_long ret;
4972 
4973     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4974 
4975     memset(buf_temp, 0, sizeof(uint64_t));
4976     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4977     if (is_error(ret)) {
4978         return ret;
4979     }
4980 
4981     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4982     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4983     if (!lurb->target_urb_adr) {
4984         return -TARGET_EFAULT;
4985     }
4986     urb_hashtable_remove(lurb);
4987     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4988         lurb->host_urb.buffer_length);
4989     lurb->target_buf_ptr = NULL;
4990 
4991     /* restore the guest buffer pointer */
4992     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4993 
4994     /* update the guest urb struct */
4995     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4996     if (!argptr) {
4997         g_free(lurb);
4998         return -TARGET_EFAULT;
4999     }
5000     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5001     unlock_user(argptr, lurb->target_urb_adr, target_size);
5002 
5003     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5004     /* write back the urb handle */
5005     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5006     if (!argptr) {
5007         g_free(lurb);
5008         return -TARGET_EFAULT;
5009     }
5010 
5011     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5012     target_urb_adr = lurb->target_urb_adr;
5013     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5014     unlock_user(argptr, arg, target_size);
5015 
5016     g_free(lurb);
5017     return ret;
5018 }
5019 
5020 static abi_long
5021 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5022                              uint8_t *buf_temp __attribute__((unused)),
5023                              int fd, int cmd, abi_long arg)
5024 {
5025     struct live_urb *lurb;
5026 
5027     /* map target address back to host URB with metadata. */
5028     lurb = urb_hashtable_lookup(arg);
5029     if (!lurb) {
5030         return -TARGET_EFAULT;
5031     }
5032     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5033 }
5034 
5035 static abi_long
5036 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5037                             int fd, int cmd, abi_long arg)
5038 {
5039     const argtype *arg_type = ie->arg_type;
5040     int target_size;
5041     abi_long ret;
5042     void *argptr;
5043     int rw_dir;
5044     struct live_urb *lurb;
5045 
5046     /*
5047      * each submitted URB needs to map to a unique ID for the
5048      * kernel, and that unique ID needs to be a pointer to
5049      * host memory.  hence, we need to malloc for each URB.
5050      * isochronous transfers have a variable length struct.
5051      */
5052     arg_type++;
5053     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5054 
5055     /* construct host copy of urb and metadata */
5056     lurb = g_try_malloc0(sizeof(struct live_urb));
5057     if (!lurb) {
5058         return -TARGET_ENOMEM;
5059     }
5060 
5061     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5062     if (!argptr) {
5063         g_free(lurb);
5064         return -TARGET_EFAULT;
5065     }
5066     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5067     unlock_user(argptr, arg, 0);
5068 
5069     lurb->target_urb_adr = arg;
5070     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5071 
5072     /* buffer space used depends on endpoint type so lock the entire buffer */
5073     /* control type urbs should check the buffer contents for true direction */
5074     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5075     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5076         lurb->host_urb.buffer_length, 1);
5077     if (lurb->target_buf_ptr == NULL) {
5078         g_free(lurb);
5079         return -TARGET_EFAULT;
5080     }
5081 
5082     /* update buffer pointer in host copy */
5083     lurb->host_urb.buffer = lurb->target_buf_ptr;
5084 
5085     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5086     if (is_error(ret)) {
5087         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5088         g_free(lurb);
5089     } else {
5090         urb_hashtable_insert(lurb);
5091     }
5092 
5093     return ret;
5094 }
5095 #endif /* CONFIG_USBFS */
5096 
5097 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5098                             int cmd, abi_long arg)
5099 {
5100     void *argptr;
5101     struct dm_ioctl *host_dm;
5102     abi_long guest_data;
5103     uint32_t guest_data_size;
5104     int target_size;
5105     const argtype *arg_type = ie->arg_type;
5106     abi_long ret;
5107     void *big_buf = NULL;
5108     char *host_data;
5109 
5110     arg_type++;
5111     target_size = thunk_type_size(arg_type, 0);
5112     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5113     if (!argptr) {
5114         ret = -TARGET_EFAULT;
5115         goto out;
5116     }
5117     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5118     unlock_user(argptr, arg, 0);
5119 
5120     /* buf_temp is too small, so fetch things into a bigger buffer */
5121     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5122     memcpy(big_buf, buf_temp, target_size);
5123     buf_temp = big_buf;
5124     host_dm = big_buf;
5125 
5126     guest_data = arg + host_dm->data_start;
5127     if ((guest_data - arg) < 0) {
5128         ret = -TARGET_EINVAL;
5129         goto out;
5130     }
5131     guest_data_size = host_dm->data_size - host_dm->data_start;
5132     host_data = (char*)host_dm + host_dm->data_start;
5133 
5134     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5135     if (!argptr) {
5136         ret = -TARGET_EFAULT;
5137         goto out;
5138     }
5139 
5140     switch (ie->host_cmd) {
5141     case DM_REMOVE_ALL:
5142     case DM_LIST_DEVICES:
5143     case DM_DEV_CREATE:
5144     case DM_DEV_REMOVE:
5145     case DM_DEV_SUSPEND:
5146     case DM_DEV_STATUS:
5147     case DM_DEV_WAIT:
5148     case DM_TABLE_STATUS:
5149     case DM_TABLE_CLEAR:
5150     case DM_TABLE_DEPS:
5151     case DM_LIST_VERSIONS:
5152         /* no input data */
5153         break;
5154     case DM_DEV_RENAME:
5155     case DM_DEV_SET_GEOMETRY:
5156         /* data contains only strings */
5157         memcpy(host_data, argptr, guest_data_size);
5158         break;
5159     case DM_TARGET_MSG:
5160         memcpy(host_data, argptr, guest_data_size);
5161         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5162         break;
5163     case DM_TABLE_LOAD:
5164     {
5165         void *gspec = argptr;
5166         void *cur_data = host_data;
5167         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5168         int spec_size = thunk_type_size(arg_type, 0);
5169         int i;
5170 
5171         for (i = 0; i < host_dm->target_count; i++) {
5172             struct dm_target_spec *spec = cur_data;
5173             uint32_t next;
5174             int slen;
5175 
5176             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5177             slen = strlen((char*)gspec + spec_size) + 1;
5178             next = spec->next;
5179             spec->next = sizeof(*spec) + slen;
5180             strcpy((char*)&spec[1], gspec + spec_size);
5181             gspec += next;
5182             cur_data += spec->next;
5183         }
5184         break;
5185     }
5186     default:
5187         ret = -TARGET_EINVAL;
5188         unlock_user(argptr, guest_data, 0);
5189         goto out;
5190     }
5191     unlock_user(argptr, guest_data, 0);
5192 
5193     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5194     if (!is_error(ret)) {
5195         guest_data = arg + host_dm->data_start;
5196         guest_data_size = host_dm->data_size - host_dm->data_start;
5197         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5198         switch (ie->host_cmd) {
5199         case DM_REMOVE_ALL:
5200         case DM_DEV_CREATE:
5201         case DM_DEV_REMOVE:
5202         case DM_DEV_RENAME:
5203         case DM_DEV_SUSPEND:
5204         case DM_DEV_STATUS:
5205         case DM_TABLE_LOAD:
5206         case DM_TABLE_CLEAR:
5207         case DM_TARGET_MSG:
5208         case DM_DEV_SET_GEOMETRY:
5209             /* no return data */
5210             break;
5211         case DM_LIST_DEVICES:
5212         {
5213             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5214             uint32_t remaining_data = guest_data_size;
5215             void *cur_data = argptr;
5216             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5217             int nl_size = 12; /* can't use thunk_size due to alignment */
5218 
5219             while (1) {
5220                 uint32_t next = nl->next;
5221                 if (next) {
5222                     nl->next = nl_size + (strlen(nl->name) + 1);
5223                 }
5224                 if (remaining_data < nl->next) {
5225                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5226                     break;
5227                 }
5228                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5229                 strcpy(cur_data + nl_size, nl->name);
5230                 cur_data += nl->next;
5231                 remaining_data -= nl->next;
5232                 if (!next) {
5233                     break;
5234                 }
5235                 nl = (void*)nl + next;
5236             }
5237             break;
5238         }
5239         case DM_DEV_WAIT:
5240         case DM_TABLE_STATUS:
5241         {
5242             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5243             void *cur_data = argptr;
5244             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5245             int spec_size = thunk_type_size(arg_type, 0);
5246             int i;
5247 
5248             for (i = 0; i < host_dm->target_count; i++) {
5249                 uint32_t next = spec->next;
5250                 int slen = strlen((char*)&spec[1]) + 1;
5251                 spec->next = (cur_data - argptr) + spec_size + slen;
5252                 if (guest_data_size < spec->next) {
5253                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5254                     break;
5255                 }
5256                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5257                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5258                 cur_data = argptr + spec->next;
5259                 spec = (void*)host_dm + host_dm->data_start + next;
5260             }
5261             break;
5262         }
5263         case DM_TABLE_DEPS:
5264         {
5265             void *hdata = (void*)host_dm + host_dm->data_start;
5266             int count = *(uint32_t*)hdata;
5267             uint64_t *hdev = hdata + 8;
5268             uint64_t *gdev = argptr + 8;
5269             int i;
5270 
5271             *(uint32_t*)argptr = tswap32(count);
5272             for (i = 0; i < count; i++) {
5273                 *gdev = tswap64(*hdev);
5274                 gdev++;
5275                 hdev++;
5276             }
5277             break;
5278         }
5279         case DM_LIST_VERSIONS:
5280         {
5281             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5282             uint32_t remaining_data = guest_data_size;
5283             void *cur_data = argptr;
5284             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5285             int vers_size = thunk_type_size(arg_type, 0);
5286 
5287             while (1) {
5288                 uint32_t next = vers->next;
5289                 if (next) {
5290                     vers->next = vers_size + (strlen(vers->name) + 1);
5291                 }
5292                 if (remaining_data < vers->next) {
5293                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5294                     break;
5295                 }
5296                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5297                 strcpy(cur_data + vers_size, vers->name);
5298                 cur_data += vers->next;
5299                 remaining_data -= vers->next;
5300                 if (!next) {
5301                     break;
5302                 }
5303                 vers = (void*)vers + next;
5304             }
5305             break;
5306         }
5307         default:
5308             unlock_user(argptr, guest_data, 0);
5309             ret = -TARGET_EINVAL;
5310             goto out;
5311         }
5312         unlock_user(argptr, guest_data, guest_data_size);
5313 
5314         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5315         if (!argptr) {
5316             ret = -TARGET_EFAULT;
5317             goto out;
5318         }
5319         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5320         unlock_user(argptr, arg, target_size);
5321     }
5322 out:
5323     g_free(big_buf);
5324     return ret;
5325 }
5326 
5327 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5328                                int cmd, abi_long arg)
5329 {
5330     void *argptr;
5331     int target_size;
5332     const argtype *arg_type = ie->arg_type;
5333     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5334     abi_long ret;
5335 
5336     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5337     struct blkpg_partition host_part;
5338 
5339     /* Read and convert blkpg */
5340     arg_type++;
5341     target_size = thunk_type_size(arg_type, 0);
5342     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5343     if (!argptr) {
5344         ret = -TARGET_EFAULT;
5345         goto out;
5346     }
5347     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5348     unlock_user(argptr, arg, 0);
5349 
5350     switch (host_blkpg->op) {
5351     case BLKPG_ADD_PARTITION:
5352     case BLKPG_DEL_PARTITION:
5353         /* payload is struct blkpg_partition */
5354         break;
5355     default:
5356         /* Unknown opcode */
5357         ret = -TARGET_EINVAL;
5358         goto out;
5359     }
5360 
5361     /* Read and convert blkpg->data */
5362     arg = (abi_long)(uintptr_t)host_blkpg->data;
5363     target_size = thunk_type_size(part_arg_type, 0);
5364     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5365     if (!argptr) {
5366         ret = -TARGET_EFAULT;
5367         goto out;
5368     }
5369     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5370     unlock_user(argptr, arg, 0);
5371 
5372     /* Swizzle the data pointer to our local copy and call! */
5373     host_blkpg->data = &host_part;
5374     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5375 
5376 out:
5377     return ret;
5378 }
5379 
5380 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5381                                 int fd, int cmd, abi_long arg)
5382 {
5383     const argtype *arg_type = ie->arg_type;
5384     const StructEntry *se;
5385     const argtype *field_types;
5386     const int *dst_offsets, *src_offsets;
5387     int target_size;
5388     void *argptr;
5389     abi_ulong *target_rt_dev_ptr = NULL;
5390     unsigned long *host_rt_dev_ptr = NULL;
5391     abi_long ret;
5392     int i;
5393 
5394     assert(ie->access == IOC_W);
5395     assert(*arg_type == TYPE_PTR);
5396     arg_type++;
5397     assert(*arg_type == TYPE_STRUCT);
5398     target_size = thunk_type_size(arg_type, 0);
5399     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5400     if (!argptr) {
5401         return -TARGET_EFAULT;
5402     }
5403     arg_type++;
5404     assert(*arg_type == (int)STRUCT_rtentry);
5405     se = struct_entries + *arg_type++;
5406     assert(se->convert[0] == NULL);
5407     /* convert struct here to be able to catch rt_dev string */
5408     field_types = se->field_types;
5409     dst_offsets = se->field_offsets[THUNK_HOST];
5410     src_offsets = se->field_offsets[THUNK_TARGET];
5411     for (i = 0; i < se->nb_fields; i++) {
5412         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5413             assert(*field_types == TYPE_PTRVOID);
5414             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5415             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5416             if (*target_rt_dev_ptr != 0) {
5417                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5418                                                   tswapal(*target_rt_dev_ptr));
5419                 if (!*host_rt_dev_ptr) {
5420                     unlock_user(argptr, arg, 0);
5421                     return -TARGET_EFAULT;
5422                 }
5423             } else {
5424                 *host_rt_dev_ptr = 0;
5425             }
5426             field_types++;
5427             continue;
5428         }
5429         field_types = thunk_convert(buf_temp + dst_offsets[i],
5430                                     argptr + src_offsets[i],
5431                                     field_types, THUNK_HOST);
5432     }
5433     unlock_user(argptr, arg, 0);
5434 
5435     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5436 
5437     assert(host_rt_dev_ptr != NULL);
5438     assert(target_rt_dev_ptr != NULL);
5439     if (*host_rt_dev_ptr != 0) {
5440         unlock_user((void *)*host_rt_dev_ptr,
5441                     *target_rt_dev_ptr, 0);
5442     }
5443     return ret;
5444 }
5445 
5446 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5447                                      int fd, int cmd, abi_long arg)
5448 {
5449     int sig = target_to_host_signal(arg);
5450     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5451 }
5452 
5453 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5454                                     int fd, int cmd, abi_long arg)
5455 {
5456     struct timeval tv;
5457     abi_long ret;
5458 
5459     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5460     if (is_error(ret)) {
5461         return ret;
5462     }
5463 
5464     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5465         if (copy_to_user_timeval(arg, &tv)) {
5466             return -TARGET_EFAULT;
5467         }
5468     } else {
5469         if (copy_to_user_timeval64(arg, &tv)) {
5470             return -TARGET_EFAULT;
5471         }
5472     }
5473 
5474     return ret;
5475 }
5476 
5477 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5478                                       int fd, int cmd, abi_long arg)
5479 {
5480     struct timespec ts;
5481     abi_long ret;
5482 
5483     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5484     if (is_error(ret)) {
5485         return ret;
5486     }
5487 
5488     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5489         if (host_to_target_timespec(arg, &ts)) {
5490             return -TARGET_EFAULT;
5491         }
5492     } else{
5493         if (host_to_target_timespec64(arg, &ts)) {
5494             return -TARGET_EFAULT;
5495         }
5496     }
5497 
5498     return ret;
5499 }
5500 
5501 #ifdef TIOCGPTPEER
5502 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5503                                      int fd, int cmd, abi_long arg)
5504 {
5505     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5506     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5507 }
5508 #endif
5509 
5510 #ifdef HAVE_DRM_H
5511 
5512 static void unlock_drm_version(struct drm_version *host_ver,
5513                                struct target_drm_version *target_ver,
5514                                bool copy)
5515 {
5516     unlock_user(host_ver->name, target_ver->name,
5517                                 copy ? host_ver->name_len : 0);
5518     unlock_user(host_ver->date, target_ver->date,
5519                                 copy ? host_ver->date_len : 0);
5520     unlock_user(host_ver->desc, target_ver->desc,
5521                                 copy ? host_ver->desc_len : 0);
5522 }
5523 
5524 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5525                                           struct target_drm_version *target_ver)
5526 {
5527     memset(host_ver, 0, sizeof(*host_ver));
5528 
5529     __get_user(host_ver->name_len, &target_ver->name_len);
5530     if (host_ver->name_len) {
5531         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5532                                    target_ver->name_len, 0);
5533         if (!host_ver->name) {
5534             return -EFAULT;
5535         }
5536     }
5537 
5538     __get_user(host_ver->date_len, &target_ver->date_len);
5539     if (host_ver->date_len) {
5540         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5541                                    target_ver->date_len, 0);
5542         if (!host_ver->date) {
5543             goto err;
5544         }
5545     }
5546 
5547     __get_user(host_ver->desc_len, &target_ver->desc_len);
5548     if (host_ver->desc_len) {
5549         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5550                                    target_ver->desc_len, 0);
5551         if (!host_ver->desc) {
5552             goto err;
5553         }
5554     }
5555 
5556     return 0;
5557 err:
5558     unlock_drm_version(host_ver, target_ver, false);
5559     return -EFAULT;
5560 }
5561 
5562 static inline void host_to_target_drmversion(
5563                                           struct target_drm_version *target_ver,
5564                                           struct drm_version *host_ver)
5565 {
5566     __put_user(host_ver->version_major, &target_ver->version_major);
5567     __put_user(host_ver->version_minor, &target_ver->version_minor);
5568     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5569     __put_user(host_ver->name_len, &target_ver->name_len);
5570     __put_user(host_ver->date_len, &target_ver->date_len);
5571     __put_user(host_ver->desc_len, &target_ver->desc_len);
5572     unlock_drm_version(host_ver, target_ver, true);
5573 }
5574 
5575 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5576                              int fd, int cmd, abi_long arg)
5577 {
5578     struct drm_version *ver;
5579     struct target_drm_version *target_ver;
5580     abi_long ret;
5581 
5582     switch (ie->host_cmd) {
5583     case DRM_IOCTL_VERSION:
5584         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5585             return -TARGET_EFAULT;
5586         }
5587         ver = (struct drm_version *)buf_temp;
5588         ret = target_to_host_drmversion(ver, target_ver);
5589         if (!is_error(ret)) {
5590             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5591             if (is_error(ret)) {
5592                 unlock_drm_version(ver, target_ver, false);
5593             } else {
5594                 host_to_target_drmversion(target_ver, ver);
5595             }
5596         }
5597         unlock_user_struct(target_ver, arg, 0);
5598         return ret;
5599     }
5600     return -TARGET_ENOSYS;
5601 }
5602 
5603 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5604                                            struct drm_i915_getparam *gparam,
5605                                            int fd, abi_long arg)
5606 {
5607     abi_long ret;
5608     int value;
5609     struct target_drm_i915_getparam *target_gparam;
5610 
5611     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5612         return -TARGET_EFAULT;
5613     }
5614 
5615     __get_user(gparam->param, &target_gparam->param);
5616     gparam->value = &value;
5617     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5618     put_user_s32(value, target_gparam->value);
5619 
5620     unlock_user_struct(target_gparam, arg, 0);
5621     return ret;
5622 }
5623 
5624 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5625                                   int fd, int cmd, abi_long arg)
5626 {
5627     switch (ie->host_cmd) {
5628     case DRM_IOCTL_I915_GETPARAM:
5629         return do_ioctl_drm_i915_getparam(ie,
5630                                           (struct drm_i915_getparam *)buf_temp,
5631                                           fd, arg);
5632     default:
5633         return -TARGET_ENOSYS;
5634     }
5635 }
5636 
5637 #endif
5638 
5639 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5640                                         int fd, int cmd, abi_long arg)
5641 {
5642     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5643     struct tun_filter *target_filter;
5644     char *target_addr;
5645 
5646     assert(ie->access == IOC_W);
5647 
5648     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5649     if (!target_filter) {
5650         return -TARGET_EFAULT;
5651     }
5652     filter->flags = tswap16(target_filter->flags);
5653     filter->count = tswap16(target_filter->count);
5654     unlock_user(target_filter, arg, 0);
5655 
5656     if (filter->count) {
5657         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5658             MAX_STRUCT_SIZE) {
5659             return -TARGET_EFAULT;
5660         }
5661 
5662         target_addr = lock_user(VERIFY_READ,
5663                                 arg + offsetof(struct tun_filter, addr),
5664                                 filter->count * ETH_ALEN, 1);
5665         if (!target_addr) {
5666             return -TARGET_EFAULT;
5667         }
5668         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5669         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5670     }
5671 
5672     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5673 }
5674 
5675 IOCTLEntry ioctl_entries[] = {
5676 #define IOCTL(cmd, access, ...) \
5677     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5678 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5679     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5680 #define IOCTL_IGNORE(cmd) \
5681     { TARGET_ ## cmd, 0, #cmd },
5682 #include "ioctls.h"
5683     { 0, 0, },
5684 };
5685 
5686 /* ??? Implement proper locking for ioctls.  */
5687 /* do_ioctl() Must return target values and target errnos. */
5688 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5689 {
5690     const IOCTLEntry *ie;
5691     const argtype *arg_type;
5692     abi_long ret;
5693     uint8_t buf_temp[MAX_STRUCT_SIZE];
5694     int target_size;
5695     void *argptr;
5696 
5697     ie = ioctl_entries;
5698     for(;;) {
5699         if (ie->target_cmd == 0) {
5700             qemu_log_mask(
5701                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5702             return -TARGET_ENOSYS;
5703         }
5704         if (ie->target_cmd == cmd)
5705             break;
5706         ie++;
5707     }
5708     arg_type = ie->arg_type;
5709     if (ie->do_ioctl) {
5710         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5711     } else if (!ie->host_cmd) {
5712         /* Some architectures define BSD ioctls in their headers
5713            that are not implemented in Linux.  */
5714         return -TARGET_ENOSYS;
5715     }
5716 
5717     switch(arg_type[0]) {
5718     case TYPE_NULL:
5719         /* no argument */
5720         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5721         break;
5722     case TYPE_PTRVOID:
5723     case TYPE_INT:
5724     case TYPE_LONG:
5725     case TYPE_ULONG:
5726         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5727         break;
5728     case TYPE_PTR:
5729         arg_type++;
5730         target_size = thunk_type_size(arg_type, 0);
5731         switch(ie->access) {
5732         case IOC_R:
5733             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5734             if (!is_error(ret)) {
5735                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5736                 if (!argptr)
5737                     return -TARGET_EFAULT;
5738                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5739                 unlock_user(argptr, arg, target_size);
5740             }
5741             break;
5742         case IOC_W:
5743             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5744             if (!argptr)
5745                 return -TARGET_EFAULT;
5746             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5747             unlock_user(argptr, arg, 0);
5748             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5749             break;
5750         default:
5751         case IOC_RW:
5752             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5753             if (!argptr)
5754                 return -TARGET_EFAULT;
5755             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5756             unlock_user(argptr, arg, 0);
5757             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5758             if (!is_error(ret)) {
5759                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5760                 if (!argptr)
5761                     return -TARGET_EFAULT;
5762                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5763                 unlock_user(argptr, arg, target_size);
5764             }
5765             break;
5766         }
5767         break;
5768     default:
5769         qemu_log_mask(LOG_UNIMP,
5770                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5771                       (long)cmd, arg_type[0]);
5772         ret = -TARGET_ENOSYS;
5773         break;
5774     }
5775     return ret;
5776 }
5777 
5778 static const bitmask_transtbl iflag_tbl[] = {
5779         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5780         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5781         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5782         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5783         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5784         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5785         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5786         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5787         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5788         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5789         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5790         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5791         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5792         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5793         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5794         { 0, 0, 0, 0 }
5795 };
5796 
5797 static const bitmask_transtbl oflag_tbl[] = {
5798 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5799 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5800 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5801 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5802 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5803 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5804 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5805 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5806 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5807 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5808 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5809 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5810 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5811 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5812 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5813 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5814 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5815 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5816 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5817 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5818 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5819 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5820 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5821 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5822 	{ 0, 0, 0, 0 }
5823 };
5824 
5825 static const bitmask_transtbl cflag_tbl[] = {
5826 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5827 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5828 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5829 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5830 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5831 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5832 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5833 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5834 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5835 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5836 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5837 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5838 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5839 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5840 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5841 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5842 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5843 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5844 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5845 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5846 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5847 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5848 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5849 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5850 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5851 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5852 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5853 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5854 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5855 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5856 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5857 	{ 0, 0, 0, 0 }
5858 };
5859 
5860 static const bitmask_transtbl lflag_tbl[] = {
5861   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5862   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5863   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5864   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5865   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5866   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5867   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5868   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5869   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5870   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5871   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5872   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5873   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5874   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5875   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5876   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5877   { 0, 0, 0, 0 }
5878 };
5879 
5880 static void target_to_host_termios (void *dst, const void *src)
5881 {
5882     struct host_termios *host = dst;
5883     const struct target_termios *target = src;
5884 
5885     host->c_iflag =
5886         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5887     host->c_oflag =
5888         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5889     host->c_cflag =
5890         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5891     host->c_lflag =
5892         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5893     host->c_line = target->c_line;
5894 
5895     memset(host->c_cc, 0, sizeof(host->c_cc));
5896     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5897     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5898     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5899     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5900     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5901     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5902     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5903     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5904     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5905     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5906     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5907     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5908     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5909     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5910     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5911     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5912     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5913 }
5914 
5915 static void host_to_target_termios (void *dst, const void *src)
5916 {
5917     struct target_termios *target = dst;
5918     const struct host_termios *host = src;
5919 
5920     target->c_iflag =
5921         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5922     target->c_oflag =
5923         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5924     target->c_cflag =
5925         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5926     target->c_lflag =
5927         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5928     target->c_line = host->c_line;
5929 
5930     memset(target->c_cc, 0, sizeof(target->c_cc));
5931     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5932     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5933     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5934     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5935     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5936     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5937     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5938     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5939     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5940     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5941     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5942     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5943     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5944     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5945     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5946     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5947     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5948 }
5949 
5950 static const StructEntry struct_termios_def = {
5951     .convert = { host_to_target_termios, target_to_host_termios },
5952     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5953     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5954     .print = print_termios,
5955 };
5956 
5957 static const bitmask_transtbl mmap_flags_tbl[] = {
5958     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5959     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5960     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5961     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5962       MAP_ANONYMOUS, MAP_ANONYMOUS },
5963     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5964       MAP_GROWSDOWN, MAP_GROWSDOWN },
5965     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5966       MAP_DENYWRITE, MAP_DENYWRITE },
5967     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5968       MAP_EXECUTABLE, MAP_EXECUTABLE },
5969     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5970     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5971       MAP_NORESERVE, MAP_NORESERVE },
5972     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5973     /* MAP_STACK had been ignored by the kernel for quite some time.
5974        Recognize it for the target insofar as we do not want to pass
5975        it through to the host.  */
5976     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5977     { 0, 0, 0, 0 }
5978 };
5979 
5980 /*
5981  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5982  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5983  */
5984 #if defined(TARGET_I386)
5985 
5986 /* NOTE: there is really one LDT for all the threads */
5987 static uint8_t *ldt_table;
5988 
5989 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5990 {
5991     int size;
5992     void *p;
5993 
5994     if (!ldt_table)
5995         return 0;
5996     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5997     if (size > bytecount)
5998         size = bytecount;
5999     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6000     if (!p)
6001         return -TARGET_EFAULT;
6002     /* ??? Should this by byteswapped?  */
6003     memcpy(p, ldt_table, size);
6004     unlock_user(p, ptr, size);
6005     return size;
6006 }
6007 
6008 /* XXX: add locking support */
6009 static abi_long write_ldt(CPUX86State *env,
6010                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6011 {
6012     struct target_modify_ldt_ldt_s ldt_info;
6013     struct target_modify_ldt_ldt_s *target_ldt_info;
6014     int seg_32bit, contents, read_exec_only, limit_in_pages;
6015     int seg_not_present, useable, lm;
6016     uint32_t *lp, entry_1, entry_2;
6017 
6018     if (bytecount != sizeof(ldt_info))
6019         return -TARGET_EINVAL;
6020     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6021         return -TARGET_EFAULT;
6022     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6023     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6024     ldt_info.limit = tswap32(target_ldt_info->limit);
6025     ldt_info.flags = tswap32(target_ldt_info->flags);
6026     unlock_user_struct(target_ldt_info, ptr, 0);
6027 
6028     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6029         return -TARGET_EINVAL;
6030     seg_32bit = ldt_info.flags & 1;
6031     contents = (ldt_info.flags >> 1) & 3;
6032     read_exec_only = (ldt_info.flags >> 3) & 1;
6033     limit_in_pages = (ldt_info.flags >> 4) & 1;
6034     seg_not_present = (ldt_info.flags >> 5) & 1;
6035     useable = (ldt_info.flags >> 6) & 1;
6036 #ifdef TARGET_ABI32
6037     lm = 0;
6038 #else
6039     lm = (ldt_info.flags >> 7) & 1;
6040 #endif
6041     if (contents == 3) {
6042         if (oldmode)
6043             return -TARGET_EINVAL;
6044         if (seg_not_present == 0)
6045             return -TARGET_EINVAL;
6046     }
6047     /* allocate the LDT */
6048     if (!ldt_table) {
6049         env->ldt.base = target_mmap(0,
6050                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6051                                     PROT_READ|PROT_WRITE,
6052                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6053         if (env->ldt.base == -1)
6054             return -TARGET_ENOMEM;
6055         memset(g2h_untagged(env->ldt.base), 0,
6056                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6057         env->ldt.limit = 0xffff;
6058         ldt_table = g2h_untagged(env->ldt.base);
6059     }
6060 
6061     /* NOTE: same code as Linux kernel */
6062     /* Allow LDTs to be cleared by the user. */
6063     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6064         if (oldmode ||
6065             (contents == 0		&&
6066              read_exec_only == 1	&&
6067              seg_32bit == 0		&&
6068              limit_in_pages == 0	&&
6069              seg_not_present == 1	&&
6070              useable == 0 )) {
6071             entry_1 = 0;
6072             entry_2 = 0;
6073             goto install;
6074         }
6075     }
6076 
6077     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6078         (ldt_info.limit & 0x0ffff);
6079     entry_2 = (ldt_info.base_addr & 0xff000000) |
6080         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6081         (ldt_info.limit & 0xf0000) |
6082         ((read_exec_only ^ 1) << 9) |
6083         (contents << 10) |
6084         ((seg_not_present ^ 1) << 15) |
6085         (seg_32bit << 22) |
6086         (limit_in_pages << 23) |
6087         (lm << 21) |
6088         0x7000;
6089     if (!oldmode)
6090         entry_2 |= (useable << 20);
6091 
6092     /* Install the new entry ...  */
6093 install:
6094     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6095     lp[0] = tswap32(entry_1);
6096     lp[1] = tswap32(entry_2);
6097     return 0;
6098 }
6099 
6100 /* specific and weird i386 syscalls */
6101 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6102                               unsigned long bytecount)
6103 {
6104     abi_long ret;
6105 
6106     switch (func) {
6107     case 0:
6108         ret = read_ldt(ptr, bytecount);
6109         break;
6110     case 1:
6111         ret = write_ldt(env, ptr, bytecount, 1);
6112         break;
6113     case 0x11:
6114         ret = write_ldt(env, ptr, bytecount, 0);
6115         break;
6116     default:
6117         ret = -TARGET_ENOSYS;
6118         break;
6119     }
6120     return ret;
6121 }
6122 
6123 #if defined(TARGET_ABI32)
6124 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6125 {
6126     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6127     struct target_modify_ldt_ldt_s ldt_info;
6128     struct target_modify_ldt_ldt_s *target_ldt_info;
6129     int seg_32bit, contents, read_exec_only, limit_in_pages;
6130     int seg_not_present, useable, lm;
6131     uint32_t *lp, entry_1, entry_2;
6132     int i;
6133 
6134     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6135     if (!target_ldt_info)
6136         return -TARGET_EFAULT;
6137     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6138     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6139     ldt_info.limit = tswap32(target_ldt_info->limit);
6140     ldt_info.flags = tswap32(target_ldt_info->flags);
6141     if (ldt_info.entry_number == -1) {
6142         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6143             if (gdt_table[i] == 0) {
6144                 ldt_info.entry_number = i;
6145                 target_ldt_info->entry_number = tswap32(i);
6146                 break;
6147             }
6148         }
6149     }
6150     unlock_user_struct(target_ldt_info, ptr, 1);
6151 
6152     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6153         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6154            return -TARGET_EINVAL;
6155     seg_32bit = ldt_info.flags & 1;
6156     contents = (ldt_info.flags >> 1) & 3;
6157     read_exec_only = (ldt_info.flags >> 3) & 1;
6158     limit_in_pages = (ldt_info.flags >> 4) & 1;
6159     seg_not_present = (ldt_info.flags >> 5) & 1;
6160     useable = (ldt_info.flags >> 6) & 1;
6161 #ifdef TARGET_ABI32
6162     lm = 0;
6163 #else
6164     lm = (ldt_info.flags >> 7) & 1;
6165 #endif
6166 
6167     if (contents == 3) {
6168         if (seg_not_present == 0)
6169             return -TARGET_EINVAL;
6170     }
6171 
6172     /* NOTE: same code as Linux kernel */
6173     /* Allow LDTs to be cleared by the user. */
6174     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6175         if ((contents == 0             &&
6176              read_exec_only == 1       &&
6177              seg_32bit == 0            &&
6178              limit_in_pages == 0       &&
6179              seg_not_present == 1      &&
6180              useable == 0 )) {
6181             entry_1 = 0;
6182             entry_2 = 0;
6183             goto install;
6184         }
6185     }
6186 
6187     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6188         (ldt_info.limit & 0x0ffff);
6189     entry_2 = (ldt_info.base_addr & 0xff000000) |
6190         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6191         (ldt_info.limit & 0xf0000) |
6192         ((read_exec_only ^ 1) << 9) |
6193         (contents << 10) |
6194         ((seg_not_present ^ 1) << 15) |
6195         (seg_32bit << 22) |
6196         (limit_in_pages << 23) |
6197         (useable << 20) |
6198         (lm << 21) |
6199         0x7000;
6200 
6201     /* Install the new entry ...  */
6202 install:
6203     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6204     lp[0] = tswap32(entry_1);
6205     lp[1] = tswap32(entry_2);
6206     return 0;
6207 }
6208 
6209 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6210 {
6211     struct target_modify_ldt_ldt_s *target_ldt_info;
6212     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6213     uint32_t base_addr, limit, flags;
6214     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6215     int seg_not_present, useable, lm;
6216     uint32_t *lp, entry_1, entry_2;
6217 
6218     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6219     if (!target_ldt_info)
6220         return -TARGET_EFAULT;
6221     idx = tswap32(target_ldt_info->entry_number);
6222     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6223         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6224         unlock_user_struct(target_ldt_info, ptr, 1);
6225         return -TARGET_EINVAL;
6226     }
6227     lp = (uint32_t *)(gdt_table + idx);
6228     entry_1 = tswap32(lp[0]);
6229     entry_2 = tswap32(lp[1]);
6230 
6231     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6232     contents = (entry_2 >> 10) & 3;
6233     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6234     seg_32bit = (entry_2 >> 22) & 1;
6235     limit_in_pages = (entry_2 >> 23) & 1;
6236     useable = (entry_2 >> 20) & 1;
6237 #ifdef TARGET_ABI32
6238     lm = 0;
6239 #else
6240     lm = (entry_2 >> 21) & 1;
6241 #endif
6242     flags = (seg_32bit << 0) | (contents << 1) |
6243         (read_exec_only << 3) | (limit_in_pages << 4) |
6244         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6245     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6246     base_addr = (entry_1 >> 16) |
6247         (entry_2 & 0xff000000) |
6248         ((entry_2 & 0xff) << 16);
6249     target_ldt_info->base_addr = tswapal(base_addr);
6250     target_ldt_info->limit = tswap32(limit);
6251     target_ldt_info->flags = tswap32(flags);
6252     unlock_user_struct(target_ldt_info, ptr, 1);
6253     return 0;
6254 }
6255 
6256 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6257 {
6258     return -TARGET_ENOSYS;
6259 }
6260 #else
6261 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6262 {
6263     abi_long ret = 0;
6264     abi_ulong val;
6265     int idx;
6266 
6267     switch(code) {
6268     case TARGET_ARCH_SET_GS:
6269     case TARGET_ARCH_SET_FS:
6270         if (code == TARGET_ARCH_SET_GS)
6271             idx = R_GS;
6272         else
6273             idx = R_FS;
6274         cpu_x86_load_seg(env, idx, 0);
6275         env->segs[idx].base = addr;
6276         break;
6277     case TARGET_ARCH_GET_GS:
6278     case TARGET_ARCH_GET_FS:
6279         if (code == TARGET_ARCH_GET_GS)
6280             idx = R_GS;
6281         else
6282             idx = R_FS;
6283         val = env->segs[idx].base;
6284         if (put_user(val, addr, abi_ulong))
6285             ret = -TARGET_EFAULT;
6286         break;
6287     default:
6288         ret = -TARGET_EINVAL;
6289         break;
6290     }
6291     return ret;
6292 }
6293 #endif /* defined(TARGET_ABI32 */
6294 
6295 #endif /* defined(TARGET_I386) */
6296 
6297 #define NEW_STACK_SIZE 0x40000
6298 
6299 
6300 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6301 typedef struct {
6302     CPUArchState *env;
6303     pthread_mutex_t mutex;
6304     pthread_cond_t cond;
6305     pthread_t thread;
6306     uint32_t tid;
6307     abi_ulong child_tidptr;
6308     abi_ulong parent_tidptr;
6309     sigset_t sigmask;
6310 } new_thread_info;
6311 
6312 static void *clone_func(void *arg)
6313 {
6314     new_thread_info *info = arg;
6315     CPUArchState *env;
6316     CPUState *cpu;
6317     TaskState *ts;
6318 
6319     rcu_register_thread();
6320     tcg_register_thread();
6321     env = info->env;
6322     cpu = env_cpu(env);
6323     thread_cpu = cpu;
6324     ts = (TaskState *)cpu->opaque;
6325     info->tid = sys_gettid();
6326     task_settid(ts);
6327     if (info->child_tidptr)
6328         put_user_u32(info->tid, info->child_tidptr);
6329     if (info->parent_tidptr)
6330         put_user_u32(info->tid, info->parent_tidptr);
6331     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6332     /* Enable signals.  */
6333     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6334     /* Signal to the parent that we're ready.  */
6335     pthread_mutex_lock(&info->mutex);
6336     pthread_cond_broadcast(&info->cond);
6337     pthread_mutex_unlock(&info->mutex);
6338     /* Wait until the parent has finished initializing the tls state.  */
6339     pthread_mutex_lock(&clone_lock);
6340     pthread_mutex_unlock(&clone_lock);
6341     cpu_loop(env);
6342     /* never exits */
6343     return NULL;
6344 }
6345 
6346 /* do_fork() Must return host values and target errnos (unlike most
6347    do_*() functions). */
6348 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6349                    abi_ulong parent_tidptr, target_ulong newtls,
6350                    abi_ulong child_tidptr)
6351 {
6352     CPUState *cpu = env_cpu(env);
6353     int ret;
6354     TaskState *ts;
6355     CPUState *new_cpu;
6356     CPUArchState *new_env;
6357     sigset_t sigmask;
6358 
6359     flags &= ~CLONE_IGNORED_FLAGS;
6360 
6361     /* Emulate vfork() with fork() */
6362     if (flags & CLONE_VFORK)
6363         flags &= ~(CLONE_VFORK | CLONE_VM);
6364 
6365     if (flags & CLONE_VM) {
6366         TaskState *parent_ts = (TaskState *)cpu->opaque;
6367         new_thread_info info;
6368         pthread_attr_t attr;
6369 
6370         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6371             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6372             return -TARGET_EINVAL;
6373         }
6374 
6375         ts = g_new0(TaskState, 1);
6376         init_task_state(ts);
6377 
6378         /* Grab a mutex so that thread setup appears atomic.  */
6379         pthread_mutex_lock(&clone_lock);
6380 
6381         /*
6382          * If this is our first additional thread, we need to ensure we
6383          * generate code for parallel execution and flush old translations.
6384          * Do this now so that the copy gets CF_PARALLEL too.
6385          */
6386         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6387             cpu->tcg_cflags |= CF_PARALLEL;
6388             tb_flush(cpu);
6389         }
6390 
6391         /* we create a new CPU instance. */
6392         new_env = cpu_copy(env);
6393         /* Init regs that differ from the parent.  */
6394         cpu_clone_regs_child(new_env, newsp, flags);
6395         cpu_clone_regs_parent(env, flags);
6396         new_cpu = env_cpu(new_env);
6397         new_cpu->opaque = ts;
6398         ts->bprm = parent_ts->bprm;
6399         ts->info = parent_ts->info;
6400         ts->signal_mask = parent_ts->signal_mask;
6401 
6402         if (flags & CLONE_CHILD_CLEARTID) {
6403             ts->child_tidptr = child_tidptr;
6404         }
6405 
6406         if (flags & CLONE_SETTLS) {
6407             cpu_set_tls (new_env, newtls);
6408         }
6409 
6410         memset(&info, 0, sizeof(info));
6411         pthread_mutex_init(&info.mutex, NULL);
6412         pthread_mutex_lock(&info.mutex);
6413         pthread_cond_init(&info.cond, NULL);
6414         info.env = new_env;
6415         if (flags & CLONE_CHILD_SETTID) {
6416             info.child_tidptr = child_tidptr;
6417         }
6418         if (flags & CLONE_PARENT_SETTID) {
6419             info.parent_tidptr = parent_tidptr;
6420         }
6421 
6422         ret = pthread_attr_init(&attr);
6423         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6424         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6425         /* It is not safe to deliver signals until the child has finished
6426            initializing, so temporarily block all signals.  */
6427         sigfillset(&sigmask);
6428         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6429         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6430 
6431         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6432         /* TODO: Free new CPU state if thread creation failed.  */
6433 
6434         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6435         pthread_attr_destroy(&attr);
6436         if (ret == 0) {
6437             /* Wait for the child to initialize.  */
6438             pthread_cond_wait(&info.cond, &info.mutex);
6439             ret = info.tid;
6440         } else {
6441             ret = -1;
6442         }
6443         pthread_mutex_unlock(&info.mutex);
6444         pthread_cond_destroy(&info.cond);
6445         pthread_mutex_destroy(&info.mutex);
6446         pthread_mutex_unlock(&clone_lock);
6447     } else {
6448         /* if no CLONE_VM, we consider it is a fork */
6449         if (flags & CLONE_INVALID_FORK_FLAGS) {
6450             return -TARGET_EINVAL;
6451         }
6452 
6453         /* We can't support custom termination signals */
6454         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6455             return -TARGET_EINVAL;
6456         }
6457 
6458         if (block_signals()) {
6459             return -TARGET_ERESTARTSYS;
6460         }
6461 
6462         fork_start();
6463         ret = fork();
6464         if (ret == 0) {
6465             /* Child Process.  */
6466             cpu_clone_regs_child(env, newsp, flags);
6467             fork_end(1);
6468             /* There is a race condition here.  The parent process could
6469                theoretically read the TID in the child process before the child
6470                tid is set.  This would require using either ptrace
6471                (not implemented) or having *_tidptr to point at a shared memory
6472                mapping.  We can't repeat the spinlock hack used above because
6473                the child process gets its own copy of the lock.  */
6474             if (flags & CLONE_CHILD_SETTID)
6475                 put_user_u32(sys_gettid(), child_tidptr);
6476             if (flags & CLONE_PARENT_SETTID)
6477                 put_user_u32(sys_gettid(), parent_tidptr);
6478             ts = (TaskState *)cpu->opaque;
6479             if (flags & CLONE_SETTLS)
6480                 cpu_set_tls (env, newtls);
6481             if (flags & CLONE_CHILD_CLEARTID)
6482                 ts->child_tidptr = child_tidptr;
6483         } else {
6484             cpu_clone_regs_parent(env, flags);
6485             fork_end(0);
6486         }
6487     }
6488     return ret;
6489 }
6490 
6491 /* warning : doesn't handle linux specific flags... */
6492 static int target_to_host_fcntl_cmd(int cmd)
6493 {
6494     int ret;
6495 
6496     switch(cmd) {
6497     case TARGET_F_DUPFD:
6498     case TARGET_F_GETFD:
6499     case TARGET_F_SETFD:
6500     case TARGET_F_GETFL:
6501     case TARGET_F_SETFL:
6502     case TARGET_F_OFD_GETLK:
6503     case TARGET_F_OFD_SETLK:
6504     case TARGET_F_OFD_SETLKW:
6505         ret = cmd;
6506         break;
6507     case TARGET_F_GETLK:
6508         ret = F_GETLK64;
6509         break;
6510     case TARGET_F_SETLK:
6511         ret = F_SETLK64;
6512         break;
6513     case TARGET_F_SETLKW:
6514         ret = F_SETLKW64;
6515         break;
6516     case TARGET_F_GETOWN:
6517         ret = F_GETOWN;
6518         break;
6519     case TARGET_F_SETOWN:
6520         ret = F_SETOWN;
6521         break;
6522     case TARGET_F_GETSIG:
6523         ret = F_GETSIG;
6524         break;
6525     case TARGET_F_SETSIG:
6526         ret = F_SETSIG;
6527         break;
6528 #if TARGET_ABI_BITS == 32
6529     case TARGET_F_GETLK64:
6530         ret = F_GETLK64;
6531         break;
6532     case TARGET_F_SETLK64:
6533         ret = F_SETLK64;
6534         break;
6535     case TARGET_F_SETLKW64:
6536         ret = F_SETLKW64;
6537         break;
6538 #endif
6539     case TARGET_F_SETLEASE:
6540         ret = F_SETLEASE;
6541         break;
6542     case TARGET_F_GETLEASE:
6543         ret = F_GETLEASE;
6544         break;
6545 #ifdef F_DUPFD_CLOEXEC
6546     case TARGET_F_DUPFD_CLOEXEC:
6547         ret = F_DUPFD_CLOEXEC;
6548         break;
6549 #endif
6550     case TARGET_F_NOTIFY:
6551         ret = F_NOTIFY;
6552         break;
6553 #ifdef F_GETOWN_EX
6554     case TARGET_F_GETOWN_EX:
6555         ret = F_GETOWN_EX;
6556         break;
6557 #endif
6558 #ifdef F_SETOWN_EX
6559     case TARGET_F_SETOWN_EX:
6560         ret = F_SETOWN_EX;
6561         break;
6562 #endif
6563 #ifdef F_SETPIPE_SZ
6564     case TARGET_F_SETPIPE_SZ:
6565         ret = F_SETPIPE_SZ;
6566         break;
6567     case TARGET_F_GETPIPE_SZ:
6568         ret = F_GETPIPE_SZ;
6569         break;
6570 #endif
6571 #ifdef F_ADD_SEALS
6572     case TARGET_F_ADD_SEALS:
6573         ret = F_ADD_SEALS;
6574         break;
6575     case TARGET_F_GET_SEALS:
6576         ret = F_GET_SEALS;
6577         break;
6578 #endif
6579     default:
6580         ret = -TARGET_EINVAL;
6581         break;
6582     }
6583 
6584 #if defined(__powerpc64__)
6585     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6586      * is not supported by kernel. The glibc fcntl call actually adjusts
6587      * them to 5, 6 and 7 before making the syscall(). Since we make the
6588      * syscall directly, adjust to what is supported by the kernel.
6589      */
6590     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6591         ret -= F_GETLK64 - 5;
6592     }
6593 #endif
6594 
6595     return ret;
6596 }
6597 
6598 #define FLOCK_TRANSTBL \
6599     switch (type) { \
6600     TRANSTBL_CONVERT(F_RDLCK); \
6601     TRANSTBL_CONVERT(F_WRLCK); \
6602     TRANSTBL_CONVERT(F_UNLCK); \
6603     }
6604 
6605 static int target_to_host_flock(int type)
6606 {
6607 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6608     FLOCK_TRANSTBL
6609 #undef  TRANSTBL_CONVERT
6610     return -TARGET_EINVAL;
6611 }
6612 
6613 static int host_to_target_flock(int type)
6614 {
6615 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6616     FLOCK_TRANSTBL
6617 #undef  TRANSTBL_CONVERT
6618     /* if we don't know how to convert the value coming
6619      * from the host we copy to the target field as-is
6620      */
6621     return type;
6622 }
6623 
6624 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6625                                             abi_ulong target_flock_addr)
6626 {
6627     struct target_flock *target_fl;
6628     int l_type;
6629 
6630     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6631         return -TARGET_EFAULT;
6632     }
6633 
6634     __get_user(l_type, &target_fl->l_type);
6635     l_type = target_to_host_flock(l_type);
6636     if (l_type < 0) {
6637         return l_type;
6638     }
6639     fl->l_type = l_type;
6640     __get_user(fl->l_whence, &target_fl->l_whence);
6641     __get_user(fl->l_start, &target_fl->l_start);
6642     __get_user(fl->l_len, &target_fl->l_len);
6643     __get_user(fl->l_pid, &target_fl->l_pid);
6644     unlock_user_struct(target_fl, target_flock_addr, 0);
6645     return 0;
6646 }
6647 
6648 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6649                                           const struct flock64 *fl)
6650 {
6651     struct target_flock *target_fl;
6652     short l_type;
6653 
6654     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6655         return -TARGET_EFAULT;
6656     }
6657 
6658     l_type = host_to_target_flock(fl->l_type);
6659     __put_user(l_type, &target_fl->l_type);
6660     __put_user(fl->l_whence, &target_fl->l_whence);
6661     __put_user(fl->l_start, &target_fl->l_start);
6662     __put_user(fl->l_len, &target_fl->l_len);
6663     __put_user(fl->l_pid, &target_fl->l_pid);
6664     unlock_user_struct(target_fl, target_flock_addr, 1);
6665     return 0;
6666 }
6667 
6668 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6669 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6670 
6671 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6672 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6673                                                    abi_ulong target_flock_addr)
6674 {
6675     struct target_oabi_flock64 *target_fl;
6676     int l_type;
6677 
6678     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6679         return -TARGET_EFAULT;
6680     }
6681 
6682     __get_user(l_type, &target_fl->l_type);
6683     l_type = target_to_host_flock(l_type);
6684     if (l_type < 0) {
6685         return l_type;
6686     }
6687     fl->l_type = l_type;
6688     __get_user(fl->l_whence, &target_fl->l_whence);
6689     __get_user(fl->l_start, &target_fl->l_start);
6690     __get_user(fl->l_len, &target_fl->l_len);
6691     __get_user(fl->l_pid, &target_fl->l_pid);
6692     unlock_user_struct(target_fl, target_flock_addr, 0);
6693     return 0;
6694 }
6695 
6696 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6697                                                  const struct flock64 *fl)
6698 {
6699     struct target_oabi_flock64 *target_fl;
6700     short l_type;
6701 
6702     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6703         return -TARGET_EFAULT;
6704     }
6705 
6706     l_type = host_to_target_flock(fl->l_type);
6707     __put_user(l_type, &target_fl->l_type);
6708     __put_user(fl->l_whence, &target_fl->l_whence);
6709     __put_user(fl->l_start, &target_fl->l_start);
6710     __put_user(fl->l_len, &target_fl->l_len);
6711     __put_user(fl->l_pid, &target_fl->l_pid);
6712     unlock_user_struct(target_fl, target_flock_addr, 1);
6713     return 0;
6714 }
6715 #endif
6716 
6717 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6718                                               abi_ulong target_flock_addr)
6719 {
6720     struct target_flock64 *target_fl;
6721     int l_type;
6722 
6723     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6724         return -TARGET_EFAULT;
6725     }
6726 
6727     __get_user(l_type, &target_fl->l_type);
6728     l_type = target_to_host_flock(l_type);
6729     if (l_type < 0) {
6730         return l_type;
6731     }
6732     fl->l_type = l_type;
6733     __get_user(fl->l_whence, &target_fl->l_whence);
6734     __get_user(fl->l_start, &target_fl->l_start);
6735     __get_user(fl->l_len, &target_fl->l_len);
6736     __get_user(fl->l_pid, &target_fl->l_pid);
6737     unlock_user_struct(target_fl, target_flock_addr, 0);
6738     return 0;
6739 }
6740 
6741 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6742                                             const struct flock64 *fl)
6743 {
6744     struct target_flock64 *target_fl;
6745     short l_type;
6746 
6747     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6748         return -TARGET_EFAULT;
6749     }
6750 
6751     l_type = host_to_target_flock(fl->l_type);
6752     __put_user(l_type, &target_fl->l_type);
6753     __put_user(fl->l_whence, &target_fl->l_whence);
6754     __put_user(fl->l_start, &target_fl->l_start);
6755     __put_user(fl->l_len, &target_fl->l_len);
6756     __put_user(fl->l_pid, &target_fl->l_pid);
6757     unlock_user_struct(target_fl, target_flock_addr, 1);
6758     return 0;
6759 }
6760 
6761 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6762 {
6763     struct flock64 fl64;
6764 #ifdef F_GETOWN_EX
6765     struct f_owner_ex fox;
6766     struct target_f_owner_ex *target_fox;
6767 #endif
6768     abi_long ret;
6769     int host_cmd = target_to_host_fcntl_cmd(cmd);
6770 
6771     if (host_cmd == -TARGET_EINVAL)
6772 	    return host_cmd;
6773 
6774     switch(cmd) {
6775     case TARGET_F_GETLK:
6776         ret = copy_from_user_flock(&fl64, arg);
6777         if (ret) {
6778             return ret;
6779         }
6780         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6781         if (ret == 0) {
6782             ret = copy_to_user_flock(arg, &fl64);
6783         }
6784         break;
6785 
6786     case TARGET_F_SETLK:
6787     case TARGET_F_SETLKW:
6788         ret = copy_from_user_flock(&fl64, arg);
6789         if (ret) {
6790             return ret;
6791         }
6792         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6793         break;
6794 
6795     case TARGET_F_GETLK64:
6796     case TARGET_F_OFD_GETLK:
6797         ret = copy_from_user_flock64(&fl64, arg);
6798         if (ret) {
6799             return ret;
6800         }
6801         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6802         if (ret == 0) {
6803             ret = copy_to_user_flock64(arg, &fl64);
6804         }
6805         break;
6806     case TARGET_F_SETLK64:
6807     case TARGET_F_SETLKW64:
6808     case TARGET_F_OFD_SETLK:
6809     case TARGET_F_OFD_SETLKW:
6810         ret = copy_from_user_flock64(&fl64, arg);
6811         if (ret) {
6812             return ret;
6813         }
6814         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6815         break;
6816 
6817     case TARGET_F_GETFL:
6818         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6819         if (ret >= 0) {
6820             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6821         }
6822         break;
6823 
6824     case TARGET_F_SETFL:
6825         ret = get_errno(safe_fcntl(fd, host_cmd,
6826                                    target_to_host_bitmask(arg,
6827                                                           fcntl_flags_tbl)));
6828         break;
6829 
6830 #ifdef F_GETOWN_EX
6831     case TARGET_F_GETOWN_EX:
6832         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6833         if (ret >= 0) {
6834             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6835                 return -TARGET_EFAULT;
6836             target_fox->type = tswap32(fox.type);
6837             target_fox->pid = tswap32(fox.pid);
6838             unlock_user_struct(target_fox, arg, 1);
6839         }
6840         break;
6841 #endif
6842 
6843 #ifdef F_SETOWN_EX
6844     case TARGET_F_SETOWN_EX:
6845         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6846             return -TARGET_EFAULT;
6847         fox.type = tswap32(target_fox->type);
6848         fox.pid = tswap32(target_fox->pid);
6849         unlock_user_struct(target_fox, arg, 0);
6850         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6851         break;
6852 #endif
6853 
6854     case TARGET_F_SETSIG:
6855         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6856         break;
6857 
6858     case TARGET_F_GETSIG:
6859         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6860         break;
6861 
6862     case TARGET_F_SETOWN:
6863     case TARGET_F_GETOWN:
6864     case TARGET_F_SETLEASE:
6865     case TARGET_F_GETLEASE:
6866     case TARGET_F_SETPIPE_SZ:
6867     case TARGET_F_GETPIPE_SZ:
6868     case TARGET_F_ADD_SEALS:
6869     case TARGET_F_GET_SEALS:
6870         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6871         break;
6872 
6873     default:
6874         ret = get_errno(safe_fcntl(fd, cmd, arg));
6875         break;
6876     }
6877     return ret;
6878 }
6879 
6880 #ifdef USE_UID16
6881 
6882 static inline int high2lowuid(int uid)
6883 {
6884     if (uid > 65535)
6885         return 65534;
6886     else
6887         return uid;
6888 }
6889 
6890 static inline int high2lowgid(int gid)
6891 {
6892     if (gid > 65535)
6893         return 65534;
6894     else
6895         return gid;
6896 }
6897 
6898 static inline int low2highuid(int uid)
6899 {
6900     if ((int16_t)uid == -1)
6901         return -1;
6902     else
6903         return uid;
6904 }
6905 
6906 static inline int low2highgid(int gid)
6907 {
6908     if ((int16_t)gid == -1)
6909         return -1;
6910     else
6911         return gid;
6912 }
6913 static inline int tswapid(int id)
6914 {
6915     return tswap16(id);
6916 }
6917 
6918 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6919 
6920 #else /* !USE_UID16 */
6921 static inline int high2lowuid(int uid)
6922 {
6923     return uid;
6924 }
6925 static inline int high2lowgid(int gid)
6926 {
6927     return gid;
6928 }
6929 static inline int low2highuid(int uid)
6930 {
6931     return uid;
6932 }
6933 static inline int low2highgid(int gid)
6934 {
6935     return gid;
6936 }
6937 static inline int tswapid(int id)
6938 {
6939     return tswap32(id);
6940 }
6941 
6942 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6943 
6944 #endif /* USE_UID16 */
6945 
6946 /* We must do direct syscalls for setting UID/GID, because we want to
6947  * implement the Linux system call semantics of "change only for this thread",
6948  * not the libc/POSIX semantics of "change for all threads in process".
6949  * (See http://ewontfix.com/17/ for more details.)
6950  * We use the 32-bit version of the syscalls if present; if it is not
6951  * then either the host architecture supports 32-bit UIDs natively with
6952  * the standard syscall, or the 16-bit UID is the best we can do.
6953  */
6954 #ifdef __NR_setuid32
6955 #define __NR_sys_setuid __NR_setuid32
6956 #else
6957 #define __NR_sys_setuid __NR_setuid
6958 #endif
6959 #ifdef __NR_setgid32
6960 #define __NR_sys_setgid __NR_setgid32
6961 #else
6962 #define __NR_sys_setgid __NR_setgid
6963 #endif
6964 #ifdef __NR_setresuid32
6965 #define __NR_sys_setresuid __NR_setresuid32
6966 #else
6967 #define __NR_sys_setresuid __NR_setresuid
6968 #endif
6969 #ifdef __NR_setresgid32
6970 #define __NR_sys_setresgid __NR_setresgid32
6971 #else
6972 #define __NR_sys_setresgid __NR_setresgid
6973 #endif
6974 
6975 _syscall1(int, sys_setuid, uid_t, uid)
6976 _syscall1(int, sys_setgid, gid_t, gid)
6977 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6978 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6979 
6980 void syscall_init(void)
6981 {
6982     IOCTLEntry *ie;
6983     const argtype *arg_type;
6984     int size;
6985 
6986     thunk_init(STRUCT_MAX);
6987 
6988 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6989 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6990 #include "syscall_types.h"
6991 #undef STRUCT
6992 #undef STRUCT_SPECIAL
6993 
6994     /* we patch the ioctl size if necessary. We rely on the fact that
6995        no ioctl has all the bits at '1' in the size field */
6996     ie = ioctl_entries;
6997     while (ie->target_cmd != 0) {
6998         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6999             TARGET_IOC_SIZEMASK) {
7000             arg_type = ie->arg_type;
7001             if (arg_type[0] != TYPE_PTR) {
7002                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7003                         ie->target_cmd);
7004                 exit(1);
7005             }
7006             arg_type++;
7007             size = thunk_type_size(arg_type, 0);
7008             ie->target_cmd = (ie->target_cmd &
7009                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7010                 (size << TARGET_IOC_SIZESHIFT);
7011         }
7012 
7013         /* automatic consistency check if same arch */
7014 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7015     (defined(__x86_64__) && defined(TARGET_X86_64))
7016         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7017             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7018                     ie->name, ie->target_cmd, ie->host_cmd);
7019         }
7020 #endif
7021         ie++;
7022     }
7023 }
7024 
7025 #ifdef TARGET_NR_truncate64
7026 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7027                                          abi_long arg2,
7028                                          abi_long arg3,
7029                                          abi_long arg4)
7030 {
7031     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7032         arg2 = arg3;
7033         arg3 = arg4;
7034     }
7035     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7036 }
7037 #endif
7038 
7039 #ifdef TARGET_NR_ftruncate64
7040 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7041                                           abi_long arg2,
7042                                           abi_long arg3,
7043                                           abi_long arg4)
7044 {
7045     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7046         arg2 = arg3;
7047         arg3 = arg4;
7048     }
7049     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7050 }
7051 #endif
7052 
7053 #if defined(TARGET_NR_timer_settime) || \
7054     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7055 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7056                                                  abi_ulong target_addr)
7057 {
7058     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7059                                 offsetof(struct target_itimerspec,
7060                                          it_interval)) ||
7061         target_to_host_timespec(&host_its->it_value, target_addr +
7062                                 offsetof(struct target_itimerspec,
7063                                          it_value))) {
7064         return -TARGET_EFAULT;
7065     }
7066 
7067     return 0;
7068 }
7069 #endif
7070 
7071 #if defined(TARGET_NR_timer_settime64) || \
7072     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7073 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7074                                                    abi_ulong target_addr)
7075 {
7076     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7077                                   offsetof(struct target__kernel_itimerspec,
7078                                            it_interval)) ||
7079         target_to_host_timespec64(&host_its->it_value, target_addr +
7080                                   offsetof(struct target__kernel_itimerspec,
7081                                            it_value))) {
7082         return -TARGET_EFAULT;
7083     }
7084 
7085     return 0;
7086 }
7087 #endif
7088 
7089 #if ((defined(TARGET_NR_timerfd_gettime) || \
7090       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7091       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7092 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7093                                                  struct itimerspec *host_its)
7094 {
7095     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7096                                                        it_interval),
7097                                 &host_its->it_interval) ||
7098         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7099                                                        it_value),
7100                                 &host_its->it_value)) {
7101         return -TARGET_EFAULT;
7102     }
7103     return 0;
7104 }
7105 #endif
7106 
7107 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7108       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7109       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7110 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7111                                                    struct itimerspec *host_its)
7112 {
7113     if (host_to_target_timespec64(target_addr +
7114                                   offsetof(struct target__kernel_itimerspec,
7115                                            it_interval),
7116                                   &host_its->it_interval) ||
7117         host_to_target_timespec64(target_addr +
7118                                   offsetof(struct target__kernel_itimerspec,
7119                                            it_value),
7120                                   &host_its->it_value)) {
7121         return -TARGET_EFAULT;
7122     }
7123     return 0;
7124 }
7125 #endif
7126 
7127 #if defined(TARGET_NR_adjtimex) || \
7128     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7129 static inline abi_long target_to_host_timex(struct timex *host_tx,
7130                                             abi_long target_addr)
7131 {
7132     struct target_timex *target_tx;
7133 
7134     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7135         return -TARGET_EFAULT;
7136     }
7137 
7138     __get_user(host_tx->modes, &target_tx->modes);
7139     __get_user(host_tx->offset, &target_tx->offset);
7140     __get_user(host_tx->freq, &target_tx->freq);
7141     __get_user(host_tx->maxerror, &target_tx->maxerror);
7142     __get_user(host_tx->esterror, &target_tx->esterror);
7143     __get_user(host_tx->status, &target_tx->status);
7144     __get_user(host_tx->constant, &target_tx->constant);
7145     __get_user(host_tx->precision, &target_tx->precision);
7146     __get_user(host_tx->tolerance, &target_tx->tolerance);
7147     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7148     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7149     __get_user(host_tx->tick, &target_tx->tick);
7150     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7151     __get_user(host_tx->jitter, &target_tx->jitter);
7152     __get_user(host_tx->shift, &target_tx->shift);
7153     __get_user(host_tx->stabil, &target_tx->stabil);
7154     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7155     __get_user(host_tx->calcnt, &target_tx->calcnt);
7156     __get_user(host_tx->errcnt, &target_tx->errcnt);
7157     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7158     __get_user(host_tx->tai, &target_tx->tai);
7159 
7160     unlock_user_struct(target_tx, target_addr, 0);
7161     return 0;
7162 }
7163 
7164 static inline abi_long host_to_target_timex(abi_long target_addr,
7165                                             struct timex *host_tx)
7166 {
7167     struct target_timex *target_tx;
7168 
7169     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7170         return -TARGET_EFAULT;
7171     }
7172 
7173     __put_user(host_tx->modes, &target_tx->modes);
7174     __put_user(host_tx->offset, &target_tx->offset);
7175     __put_user(host_tx->freq, &target_tx->freq);
7176     __put_user(host_tx->maxerror, &target_tx->maxerror);
7177     __put_user(host_tx->esterror, &target_tx->esterror);
7178     __put_user(host_tx->status, &target_tx->status);
7179     __put_user(host_tx->constant, &target_tx->constant);
7180     __put_user(host_tx->precision, &target_tx->precision);
7181     __put_user(host_tx->tolerance, &target_tx->tolerance);
7182     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7183     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7184     __put_user(host_tx->tick, &target_tx->tick);
7185     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7186     __put_user(host_tx->jitter, &target_tx->jitter);
7187     __put_user(host_tx->shift, &target_tx->shift);
7188     __put_user(host_tx->stabil, &target_tx->stabil);
7189     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7190     __put_user(host_tx->calcnt, &target_tx->calcnt);
7191     __put_user(host_tx->errcnt, &target_tx->errcnt);
7192     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7193     __put_user(host_tx->tai, &target_tx->tai);
7194 
7195     unlock_user_struct(target_tx, target_addr, 1);
7196     return 0;
7197 }
7198 #endif
7199 
7200 
7201 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7202 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7203                                               abi_long target_addr)
7204 {
7205     struct target__kernel_timex *target_tx;
7206 
7207     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7208                                  offsetof(struct target__kernel_timex,
7209                                           time))) {
7210         return -TARGET_EFAULT;
7211     }
7212 
7213     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7214         return -TARGET_EFAULT;
7215     }
7216 
7217     __get_user(host_tx->modes, &target_tx->modes);
7218     __get_user(host_tx->offset, &target_tx->offset);
7219     __get_user(host_tx->freq, &target_tx->freq);
7220     __get_user(host_tx->maxerror, &target_tx->maxerror);
7221     __get_user(host_tx->esterror, &target_tx->esterror);
7222     __get_user(host_tx->status, &target_tx->status);
7223     __get_user(host_tx->constant, &target_tx->constant);
7224     __get_user(host_tx->precision, &target_tx->precision);
7225     __get_user(host_tx->tolerance, &target_tx->tolerance);
7226     __get_user(host_tx->tick, &target_tx->tick);
7227     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7228     __get_user(host_tx->jitter, &target_tx->jitter);
7229     __get_user(host_tx->shift, &target_tx->shift);
7230     __get_user(host_tx->stabil, &target_tx->stabil);
7231     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7232     __get_user(host_tx->calcnt, &target_tx->calcnt);
7233     __get_user(host_tx->errcnt, &target_tx->errcnt);
7234     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7235     __get_user(host_tx->tai, &target_tx->tai);
7236 
7237     unlock_user_struct(target_tx, target_addr, 0);
7238     return 0;
7239 }
7240 
7241 static inline abi_long host_to_target_timex64(abi_long target_addr,
7242                                               struct timex *host_tx)
7243 {
7244     struct target__kernel_timex *target_tx;
7245 
7246    if (copy_to_user_timeval64(target_addr +
7247                               offsetof(struct target__kernel_timex, time),
7248                               &host_tx->time)) {
7249         return -TARGET_EFAULT;
7250     }
7251 
7252     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7253         return -TARGET_EFAULT;
7254     }
7255 
7256     __put_user(host_tx->modes, &target_tx->modes);
7257     __put_user(host_tx->offset, &target_tx->offset);
7258     __put_user(host_tx->freq, &target_tx->freq);
7259     __put_user(host_tx->maxerror, &target_tx->maxerror);
7260     __put_user(host_tx->esterror, &target_tx->esterror);
7261     __put_user(host_tx->status, &target_tx->status);
7262     __put_user(host_tx->constant, &target_tx->constant);
7263     __put_user(host_tx->precision, &target_tx->precision);
7264     __put_user(host_tx->tolerance, &target_tx->tolerance);
7265     __put_user(host_tx->tick, &target_tx->tick);
7266     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7267     __put_user(host_tx->jitter, &target_tx->jitter);
7268     __put_user(host_tx->shift, &target_tx->shift);
7269     __put_user(host_tx->stabil, &target_tx->stabil);
7270     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7271     __put_user(host_tx->calcnt, &target_tx->calcnt);
7272     __put_user(host_tx->errcnt, &target_tx->errcnt);
7273     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7274     __put_user(host_tx->tai, &target_tx->tai);
7275 
7276     unlock_user_struct(target_tx, target_addr, 1);
7277     return 0;
7278 }
7279 #endif
7280 
7281 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7282 #define sigev_notify_thread_id _sigev_un._tid
7283 #endif
7284 
7285 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7286                                                abi_ulong target_addr)
7287 {
7288     struct target_sigevent *target_sevp;
7289 
7290     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7291         return -TARGET_EFAULT;
7292     }
7293 
7294     /* This union is awkward on 64 bit systems because it has a 32 bit
7295      * integer and a pointer in it; we follow the conversion approach
7296      * used for handling sigval types in signal.c so the guest should get
7297      * the correct value back even if we did a 64 bit byteswap and it's
7298      * using the 32 bit integer.
7299      */
7300     host_sevp->sigev_value.sival_ptr =
7301         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7302     host_sevp->sigev_signo =
7303         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7304     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7305     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7306 
7307     unlock_user_struct(target_sevp, target_addr, 1);
7308     return 0;
7309 }
7310 
7311 #if defined(TARGET_NR_mlockall)
7312 static inline int target_to_host_mlockall_arg(int arg)
7313 {
7314     int result = 0;
7315 
7316     if (arg & TARGET_MCL_CURRENT) {
7317         result |= MCL_CURRENT;
7318     }
7319     if (arg & TARGET_MCL_FUTURE) {
7320         result |= MCL_FUTURE;
7321     }
7322 #ifdef MCL_ONFAULT
7323     if (arg & TARGET_MCL_ONFAULT) {
7324         result |= MCL_ONFAULT;
7325     }
7326 #endif
7327 
7328     return result;
7329 }
7330 #endif
7331 
7332 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7333      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7334      defined(TARGET_NR_newfstatat))
7335 static inline abi_long host_to_target_stat64(void *cpu_env,
7336                                              abi_ulong target_addr,
7337                                              struct stat *host_st)
7338 {
7339 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7340     if (((CPUARMState *)cpu_env)->eabi) {
7341         struct target_eabi_stat64 *target_st;
7342 
7343         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7344             return -TARGET_EFAULT;
7345         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7346         __put_user(host_st->st_dev, &target_st->st_dev);
7347         __put_user(host_st->st_ino, &target_st->st_ino);
7348 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7349         __put_user(host_st->st_ino, &target_st->__st_ino);
7350 #endif
7351         __put_user(host_st->st_mode, &target_st->st_mode);
7352         __put_user(host_st->st_nlink, &target_st->st_nlink);
7353         __put_user(host_st->st_uid, &target_st->st_uid);
7354         __put_user(host_st->st_gid, &target_st->st_gid);
7355         __put_user(host_st->st_rdev, &target_st->st_rdev);
7356         __put_user(host_st->st_size, &target_st->st_size);
7357         __put_user(host_st->st_blksize, &target_st->st_blksize);
7358         __put_user(host_st->st_blocks, &target_st->st_blocks);
7359         __put_user(host_st->st_atime, &target_st->target_st_atime);
7360         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7361         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7362 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7363         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7364         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7365         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7366 #endif
7367         unlock_user_struct(target_st, target_addr, 1);
7368     } else
7369 #endif
7370     {
7371 #if defined(TARGET_HAS_STRUCT_STAT64)
7372         struct target_stat64 *target_st;
7373 #else
7374         struct target_stat *target_st;
7375 #endif
7376 
7377         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7378             return -TARGET_EFAULT;
7379         memset(target_st, 0, sizeof(*target_st));
7380         __put_user(host_st->st_dev, &target_st->st_dev);
7381         __put_user(host_st->st_ino, &target_st->st_ino);
7382 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7383         __put_user(host_st->st_ino, &target_st->__st_ino);
7384 #endif
7385         __put_user(host_st->st_mode, &target_st->st_mode);
7386         __put_user(host_st->st_nlink, &target_st->st_nlink);
7387         __put_user(host_st->st_uid, &target_st->st_uid);
7388         __put_user(host_st->st_gid, &target_st->st_gid);
7389         __put_user(host_st->st_rdev, &target_st->st_rdev);
7390         /* XXX: better use of kernel struct */
7391         __put_user(host_st->st_size, &target_st->st_size);
7392         __put_user(host_st->st_blksize, &target_st->st_blksize);
7393         __put_user(host_st->st_blocks, &target_st->st_blocks);
7394         __put_user(host_st->st_atime, &target_st->target_st_atime);
7395         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7396         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7397 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7398         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7399         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7400         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7401 #endif
7402         unlock_user_struct(target_st, target_addr, 1);
7403     }
7404 
7405     return 0;
7406 }
7407 #endif
7408 
7409 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7410 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7411                                             abi_ulong target_addr)
7412 {
7413     struct target_statx *target_stx;
7414 
7415     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7416         return -TARGET_EFAULT;
7417     }
7418     memset(target_stx, 0, sizeof(*target_stx));
7419 
7420     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7421     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7422     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7423     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7424     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7425     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7426     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7427     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7428     __put_user(host_stx->stx_size, &target_stx->stx_size);
7429     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7430     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7431     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7432     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7433     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7434     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7435     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7436     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7437     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7438     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7439     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7440     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7441     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7442     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7443 
7444     unlock_user_struct(target_stx, target_addr, 1);
7445 
7446     return 0;
7447 }
7448 #endif
7449 
7450 static int do_sys_futex(int *uaddr, int op, int val,
7451                          const struct timespec *timeout, int *uaddr2,
7452                          int val3)
7453 {
7454 #if HOST_LONG_BITS == 64
7455 #if defined(__NR_futex)
7456     /* always a 64-bit time_t, it doesn't define _time64 version  */
7457     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7458 
7459 #endif
7460 #else /* HOST_LONG_BITS == 64 */
7461 #if defined(__NR_futex_time64)
7462     if (sizeof(timeout->tv_sec) == 8) {
7463         /* _time64 function on 32bit arch */
7464         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7465     }
7466 #endif
7467 #if defined(__NR_futex)
7468     /* old function on 32bit arch */
7469     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7470 #endif
7471 #endif /* HOST_LONG_BITS == 64 */
7472     g_assert_not_reached();
7473 }
7474 
7475 static int do_safe_futex(int *uaddr, int op, int val,
7476                          const struct timespec *timeout, int *uaddr2,
7477                          int val3)
7478 {
7479 #if HOST_LONG_BITS == 64
7480 #if defined(__NR_futex)
7481     /* always a 64-bit time_t, it doesn't define _time64 version  */
7482     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7483 #endif
7484 #else /* HOST_LONG_BITS == 64 */
7485 #if defined(__NR_futex_time64)
7486     if (sizeof(timeout->tv_sec) == 8) {
7487         /* _time64 function on 32bit arch */
7488         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7489                                            val3));
7490     }
7491 #endif
7492 #if defined(__NR_futex)
7493     /* old function on 32bit arch */
7494     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7495 #endif
7496 #endif /* HOST_LONG_BITS == 64 */
7497     return -TARGET_ENOSYS;
7498 }
7499 
7500 /* ??? Using host futex calls even when target atomic operations
7501    are not really atomic probably breaks things.  However implementing
7502    futexes locally would make futexes shared between multiple processes
7503    tricky.  However they're probably useless because guest atomic
7504    operations won't work either.  */
7505 #if defined(TARGET_NR_futex)
7506 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7507                     target_ulong timeout, target_ulong uaddr2, int val3)
7508 {
7509     struct timespec ts, *pts;
7510     int base_op;
7511 
7512     /* ??? We assume FUTEX_* constants are the same on both host
7513        and target.  */
7514 #ifdef FUTEX_CMD_MASK
7515     base_op = op & FUTEX_CMD_MASK;
7516 #else
7517     base_op = op;
7518 #endif
7519     switch (base_op) {
7520     case FUTEX_WAIT:
7521     case FUTEX_WAIT_BITSET:
7522         if (timeout) {
7523             pts = &ts;
7524             target_to_host_timespec(pts, timeout);
7525         } else {
7526             pts = NULL;
7527         }
7528         return do_safe_futex(g2h(cpu, uaddr),
7529                              op, tswap32(val), pts, NULL, val3);
7530     case FUTEX_WAKE:
7531         return do_safe_futex(g2h(cpu, uaddr),
7532                              op, val, NULL, NULL, 0);
7533     case FUTEX_FD:
7534         return do_safe_futex(g2h(cpu, uaddr),
7535                              op, val, NULL, NULL, 0);
7536     case FUTEX_REQUEUE:
7537     case FUTEX_CMP_REQUEUE:
7538     case FUTEX_WAKE_OP:
7539         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7540            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7541            But the prototype takes a `struct timespec *'; insert casts
7542            to satisfy the compiler.  We do not need to tswap TIMEOUT
7543            since it's not compared to guest memory.  */
7544         pts = (struct timespec *)(uintptr_t) timeout;
7545         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7546                              (base_op == FUTEX_CMP_REQUEUE
7547                               ? tswap32(val3) : val3));
7548     default:
7549         return -TARGET_ENOSYS;
7550     }
7551 }
7552 #endif
7553 
7554 #if defined(TARGET_NR_futex_time64)
7555 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7556                            int val, target_ulong timeout,
7557                            target_ulong uaddr2, int val3)
7558 {
7559     struct timespec ts, *pts;
7560     int base_op;
7561 
7562     /* ??? We assume FUTEX_* constants are the same on both host
7563        and target.  */
7564 #ifdef FUTEX_CMD_MASK
7565     base_op = op & FUTEX_CMD_MASK;
7566 #else
7567     base_op = op;
7568 #endif
7569     switch (base_op) {
7570     case FUTEX_WAIT:
7571     case FUTEX_WAIT_BITSET:
7572         if (timeout) {
7573             pts = &ts;
7574             if (target_to_host_timespec64(pts, timeout)) {
7575                 return -TARGET_EFAULT;
7576             }
7577         } else {
7578             pts = NULL;
7579         }
7580         return do_safe_futex(g2h(cpu, uaddr), op,
7581                              tswap32(val), pts, NULL, val3);
7582     case FUTEX_WAKE:
7583         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7584     case FUTEX_FD:
7585         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7586     case FUTEX_REQUEUE:
7587     case FUTEX_CMP_REQUEUE:
7588     case FUTEX_WAKE_OP:
7589         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7590            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7591            But the prototype takes a `struct timespec *'; insert casts
7592            to satisfy the compiler.  We do not need to tswap TIMEOUT
7593            since it's not compared to guest memory.  */
7594         pts = (struct timespec *)(uintptr_t) timeout;
7595         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7596                              (base_op == FUTEX_CMP_REQUEUE
7597                               ? tswap32(val3) : val3));
7598     default:
7599         return -TARGET_ENOSYS;
7600     }
7601 }
7602 #endif
7603 
7604 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7605 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7606                                      abi_long handle, abi_long mount_id,
7607                                      abi_long flags)
7608 {
7609     struct file_handle *target_fh;
7610     struct file_handle *fh;
7611     int mid = 0;
7612     abi_long ret;
7613     char *name;
7614     unsigned int size, total_size;
7615 
7616     if (get_user_s32(size, handle)) {
7617         return -TARGET_EFAULT;
7618     }
7619 
7620     name = lock_user_string(pathname);
7621     if (!name) {
7622         return -TARGET_EFAULT;
7623     }
7624 
7625     total_size = sizeof(struct file_handle) + size;
7626     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7627     if (!target_fh) {
7628         unlock_user(name, pathname, 0);
7629         return -TARGET_EFAULT;
7630     }
7631 
7632     fh = g_malloc0(total_size);
7633     fh->handle_bytes = size;
7634 
7635     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7636     unlock_user(name, pathname, 0);
7637 
7638     /* man name_to_handle_at(2):
7639      * Other than the use of the handle_bytes field, the caller should treat
7640      * the file_handle structure as an opaque data type
7641      */
7642 
7643     memcpy(target_fh, fh, total_size);
7644     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7645     target_fh->handle_type = tswap32(fh->handle_type);
7646     g_free(fh);
7647     unlock_user(target_fh, handle, total_size);
7648 
7649     if (put_user_s32(mid, mount_id)) {
7650         return -TARGET_EFAULT;
7651     }
7652 
7653     return ret;
7654 
7655 }
7656 #endif
7657 
7658 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7659 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7660                                      abi_long flags)
7661 {
7662     struct file_handle *target_fh;
7663     struct file_handle *fh;
7664     unsigned int size, total_size;
7665     abi_long ret;
7666 
7667     if (get_user_s32(size, handle)) {
7668         return -TARGET_EFAULT;
7669     }
7670 
7671     total_size = sizeof(struct file_handle) + size;
7672     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7673     if (!target_fh) {
7674         return -TARGET_EFAULT;
7675     }
7676 
7677     fh = g_memdup(target_fh, total_size);
7678     fh->handle_bytes = size;
7679     fh->handle_type = tswap32(target_fh->handle_type);
7680 
7681     ret = get_errno(open_by_handle_at(mount_fd, fh,
7682                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7683 
7684     g_free(fh);
7685 
7686     unlock_user(target_fh, handle, total_size);
7687 
7688     return ret;
7689 }
7690 #endif
7691 
7692 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7693 
7694 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7695 {
7696     int host_flags;
7697     target_sigset_t *target_mask;
7698     sigset_t host_mask;
7699     abi_long ret;
7700 
7701     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7702         return -TARGET_EINVAL;
7703     }
7704     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7705         return -TARGET_EFAULT;
7706     }
7707 
7708     target_to_host_sigset(&host_mask, target_mask);
7709 
7710     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7711 
7712     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7713     if (ret >= 0) {
7714         fd_trans_register(ret, &target_signalfd_trans);
7715     }
7716 
7717     unlock_user_struct(target_mask, mask, 0);
7718 
7719     return ret;
7720 }
7721 #endif
7722 
7723 /* Map host to target signal numbers for the wait family of syscalls.
7724    Assume all other status bits are the same.  */
7725 int host_to_target_waitstatus(int status)
7726 {
7727     if (WIFSIGNALED(status)) {
7728         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7729     }
7730     if (WIFSTOPPED(status)) {
7731         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7732                | (status & 0xff);
7733     }
7734     return status;
7735 }
7736 
7737 static int open_self_cmdline(void *cpu_env, int fd)
7738 {
7739     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7740     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7741     int i;
7742 
7743     for (i = 0; i < bprm->argc; i++) {
7744         size_t len = strlen(bprm->argv[i]) + 1;
7745 
7746         if (write(fd, bprm->argv[i], len) != len) {
7747             return -1;
7748         }
7749     }
7750 
7751     return 0;
7752 }
7753 
7754 static int open_self_maps(void *cpu_env, int fd)
7755 {
7756     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7757     TaskState *ts = cpu->opaque;
7758     GSList *map_info = read_self_maps();
7759     GSList *s;
7760     int count;
7761 
7762     for (s = map_info; s; s = g_slist_next(s)) {
7763         MapInfo *e = (MapInfo *) s->data;
7764 
7765         if (h2g_valid(e->start)) {
7766             unsigned long min = e->start;
7767             unsigned long max = e->end;
7768             int flags = page_get_flags(h2g(min));
7769             const char *path;
7770 
7771             max = h2g_valid(max - 1) ?
7772                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7773 
7774             if (page_check_range(h2g(min), max - min, flags) == -1) {
7775                 continue;
7776             }
7777 
7778             if (h2g(min) == ts->info->stack_limit) {
7779                 path = "[stack]";
7780             } else {
7781                 path = e->path;
7782             }
7783 
7784             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7785                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7786                             h2g(min), h2g(max - 1) + 1,
7787                             (flags & PAGE_READ) ? 'r' : '-',
7788                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7789                             (flags & PAGE_EXEC) ? 'x' : '-',
7790                             e->is_priv ? 'p' : '-',
7791                             (uint64_t) e->offset, e->dev, e->inode);
7792             if (path) {
7793                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7794             } else {
7795                 dprintf(fd, "\n");
7796             }
7797         }
7798     }
7799 
7800     free_self_maps(map_info);
7801 
7802 #ifdef TARGET_VSYSCALL_PAGE
7803     /*
7804      * We only support execution from the vsyscall page.
7805      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7806      */
7807     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7808                     " --xp 00000000 00:00 0",
7809                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7810     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7811 #endif
7812 
7813     return 0;
7814 }
7815 
7816 static int open_self_stat(void *cpu_env, int fd)
7817 {
7818     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7819     TaskState *ts = cpu->opaque;
7820     g_autoptr(GString) buf = g_string_new(NULL);
7821     int i;
7822 
7823     for (i = 0; i < 44; i++) {
7824         if (i == 0) {
7825             /* pid */
7826             g_string_printf(buf, FMT_pid " ", getpid());
7827         } else if (i == 1) {
7828             /* app name */
7829             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7830             bin = bin ? bin + 1 : ts->bprm->argv[0];
7831             g_string_printf(buf, "(%.15s) ", bin);
7832         } else if (i == 3) {
7833             /* ppid */
7834             g_string_printf(buf, FMT_pid " ", getppid());
7835         } else if (i == 27) {
7836             /* stack bottom */
7837             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7838         } else {
7839             /* for the rest, there is MasterCard */
7840             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7841         }
7842 
7843         if (write(fd, buf->str, buf->len) != buf->len) {
7844             return -1;
7845         }
7846     }
7847 
7848     return 0;
7849 }
7850 
7851 static int open_self_auxv(void *cpu_env, int fd)
7852 {
7853     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7854     TaskState *ts = cpu->opaque;
7855     abi_ulong auxv = ts->info->saved_auxv;
7856     abi_ulong len = ts->info->auxv_len;
7857     char *ptr;
7858 
7859     /*
7860      * Auxiliary vector is stored in target process stack.
7861      * read in whole auxv vector and copy it to file
7862      */
7863     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7864     if (ptr != NULL) {
7865         while (len > 0) {
7866             ssize_t r;
7867             r = write(fd, ptr, len);
7868             if (r <= 0) {
7869                 break;
7870             }
7871             len -= r;
7872             ptr += r;
7873         }
7874         lseek(fd, 0, SEEK_SET);
7875         unlock_user(ptr, auxv, len);
7876     }
7877 
7878     return 0;
7879 }
7880 
7881 static int is_proc_myself(const char *filename, const char *entry)
7882 {
7883     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7884         filename += strlen("/proc/");
7885         if (!strncmp(filename, "self/", strlen("self/"))) {
7886             filename += strlen("self/");
7887         } else if (*filename >= '1' && *filename <= '9') {
7888             char myself[80];
7889             snprintf(myself, sizeof(myself), "%d/", getpid());
7890             if (!strncmp(filename, myself, strlen(myself))) {
7891                 filename += strlen(myself);
7892             } else {
7893                 return 0;
7894             }
7895         } else {
7896             return 0;
7897         }
7898         if (!strcmp(filename, entry)) {
7899             return 1;
7900         }
7901     }
7902     return 0;
7903 }
7904 
7905 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7906     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7907 static int is_proc(const char *filename, const char *entry)
7908 {
7909     return strcmp(filename, entry) == 0;
7910 }
7911 #endif
7912 
7913 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7914 static int open_net_route(void *cpu_env, int fd)
7915 {
7916     FILE *fp;
7917     char *line = NULL;
7918     size_t len = 0;
7919     ssize_t read;
7920 
7921     fp = fopen("/proc/net/route", "r");
7922     if (fp == NULL) {
7923         return -1;
7924     }
7925 
7926     /* read header */
7927 
7928     read = getline(&line, &len, fp);
7929     dprintf(fd, "%s", line);
7930 
7931     /* read routes */
7932 
7933     while ((read = getline(&line, &len, fp)) != -1) {
7934         char iface[16];
7935         uint32_t dest, gw, mask;
7936         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7937         int fields;
7938 
7939         fields = sscanf(line,
7940                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7941                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7942                         &mask, &mtu, &window, &irtt);
7943         if (fields != 11) {
7944             continue;
7945         }
7946         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7947                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7948                 metric, tswap32(mask), mtu, window, irtt);
7949     }
7950 
7951     free(line);
7952     fclose(fp);
7953 
7954     return 0;
7955 }
7956 #endif
7957 
7958 #if defined(TARGET_SPARC)
7959 static int open_cpuinfo(void *cpu_env, int fd)
7960 {
7961     dprintf(fd, "type\t\t: sun4u\n");
7962     return 0;
7963 }
7964 #endif
7965 
7966 #if defined(TARGET_HPPA)
7967 static int open_cpuinfo(void *cpu_env, int fd)
7968 {
7969     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7970     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7971     dprintf(fd, "capabilities\t: os32\n");
7972     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7973     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7974     return 0;
7975 }
7976 #endif
7977 
7978 #if defined(TARGET_M68K)
7979 static int open_hardware(void *cpu_env, int fd)
7980 {
7981     dprintf(fd, "Model:\t\tqemu-m68k\n");
7982     return 0;
7983 }
7984 #endif
7985 
7986 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7987 {
7988     struct fake_open {
7989         const char *filename;
7990         int (*fill)(void *cpu_env, int fd);
7991         int (*cmp)(const char *s1, const char *s2);
7992     };
7993     const struct fake_open *fake_open;
7994     static const struct fake_open fakes[] = {
7995         { "maps", open_self_maps, is_proc_myself },
7996         { "stat", open_self_stat, is_proc_myself },
7997         { "auxv", open_self_auxv, is_proc_myself },
7998         { "cmdline", open_self_cmdline, is_proc_myself },
7999 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8000         { "/proc/net/route", open_net_route, is_proc },
8001 #endif
8002 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8003         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8004 #endif
8005 #if defined(TARGET_M68K)
8006         { "/proc/hardware", open_hardware, is_proc },
8007 #endif
8008         { NULL, NULL, NULL }
8009     };
8010 
8011     if (is_proc_myself(pathname, "exe")) {
8012         int execfd = qemu_getauxval(AT_EXECFD);
8013         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8014     }
8015 
8016     for (fake_open = fakes; fake_open->filename; fake_open++) {
8017         if (fake_open->cmp(pathname, fake_open->filename)) {
8018             break;
8019         }
8020     }
8021 
8022     if (fake_open->filename) {
8023         const char *tmpdir;
8024         char filename[PATH_MAX];
8025         int fd, r;
8026 
8027         /* create temporary file to map stat to */
8028         tmpdir = getenv("TMPDIR");
8029         if (!tmpdir)
8030             tmpdir = "/tmp";
8031         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8032         fd = mkstemp(filename);
8033         if (fd < 0) {
8034             return fd;
8035         }
8036         unlink(filename);
8037 
8038         if ((r = fake_open->fill(cpu_env, fd))) {
8039             int e = errno;
8040             close(fd);
8041             errno = e;
8042             return r;
8043         }
8044         lseek(fd, 0, SEEK_SET);
8045 
8046         return fd;
8047     }
8048 
8049     return safe_openat(dirfd, path(pathname), flags, mode);
8050 }
8051 
8052 #define TIMER_MAGIC 0x0caf0000
8053 #define TIMER_MAGIC_MASK 0xffff0000
8054 
8055 /* Convert QEMU provided timer ID back to internal 16bit index format */
8056 static target_timer_t get_timer_id(abi_long arg)
8057 {
8058     target_timer_t timerid = arg;
8059 
8060     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8061         return -TARGET_EINVAL;
8062     }
8063 
8064     timerid &= 0xffff;
8065 
8066     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8067         return -TARGET_EINVAL;
8068     }
8069 
8070     return timerid;
8071 }
8072 
8073 static int target_to_host_cpu_mask(unsigned long *host_mask,
8074                                    size_t host_size,
8075                                    abi_ulong target_addr,
8076                                    size_t target_size)
8077 {
8078     unsigned target_bits = sizeof(abi_ulong) * 8;
8079     unsigned host_bits = sizeof(*host_mask) * 8;
8080     abi_ulong *target_mask;
8081     unsigned i, j;
8082 
8083     assert(host_size >= target_size);
8084 
8085     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8086     if (!target_mask) {
8087         return -TARGET_EFAULT;
8088     }
8089     memset(host_mask, 0, host_size);
8090 
8091     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8092         unsigned bit = i * target_bits;
8093         abi_ulong val;
8094 
8095         __get_user(val, &target_mask[i]);
8096         for (j = 0; j < target_bits; j++, bit++) {
8097             if (val & (1UL << j)) {
8098                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8099             }
8100         }
8101     }
8102 
8103     unlock_user(target_mask, target_addr, 0);
8104     return 0;
8105 }
8106 
8107 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8108                                    size_t host_size,
8109                                    abi_ulong target_addr,
8110                                    size_t target_size)
8111 {
8112     unsigned target_bits = sizeof(abi_ulong) * 8;
8113     unsigned host_bits = sizeof(*host_mask) * 8;
8114     abi_ulong *target_mask;
8115     unsigned i, j;
8116 
8117     assert(host_size >= target_size);
8118 
8119     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8120     if (!target_mask) {
8121         return -TARGET_EFAULT;
8122     }
8123 
8124     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8125         unsigned bit = i * target_bits;
8126         abi_ulong val = 0;
8127 
8128         for (j = 0; j < target_bits; j++, bit++) {
8129             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8130                 val |= 1UL << j;
8131             }
8132         }
8133         __put_user(val, &target_mask[i]);
8134     }
8135 
8136     unlock_user(target_mask, target_addr, target_size);
8137     return 0;
8138 }
8139 
8140 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8141 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8142 #endif
8143 
8144 /* This is an internal helper for do_syscall so that it is easier
8145  * to have a single return point, so that actions, such as logging
8146  * of syscall results, can be performed.
8147  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8148  */
8149 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8150                             abi_long arg2, abi_long arg3, abi_long arg4,
8151                             abi_long arg5, abi_long arg6, abi_long arg7,
8152                             abi_long arg8)
8153 {
8154     CPUState *cpu = env_cpu(cpu_env);
8155     abi_long ret;
8156 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8157     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8158     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8159     || defined(TARGET_NR_statx)
8160     struct stat st;
8161 #endif
8162 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8163     || defined(TARGET_NR_fstatfs)
8164     struct statfs stfs;
8165 #endif
8166     void *p;
8167 
8168     switch(num) {
8169     case TARGET_NR_exit:
8170         /* In old applications this may be used to implement _exit(2).
8171            However in threaded applications it is used for thread termination,
8172            and _exit_group is used for application termination.
8173            Do thread termination if we have more then one thread.  */
8174 
8175         if (block_signals()) {
8176             return -TARGET_ERESTARTSYS;
8177         }
8178 
8179         pthread_mutex_lock(&clone_lock);
8180 
8181         if (CPU_NEXT(first_cpu)) {
8182             TaskState *ts = cpu->opaque;
8183 
8184             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8185             object_unref(OBJECT(cpu));
8186             /*
8187              * At this point the CPU should be unrealized and removed
8188              * from cpu lists. We can clean-up the rest of the thread
8189              * data without the lock held.
8190              */
8191 
8192             pthread_mutex_unlock(&clone_lock);
8193 
8194             if (ts->child_tidptr) {
8195                 put_user_u32(0, ts->child_tidptr);
8196                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8197                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8198             }
8199             thread_cpu = NULL;
8200             g_free(ts);
8201             rcu_unregister_thread();
8202             pthread_exit(NULL);
8203         }
8204 
8205         pthread_mutex_unlock(&clone_lock);
8206         preexit_cleanup(cpu_env, arg1);
8207         _exit(arg1);
8208         return 0; /* avoid warning */
8209     case TARGET_NR_read:
8210         if (arg2 == 0 && arg3 == 0) {
8211             return get_errno(safe_read(arg1, 0, 0));
8212         } else {
8213             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8214                 return -TARGET_EFAULT;
8215             ret = get_errno(safe_read(arg1, p, arg3));
8216             if (ret >= 0 &&
8217                 fd_trans_host_to_target_data(arg1)) {
8218                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8219             }
8220             unlock_user(p, arg2, ret);
8221         }
8222         return ret;
8223     case TARGET_NR_write:
8224         if (arg2 == 0 && arg3 == 0) {
8225             return get_errno(safe_write(arg1, 0, 0));
8226         }
8227         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8228             return -TARGET_EFAULT;
8229         if (fd_trans_target_to_host_data(arg1)) {
8230             void *copy = g_malloc(arg3);
8231             memcpy(copy, p, arg3);
8232             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8233             if (ret >= 0) {
8234                 ret = get_errno(safe_write(arg1, copy, ret));
8235             }
8236             g_free(copy);
8237         } else {
8238             ret = get_errno(safe_write(arg1, p, arg3));
8239         }
8240         unlock_user(p, arg2, 0);
8241         return ret;
8242 
8243 #ifdef TARGET_NR_open
8244     case TARGET_NR_open:
8245         if (!(p = lock_user_string(arg1)))
8246             return -TARGET_EFAULT;
8247         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8248                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8249                                   arg3));
8250         fd_trans_unregister(ret);
8251         unlock_user(p, arg1, 0);
8252         return ret;
8253 #endif
8254     case TARGET_NR_openat:
8255         if (!(p = lock_user_string(arg2)))
8256             return -TARGET_EFAULT;
8257         ret = get_errno(do_openat(cpu_env, arg1, p,
8258                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8259                                   arg4));
8260         fd_trans_unregister(ret);
8261         unlock_user(p, arg2, 0);
8262         return ret;
8263 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8264     case TARGET_NR_name_to_handle_at:
8265         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8266         return ret;
8267 #endif
8268 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8269     case TARGET_NR_open_by_handle_at:
8270         ret = do_open_by_handle_at(arg1, arg2, arg3);
8271         fd_trans_unregister(ret);
8272         return ret;
8273 #endif
8274     case TARGET_NR_close:
8275         fd_trans_unregister(arg1);
8276         return get_errno(close(arg1));
8277 
8278     case TARGET_NR_brk:
8279         return do_brk(arg1);
8280 #ifdef TARGET_NR_fork
8281     case TARGET_NR_fork:
8282         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8283 #endif
8284 #ifdef TARGET_NR_waitpid
8285     case TARGET_NR_waitpid:
8286         {
8287             int status;
8288             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8289             if (!is_error(ret) && arg2 && ret
8290                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8291                 return -TARGET_EFAULT;
8292         }
8293         return ret;
8294 #endif
8295 #ifdef TARGET_NR_waitid
8296     case TARGET_NR_waitid:
8297         {
8298             siginfo_t info;
8299             info.si_pid = 0;
8300             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8301             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8302                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8303                     return -TARGET_EFAULT;
8304                 host_to_target_siginfo(p, &info);
8305                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8306             }
8307         }
8308         return ret;
8309 #endif
8310 #ifdef TARGET_NR_creat /* not on alpha */
8311     case TARGET_NR_creat:
8312         if (!(p = lock_user_string(arg1)))
8313             return -TARGET_EFAULT;
8314         ret = get_errno(creat(p, arg2));
8315         fd_trans_unregister(ret);
8316         unlock_user(p, arg1, 0);
8317         return ret;
8318 #endif
8319 #ifdef TARGET_NR_link
8320     case TARGET_NR_link:
8321         {
8322             void * p2;
8323             p = lock_user_string(arg1);
8324             p2 = lock_user_string(arg2);
8325             if (!p || !p2)
8326                 ret = -TARGET_EFAULT;
8327             else
8328                 ret = get_errno(link(p, p2));
8329             unlock_user(p2, arg2, 0);
8330             unlock_user(p, arg1, 0);
8331         }
8332         return ret;
8333 #endif
8334 #if defined(TARGET_NR_linkat)
8335     case TARGET_NR_linkat:
8336         {
8337             void * p2 = NULL;
8338             if (!arg2 || !arg4)
8339                 return -TARGET_EFAULT;
8340             p  = lock_user_string(arg2);
8341             p2 = lock_user_string(arg4);
8342             if (!p || !p2)
8343                 ret = -TARGET_EFAULT;
8344             else
8345                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8346             unlock_user(p, arg2, 0);
8347             unlock_user(p2, arg4, 0);
8348         }
8349         return ret;
8350 #endif
8351 #ifdef TARGET_NR_unlink
8352     case TARGET_NR_unlink:
8353         if (!(p = lock_user_string(arg1)))
8354             return -TARGET_EFAULT;
8355         ret = get_errno(unlink(p));
8356         unlock_user(p, arg1, 0);
8357         return ret;
8358 #endif
8359 #if defined(TARGET_NR_unlinkat)
8360     case TARGET_NR_unlinkat:
8361         if (!(p = lock_user_string(arg2)))
8362             return -TARGET_EFAULT;
8363         ret = get_errno(unlinkat(arg1, p, arg3));
8364         unlock_user(p, arg2, 0);
8365         return ret;
8366 #endif
8367     case TARGET_NR_execve:
8368         {
8369             char **argp, **envp;
8370             int argc, envc;
8371             abi_ulong gp;
8372             abi_ulong guest_argp;
8373             abi_ulong guest_envp;
8374             abi_ulong addr;
8375             char **q;
8376 
8377             argc = 0;
8378             guest_argp = arg2;
8379             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8380                 if (get_user_ual(addr, gp))
8381                     return -TARGET_EFAULT;
8382                 if (!addr)
8383                     break;
8384                 argc++;
8385             }
8386             envc = 0;
8387             guest_envp = arg3;
8388             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8389                 if (get_user_ual(addr, gp))
8390                     return -TARGET_EFAULT;
8391                 if (!addr)
8392                     break;
8393                 envc++;
8394             }
8395 
8396             argp = g_new0(char *, argc + 1);
8397             envp = g_new0(char *, envc + 1);
8398 
8399             for (gp = guest_argp, q = argp; gp;
8400                   gp += sizeof(abi_ulong), q++) {
8401                 if (get_user_ual(addr, gp))
8402                     goto execve_efault;
8403                 if (!addr)
8404                     break;
8405                 if (!(*q = lock_user_string(addr)))
8406                     goto execve_efault;
8407             }
8408             *q = NULL;
8409 
8410             for (gp = guest_envp, q = envp; gp;
8411                   gp += sizeof(abi_ulong), q++) {
8412                 if (get_user_ual(addr, gp))
8413                     goto execve_efault;
8414                 if (!addr)
8415                     break;
8416                 if (!(*q = lock_user_string(addr)))
8417                     goto execve_efault;
8418             }
8419             *q = NULL;
8420 
8421             if (!(p = lock_user_string(arg1)))
8422                 goto execve_efault;
8423             /* Although execve() is not an interruptible syscall it is
8424              * a special case where we must use the safe_syscall wrapper:
8425              * if we allow a signal to happen before we make the host
8426              * syscall then we will 'lose' it, because at the point of
8427              * execve the process leaves QEMU's control. So we use the
8428              * safe syscall wrapper to ensure that we either take the
8429              * signal as a guest signal, or else it does not happen
8430              * before the execve completes and makes it the other
8431              * program's problem.
8432              */
8433             ret = get_errno(safe_execve(p, argp, envp));
8434             unlock_user(p, arg1, 0);
8435 
8436             goto execve_end;
8437 
8438         execve_efault:
8439             ret = -TARGET_EFAULT;
8440 
8441         execve_end:
8442             for (gp = guest_argp, q = argp; *q;
8443                   gp += sizeof(abi_ulong), q++) {
8444                 if (get_user_ual(addr, gp)
8445                     || !addr)
8446                     break;
8447                 unlock_user(*q, addr, 0);
8448             }
8449             for (gp = guest_envp, q = envp; *q;
8450                   gp += sizeof(abi_ulong), q++) {
8451                 if (get_user_ual(addr, gp)
8452                     || !addr)
8453                     break;
8454                 unlock_user(*q, addr, 0);
8455             }
8456 
8457             g_free(argp);
8458             g_free(envp);
8459         }
8460         return ret;
8461     case TARGET_NR_chdir:
8462         if (!(p = lock_user_string(arg1)))
8463             return -TARGET_EFAULT;
8464         ret = get_errno(chdir(p));
8465         unlock_user(p, arg1, 0);
8466         return ret;
8467 #ifdef TARGET_NR_time
8468     case TARGET_NR_time:
8469         {
8470             time_t host_time;
8471             ret = get_errno(time(&host_time));
8472             if (!is_error(ret)
8473                 && arg1
8474                 && put_user_sal(host_time, arg1))
8475                 return -TARGET_EFAULT;
8476         }
8477         return ret;
8478 #endif
8479 #ifdef TARGET_NR_mknod
8480     case TARGET_NR_mknod:
8481         if (!(p = lock_user_string(arg1)))
8482             return -TARGET_EFAULT;
8483         ret = get_errno(mknod(p, arg2, arg3));
8484         unlock_user(p, arg1, 0);
8485         return ret;
8486 #endif
8487 #if defined(TARGET_NR_mknodat)
8488     case TARGET_NR_mknodat:
8489         if (!(p = lock_user_string(arg2)))
8490             return -TARGET_EFAULT;
8491         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8492         unlock_user(p, arg2, 0);
8493         return ret;
8494 #endif
8495 #ifdef TARGET_NR_chmod
8496     case TARGET_NR_chmod:
8497         if (!(p = lock_user_string(arg1)))
8498             return -TARGET_EFAULT;
8499         ret = get_errno(chmod(p, arg2));
8500         unlock_user(p, arg1, 0);
8501         return ret;
8502 #endif
8503 #ifdef TARGET_NR_lseek
8504     case TARGET_NR_lseek:
8505         return get_errno(lseek(arg1, arg2, arg3));
8506 #endif
8507 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8508     /* Alpha specific */
8509     case TARGET_NR_getxpid:
8510         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8511         return get_errno(getpid());
8512 #endif
8513 #ifdef TARGET_NR_getpid
8514     case TARGET_NR_getpid:
8515         return get_errno(getpid());
8516 #endif
8517     case TARGET_NR_mount:
8518         {
8519             /* need to look at the data field */
8520             void *p2, *p3;
8521 
8522             if (arg1) {
8523                 p = lock_user_string(arg1);
8524                 if (!p) {
8525                     return -TARGET_EFAULT;
8526                 }
8527             } else {
8528                 p = NULL;
8529             }
8530 
8531             p2 = lock_user_string(arg2);
8532             if (!p2) {
8533                 if (arg1) {
8534                     unlock_user(p, arg1, 0);
8535                 }
8536                 return -TARGET_EFAULT;
8537             }
8538 
8539             if (arg3) {
8540                 p3 = lock_user_string(arg3);
8541                 if (!p3) {
8542                     if (arg1) {
8543                         unlock_user(p, arg1, 0);
8544                     }
8545                     unlock_user(p2, arg2, 0);
8546                     return -TARGET_EFAULT;
8547                 }
8548             } else {
8549                 p3 = NULL;
8550             }
8551 
8552             /* FIXME - arg5 should be locked, but it isn't clear how to
8553              * do that since it's not guaranteed to be a NULL-terminated
8554              * string.
8555              */
8556             if (!arg5) {
8557                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8558             } else {
8559                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8560             }
8561             ret = get_errno(ret);
8562 
8563             if (arg1) {
8564                 unlock_user(p, arg1, 0);
8565             }
8566             unlock_user(p2, arg2, 0);
8567             if (arg3) {
8568                 unlock_user(p3, arg3, 0);
8569             }
8570         }
8571         return ret;
8572 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8573 #if defined(TARGET_NR_umount)
8574     case TARGET_NR_umount:
8575 #endif
8576 #if defined(TARGET_NR_oldumount)
8577     case TARGET_NR_oldumount:
8578 #endif
8579         if (!(p = lock_user_string(arg1)))
8580             return -TARGET_EFAULT;
8581         ret = get_errno(umount(p));
8582         unlock_user(p, arg1, 0);
8583         return ret;
8584 #endif
8585 #ifdef TARGET_NR_stime /* not on alpha */
8586     case TARGET_NR_stime:
8587         {
8588             struct timespec ts;
8589             ts.tv_nsec = 0;
8590             if (get_user_sal(ts.tv_sec, arg1)) {
8591                 return -TARGET_EFAULT;
8592             }
8593             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8594         }
8595 #endif
8596 #ifdef TARGET_NR_alarm /* not on alpha */
8597     case TARGET_NR_alarm:
8598         return alarm(arg1);
8599 #endif
8600 #ifdef TARGET_NR_pause /* not on alpha */
8601     case TARGET_NR_pause:
8602         if (!block_signals()) {
8603             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8604         }
8605         return -TARGET_EINTR;
8606 #endif
8607 #ifdef TARGET_NR_utime
8608     case TARGET_NR_utime:
8609         {
8610             struct utimbuf tbuf, *host_tbuf;
8611             struct target_utimbuf *target_tbuf;
8612             if (arg2) {
8613                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8614                     return -TARGET_EFAULT;
8615                 tbuf.actime = tswapal(target_tbuf->actime);
8616                 tbuf.modtime = tswapal(target_tbuf->modtime);
8617                 unlock_user_struct(target_tbuf, arg2, 0);
8618                 host_tbuf = &tbuf;
8619             } else {
8620                 host_tbuf = NULL;
8621             }
8622             if (!(p = lock_user_string(arg1)))
8623                 return -TARGET_EFAULT;
8624             ret = get_errno(utime(p, host_tbuf));
8625             unlock_user(p, arg1, 0);
8626         }
8627         return ret;
8628 #endif
8629 #ifdef TARGET_NR_utimes
8630     case TARGET_NR_utimes:
8631         {
8632             struct timeval *tvp, tv[2];
8633             if (arg2) {
8634                 if (copy_from_user_timeval(&tv[0], arg2)
8635                     || copy_from_user_timeval(&tv[1],
8636                                               arg2 + sizeof(struct target_timeval)))
8637                     return -TARGET_EFAULT;
8638                 tvp = tv;
8639             } else {
8640                 tvp = NULL;
8641             }
8642             if (!(p = lock_user_string(arg1)))
8643                 return -TARGET_EFAULT;
8644             ret = get_errno(utimes(p, tvp));
8645             unlock_user(p, arg1, 0);
8646         }
8647         return ret;
8648 #endif
8649 #if defined(TARGET_NR_futimesat)
8650     case TARGET_NR_futimesat:
8651         {
8652             struct timeval *tvp, tv[2];
8653             if (arg3) {
8654                 if (copy_from_user_timeval(&tv[0], arg3)
8655                     || copy_from_user_timeval(&tv[1],
8656                                               arg3 + sizeof(struct target_timeval)))
8657                     return -TARGET_EFAULT;
8658                 tvp = tv;
8659             } else {
8660                 tvp = NULL;
8661             }
8662             if (!(p = lock_user_string(arg2))) {
8663                 return -TARGET_EFAULT;
8664             }
8665             ret = get_errno(futimesat(arg1, path(p), tvp));
8666             unlock_user(p, arg2, 0);
8667         }
8668         return ret;
8669 #endif
8670 #ifdef TARGET_NR_access
8671     case TARGET_NR_access:
8672         if (!(p = lock_user_string(arg1))) {
8673             return -TARGET_EFAULT;
8674         }
8675         ret = get_errno(access(path(p), arg2));
8676         unlock_user(p, arg1, 0);
8677         return ret;
8678 #endif
8679 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8680     case TARGET_NR_faccessat:
8681         if (!(p = lock_user_string(arg2))) {
8682             return -TARGET_EFAULT;
8683         }
8684         ret = get_errno(faccessat(arg1, p, arg3, 0));
8685         unlock_user(p, arg2, 0);
8686         return ret;
8687 #endif
8688 #ifdef TARGET_NR_nice /* not on alpha */
8689     case TARGET_NR_nice:
8690         return get_errno(nice(arg1));
8691 #endif
8692     case TARGET_NR_sync:
8693         sync();
8694         return 0;
8695 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8696     case TARGET_NR_syncfs:
8697         return get_errno(syncfs(arg1));
8698 #endif
8699     case TARGET_NR_kill:
8700         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8701 #ifdef TARGET_NR_rename
8702     case TARGET_NR_rename:
8703         {
8704             void *p2;
8705             p = lock_user_string(arg1);
8706             p2 = lock_user_string(arg2);
8707             if (!p || !p2)
8708                 ret = -TARGET_EFAULT;
8709             else
8710                 ret = get_errno(rename(p, p2));
8711             unlock_user(p2, arg2, 0);
8712             unlock_user(p, arg1, 0);
8713         }
8714         return ret;
8715 #endif
8716 #if defined(TARGET_NR_renameat)
8717     case TARGET_NR_renameat:
8718         {
8719             void *p2;
8720             p  = lock_user_string(arg2);
8721             p2 = lock_user_string(arg4);
8722             if (!p || !p2)
8723                 ret = -TARGET_EFAULT;
8724             else
8725                 ret = get_errno(renameat(arg1, p, arg3, p2));
8726             unlock_user(p2, arg4, 0);
8727             unlock_user(p, arg2, 0);
8728         }
8729         return ret;
8730 #endif
8731 #if defined(TARGET_NR_renameat2)
8732     case TARGET_NR_renameat2:
8733         {
8734             void *p2;
8735             p  = lock_user_string(arg2);
8736             p2 = lock_user_string(arg4);
8737             if (!p || !p2) {
8738                 ret = -TARGET_EFAULT;
8739             } else {
8740                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8741             }
8742             unlock_user(p2, arg4, 0);
8743             unlock_user(p, arg2, 0);
8744         }
8745         return ret;
8746 #endif
8747 #ifdef TARGET_NR_mkdir
8748     case TARGET_NR_mkdir:
8749         if (!(p = lock_user_string(arg1)))
8750             return -TARGET_EFAULT;
8751         ret = get_errno(mkdir(p, arg2));
8752         unlock_user(p, arg1, 0);
8753         return ret;
8754 #endif
8755 #if defined(TARGET_NR_mkdirat)
8756     case TARGET_NR_mkdirat:
8757         if (!(p = lock_user_string(arg2)))
8758             return -TARGET_EFAULT;
8759         ret = get_errno(mkdirat(arg1, p, arg3));
8760         unlock_user(p, arg2, 0);
8761         return ret;
8762 #endif
8763 #ifdef TARGET_NR_rmdir
8764     case TARGET_NR_rmdir:
8765         if (!(p = lock_user_string(arg1)))
8766             return -TARGET_EFAULT;
8767         ret = get_errno(rmdir(p));
8768         unlock_user(p, arg1, 0);
8769         return ret;
8770 #endif
8771     case TARGET_NR_dup:
8772         ret = get_errno(dup(arg1));
8773         if (ret >= 0) {
8774             fd_trans_dup(arg1, ret);
8775         }
8776         return ret;
8777 #ifdef TARGET_NR_pipe
8778     case TARGET_NR_pipe:
8779         return do_pipe(cpu_env, arg1, 0, 0);
8780 #endif
8781 #ifdef TARGET_NR_pipe2
8782     case TARGET_NR_pipe2:
8783         return do_pipe(cpu_env, arg1,
8784                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8785 #endif
8786     case TARGET_NR_times:
8787         {
8788             struct target_tms *tmsp;
8789             struct tms tms;
8790             ret = get_errno(times(&tms));
8791             if (arg1) {
8792                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8793                 if (!tmsp)
8794                     return -TARGET_EFAULT;
8795                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8796                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8797                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8798                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8799             }
8800             if (!is_error(ret))
8801                 ret = host_to_target_clock_t(ret);
8802         }
8803         return ret;
8804     case TARGET_NR_acct:
8805         if (arg1 == 0) {
8806             ret = get_errno(acct(NULL));
8807         } else {
8808             if (!(p = lock_user_string(arg1))) {
8809                 return -TARGET_EFAULT;
8810             }
8811             ret = get_errno(acct(path(p)));
8812             unlock_user(p, arg1, 0);
8813         }
8814         return ret;
8815 #ifdef TARGET_NR_umount2
8816     case TARGET_NR_umount2:
8817         if (!(p = lock_user_string(arg1)))
8818             return -TARGET_EFAULT;
8819         ret = get_errno(umount2(p, arg2));
8820         unlock_user(p, arg1, 0);
8821         return ret;
8822 #endif
8823     case TARGET_NR_ioctl:
8824         return do_ioctl(arg1, arg2, arg3);
8825 #ifdef TARGET_NR_fcntl
8826     case TARGET_NR_fcntl:
8827         return do_fcntl(arg1, arg2, arg3);
8828 #endif
8829     case TARGET_NR_setpgid:
8830         return get_errno(setpgid(arg1, arg2));
8831     case TARGET_NR_umask:
8832         return get_errno(umask(arg1));
8833     case TARGET_NR_chroot:
8834         if (!(p = lock_user_string(arg1)))
8835             return -TARGET_EFAULT;
8836         ret = get_errno(chroot(p));
8837         unlock_user(p, arg1, 0);
8838         return ret;
8839 #ifdef TARGET_NR_dup2
8840     case TARGET_NR_dup2:
8841         ret = get_errno(dup2(arg1, arg2));
8842         if (ret >= 0) {
8843             fd_trans_dup(arg1, arg2);
8844         }
8845         return ret;
8846 #endif
8847 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8848     case TARGET_NR_dup3:
8849     {
8850         int host_flags;
8851 
8852         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8853             return -EINVAL;
8854         }
8855         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8856         ret = get_errno(dup3(arg1, arg2, host_flags));
8857         if (ret >= 0) {
8858             fd_trans_dup(arg1, arg2);
8859         }
8860         return ret;
8861     }
8862 #endif
8863 #ifdef TARGET_NR_getppid /* not on alpha */
8864     case TARGET_NR_getppid:
8865         return get_errno(getppid());
8866 #endif
8867 #ifdef TARGET_NR_getpgrp
8868     case TARGET_NR_getpgrp:
8869         return get_errno(getpgrp());
8870 #endif
8871     case TARGET_NR_setsid:
8872         return get_errno(setsid());
8873 #ifdef TARGET_NR_sigaction
8874     case TARGET_NR_sigaction:
8875         {
8876 #if defined(TARGET_MIPS)
8877 	    struct target_sigaction act, oact, *pact, *old_act;
8878 
8879 	    if (arg2) {
8880                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8881                     return -TARGET_EFAULT;
8882 		act._sa_handler = old_act->_sa_handler;
8883 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8884 		act.sa_flags = old_act->sa_flags;
8885 		unlock_user_struct(old_act, arg2, 0);
8886 		pact = &act;
8887 	    } else {
8888 		pact = NULL;
8889 	    }
8890 
8891         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8892 
8893 	    if (!is_error(ret) && arg3) {
8894                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8895                     return -TARGET_EFAULT;
8896 		old_act->_sa_handler = oact._sa_handler;
8897 		old_act->sa_flags = oact.sa_flags;
8898 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8899 		old_act->sa_mask.sig[1] = 0;
8900 		old_act->sa_mask.sig[2] = 0;
8901 		old_act->sa_mask.sig[3] = 0;
8902 		unlock_user_struct(old_act, arg3, 1);
8903 	    }
8904 #else
8905             struct target_old_sigaction *old_act;
8906             struct target_sigaction act, oact, *pact;
8907             if (arg2) {
8908                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8909                     return -TARGET_EFAULT;
8910                 act._sa_handler = old_act->_sa_handler;
8911                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8912                 act.sa_flags = old_act->sa_flags;
8913 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8914                 act.sa_restorer = old_act->sa_restorer;
8915 #endif
8916                 unlock_user_struct(old_act, arg2, 0);
8917                 pact = &act;
8918             } else {
8919                 pact = NULL;
8920             }
8921             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8922             if (!is_error(ret) && arg3) {
8923                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8924                     return -TARGET_EFAULT;
8925                 old_act->_sa_handler = oact._sa_handler;
8926                 old_act->sa_mask = oact.sa_mask.sig[0];
8927                 old_act->sa_flags = oact.sa_flags;
8928 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8929                 old_act->sa_restorer = oact.sa_restorer;
8930 #endif
8931                 unlock_user_struct(old_act, arg3, 1);
8932             }
8933 #endif
8934         }
8935         return ret;
8936 #endif
8937     case TARGET_NR_rt_sigaction:
8938         {
8939             /*
8940              * For Alpha and SPARC this is a 5 argument syscall, with
8941              * a 'restorer' parameter which must be copied into the
8942              * sa_restorer field of the sigaction struct.
8943              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8944              * and arg5 is the sigsetsize.
8945              */
8946 #if defined(TARGET_ALPHA)
8947             target_ulong sigsetsize = arg4;
8948             target_ulong restorer = arg5;
8949 #elif defined(TARGET_SPARC)
8950             target_ulong restorer = arg4;
8951             target_ulong sigsetsize = arg5;
8952 #else
8953             target_ulong sigsetsize = arg4;
8954             target_ulong restorer = 0;
8955 #endif
8956             struct target_sigaction *act = NULL;
8957             struct target_sigaction *oact = NULL;
8958 
8959             if (sigsetsize != sizeof(target_sigset_t)) {
8960                 return -TARGET_EINVAL;
8961             }
8962             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8963                 return -TARGET_EFAULT;
8964             }
8965             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8966                 ret = -TARGET_EFAULT;
8967             } else {
8968                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8969                 if (oact) {
8970                     unlock_user_struct(oact, arg3, 1);
8971                 }
8972             }
8973             if (act) {
8974                 unlock_user_struct(act, arg2, 0);
8975             }
8976         }
8977         return ret;
8978 #ifdef TARGET_NR_sgetmask /* not on alpha */
8979     case TARGET_NR_sgetmask:
8980         {
8981             sigset_t cur_set;
8982             abi_ulong target_set;
8983             ret = do_sigprocmask(0, NULL, &cur_set);
8984             if (!ret) {
8985                 host_to_target_old_sigset(&target_set, &cur_set);
8986                 ret = target_set;
8987             }
8988         }
8989         return ret;
8990 #endif
8991 #ifdef TARGET_NR_ssetmask /* not on alpha */
8992     case TARGET_NR_ssetmask:
8993         {
8994             sigset_t set, oset;
8995             abi_ulong target_set = arg1;
8996             target_to_host_old_sigset(&set, &target_set);
8997             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8998             if (!ret) {
8999                 host_to_target_old_sigset(&target_set, &oset);
9000                 ret = target_set;
9001             }
9002         }
9003         return ret;
9004 #endif
9005 #ifdef TARGET_NR_sigprocmask
9006     case TARGET_NR_sigprocmask:
9007         {
9008 #if defined(TARGET_ALPHA)
9009             sigset_t set, oldset;
9010             abi_ulong mask;
9011             int how;
9012 
9013             switch (arg1) {
9014             case TARGET_SIG_BLOCK:
9015                 how = SIG_BLOCK;
9016                 break;
9017             case TARGET_SIG_UNBLOCK:
9018                 how = SIG_UNBLOCK;
9019                 break;
9020             case TARGET_SIG_SETMASK:
9021                 how = SIG_SETMASK;
9022                 break;
9023             default:
9024                 return -TARGET_EINVAL;
9025             }
9026             mask = arg2;
9027             target_to_host_old_sigset(&set, &mask);
9028 
9029             ret = do_sigprocmask(how, &set, &oldset);
9030             if (!is_error(ret)) {
9031                 host_to_target_old_sigset(&mask, &oldset);
9032                 ret = mask;
9033                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9034             }
9035 #else
9036             sigset_t set, oldset, *set_ptr;
9037             int how;
9038 
9039             if (arg2) {
9040                 switch (arg1) {
9041                 case TARGET_SIG_BLOCK:
9042                     how = SIG_BLOCK;
9043                     break;
9044                 case TARGET_SIG_UNBLOCK:
9045                     how = SIG_UNBLOCK;
9046                     break;
9047                 case TARGET_SIG_SETMASK:
9048                     how = SIG_SETMASK;
9049                     break;
9050                 default:
9051                     return -TARGET_EINVAL;
9052                 }
9053                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9054                     return -TARGET_EFAULT;
9055                 target_to_host_old_sigset(&set, p);
9056                 unlock_user(p, arg2, 0);
9057                 set_ptr = &set;
9058             } else {
9059                 how = 0;
9060                 set_ptr = NULL;
9061             }
9062             ret = do_sigprocmask(how, set_ptr, &oldset);
9063             if (!is_error(ret) && arg3) {
9064                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9065                     return -TARGET_EFAULT;
9066                 host_to_target_old_sigset(p, &oldset);
9067                 unlock_user(p, arg3, sizeof(target_sigset_t));
9068             }
9069 #endif
9070         }
9071         return ret;
9072 #endif
9073     case TARGET_NR_rt_sigprocmask:
9074         {
9075             int how = arg1;
9076             sigset_t set, oldset, *set_ptr;
9077 
9078             if (arg4 != sizeof(target_sigset_t)) {
9079                 return -TARGET_EINVAL;
9080             }
9081 
9082             if (arg2) {
9083                 switch(how) {
9084                 case TARGET_SIG_BLOCK:
9085                     how = SIG_BLOCK;
9086                     break;
9087                 case TARGET_SIG_UNBLOCK:
9088                     how = SIG_UNBLOCK;
9089                     break;
9090                 case TARGET_SIG_SETMASK:
9091                     how = SIG_SETMASK;
9092                     break;
9093                 default:
9094                     return -TARGET_EINVAL;
9095                 }
9096                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9097                     return -TARGET_EFAULT;
9098                 target_to_host_sigset(&set, p);
9099                 unlock_user(p, arg2, 0);
9100                 set_ptr = &set;
9101             } else {
9102                 how = 0;
9103                 set_ptr = NULL;
9104             }
9105             ret = do_sigprocmask(how, set_ptr, &oldset);
9106             if (!is_error(ret) && arg3) {
9107                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9108                     return -TARGET_EFAULT;
9109                 host_to_target_sigset(p, &oldset);
9110                 unlock_user(p, arg3, sizeof(target_sigset_t));
9111             }
9112         }
9113         return ret;
9114 #ifdef TARGET_NR_sigpending
9115     case TARGET_NR_sigpending:
9116         {
9117             sigset_t set;
9118             ret = get_errno(sigpending(&set));
9119             if (!is_error(ret)) {
9120                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9121                     return -TARGET_EFAULT;
9122                 host_to_target_old_sigset(p, &set);
9123                 unlock_user(p, arg1, sizeof(target_sigset_t));
9124             }
9125         }
9126         return ret;
9127 #endif
9128     case TARGET_NR_rt_sigpending:
9129         {
9130             sigset_t set;
9131 
9132             /* Yes, this check is >, not != like most. We follow the kernel's
9133              * logic and it does it like this because it implements
9134              * NR_sigpending through the same code path, and in that case
9135              * the old_sigset_t is smaller in size.
9136              */
9137             if (arg2 > sizeof(target_sigset_t)) {
9138                 return -TARGET_EINVAL;
9139             }
9140 
9141             ret = get_errno(sigpending(&set));
9142             if (!is_error(ret)) {
9143                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9144                     return -TARGET_EFAULT;
9145                 host_to_target_sigset(p, &set);
9146                 unlock_user(p, arg1, sizeof(target_sigset_t));
9147             }
9148         }
9149         return ret;
9150 #ifdef TARGET_NR_sigsuspend
9151     case TARGET_NR_sigsuspend:
9152         {
9153             TaskState *ts = cpu->opaque;
9154 #if defined(TARGET_ALPHA)
9155             abi_ulong mask = arg1;
9156             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9157 #else
9158             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9159                 return -TARGET_EFAULT;
9160             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9161             unlock_user(p, arg1, 0);
9162 #endif
9163             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9164                                                SIGSET_T_SIZE));
9165             if (ret != -TARGET_ERESTARTSYS) {
9166                 ts->in_sigsuspend = 1;
9167             }
9168         }
9169         return ret;
9170 #endif
9171     case TARGET_NR_rt_sigsuspend:
9172         {
9173             TaskState *ts = cpu->opaque;
9174 
9175             if (arg2 != sizeof(target_sigset_t)) {
9176                 return -TARGET_EINVAL;
9177             }
9178             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9179                 return -TARGET_EFAULT;
9180             target_to_host_sigset(&ts->sigsuspend_mask, p);
9181             unlock_user(p, arg1, 0);
9182             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9183                                                SIGSET_T_SIZE));
9184             if (ret != -TARGET_ERESTARTSYS) {
9185                 ts->in_sigsuspend = 1;
9186             }
9187         }
9188         return ret;
9189 #ifdef TARGET_NR_rt_sigtimedwait
9190     case TARGET_NR_rt_sigtimedwait:
9191         {
9192             sigset_t set;
9193             struct timespec uts, *puts;
9194             siginfo_t uinfo;
9195 
9196             if (arg4 != sizeof(target_sigset_t)) {
9197                 return -TARGET_EINVAL;
9198             }
9199 
9200             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9201                 return -TARGET_EFAULT;
9202             target_to_host_sigset(&set, p);
9203             unlock_user(p, arg1, 0);
9204             if (arg3) {
9205                 puts = &uts;
9206                 if (target_to_host_timespec(puts, arg3)) {
9207                     return -TARGET_EFAULT;
9208                 }
9209             } else {
9210                 puts = NULL;
9211             }
9212             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9213                                                  SIGSET_T_SIZE));
9214             if (!is_error(ret)) {
9215                 if (arg2) {
9216                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9217                                   0);
9218                     if (!p) {
9219                         return -TARGET_EFAULT;
9220                     }
9221                     host_to_target_siginfo(p, &uinfo);
9222                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9223                 }
9224                 ret = host_to_target_signal(ret);
9225             }
9226         }
9227         return ret;
9228 #endif
9229 #ifdef TARGET_NR_rt_sigtimedwait_time64
9230     case TARGET_NR_rt_sigtimedwait_time64:
9231         {
9232             sigset_t set;
9233             struct timespec uts, *puts;
9234             siginfo_t uinfo;
9235 
9236             if (arg4 != sizeof(target_sigset_t)) {
9237                 return -TARGET_EINVAL;
9238             }
9239 
9240             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9241             if (!p) {
9242                 return -TARGET_EFAULT;
9243             }
9244             target_to_host_sigset(&set, p);
9245             unlock_user(p, arg1, 0);
9246             if (arg3) {
9247                 puts = &uts;
9248                 if (target_to_host_timespec64(puts, arg3)) {
9249                     return -TARGET_EFAULT;
9250                 }
9251             } else {
9252                 puts = NULL;
9253             }
9254             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9255                                                  SIGSET_T_SIZE));
9256             if (!is_error(ret)) {
9257                 if (arg2) {
9258                     p = lock_user(VERIFY_WRITE, arg2,
9259                                   sizeof(target_siginfo_t), 0);
9260                     if (!p) {
9261                         return -TARGET_EFAULT;
9262                     }
9263                     host_to_target_siginfo(p, &uinfo);
9264                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9265                 }
9266                 ret = host_to_target_signal(ret);
9267             }
9268         }
9269         return ret;
9270 #endif
9271     case TARGET_NR_rt_sigqueueinfo:
9272         {
9273             siginfo_t uinfo;
9274 
9275             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9276             if (!p) {
9277                 return -TARGET_EFAULT;
9278             }
9279             target_to_host_siginfo(&uinfo, p);
9280             unlock_user(p, arg3, 0);
9281             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9282         }
9283         return ret;
9284     case TARGET_NR_rt_tgsigqueueinfo:
9285         {
9286             siginfo_t uinfo;
9287 
9288             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9289             if (!p) {
9290                 return -TARGET_EFAULT;
9291             }
9292             target_to_host_siginfo(&uinfo, p);
9293             unlock_user(p, arg4, 0);
9294             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9295         }
9296         return ret;
9297 #ifdef TARGET_NR_sigreturn
9298     case TARGET_NR_sigreturn:
9299         if (block_signals()) {
9300             return -TARGET_ERESTARTSYS;
9301         }
9302         return do_sigreturn(cpu_env);
9303 #endif
9304     case TARGET_NR_rt_sigreturn:
9305         if (block_signals()) {
9306             return -TARGET_ERESTARTSYS;
9307         }
9308         return do_rt_sigreturn(cpu_env);
9309     case TARGET_NR_sethostname:
9310         if (!(p = lock_user_string(arg1)))
9311             return -TARGET_EFAULT;
9312         ret = get_errno(sethostname(p, arg2));
9313         unlock_user(p, arg1, 0);
9314         return ret;
9315 #ifdef TARGET_NR_setrlimit
9316     case TARGET_NR_setrlimit:
9317         {
9318             int resource = target_to_host_resource(arg1);
9319             struct target_rlimit *target_rlim;
9320             struct rlimit rlim;
9321             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9322                 return -TARGET_EFAULT;
9323             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9324             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9325             unlock_user_struct(target_rlim, arg2, 0);
9326             /*
9327              * If we just passed through resource limit settings for memory then
9328              * they would also apply to QEMU's own allocations, and QEMU will
9329              * crash or hang or die if its allocations fail. Ideally we would
9330              * track the guest allocations in QEMU and apply the limits ourselves.
9331              * For now, just tell the guest the call succeeded but don't actually
9332              * limit anything.
9333              */
9334             if (resource != RLIMIT_AS &&
9335                 resource != RLIMIT_DATA &&
9336                 resource != RLIMIT_STACK) {
9337                 return get_errno(setrlimit(resource, &rlim));
9338             } else {
9339                 return 0;
9340             }
9341         }
9342 #endif
9343 #ifdef TARGET_NR_getrlimit
9344     case TARGET_NR_getrlimit:
9345         {
9346             int resource = target_to_host_resource(arg1);
9347             struct target_rlimit *target_rlim;
9348             struct rlimit rlim;
9349 
9350             ret = get_errno(getrlimit(resource, &rlim));
9351             if (!is_error(ret)) {
9352                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9353                     return -TARGET_EFAULT;
9354                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9355                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9356                 unlock_user_struct(target_rlim, arg2, 1);
9357             }
9358         }
9359         return ret;
9360 #endif
9361     case TARGET_NR_getrusage:
9362         {
9363             struct rusage rusage;
9364             ret = get_errno(getrusage(arg1, &rusage));
9365             if (!is_error(ret)) {
9366                 ret = host_to_target_rusage(arg2, &rusage);
9367             }
9368         }
9369         return ret;
9370 #if defined(TARGET_NR_gettimeofday)
9371     case TARGET_NR_gettimeofday:
9372         {
9373             struct timeval tv;
9374             struct timezone tz;
9375 
9376             ret = get_errno(gettimeofday(&tv, &tz));
9377             if (!is_error(ret)) {
9378                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9379                     return -TARGET_EFAULT;
9380                 }
9381                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9382                     return -TARGET_EFAULT;
9383                 }
9384             }
9385         }
9386         return ret;
9387 #endif
9388 #if defined(TARGET_NR_settimeofday)
9389     case TARGET_NR_settimeofday:
9390         {
9391             struct timeval tv, *ptv = NULL;
9392             struct timezone tz, *ptz = NULL;
9393 
9394             if (arg1) {
9395                 if (copy_from_user_timeval(&tv, arg1)) {
9396                     return -TARGET_EFAULT;
9397                 }
9398                 ptv = &tv;
9399             }
9400 
9401             if (arg2) {
9402                 if (copy_from_user_timezone(&tz, arg2)) {
9403                     return -TARGET_EFAULT;
9404                 }
9405                 ptz = &tz;
9406             }
9407 
9408             return get_errno(settimeofday(ptv, ptz));
9409         }
9410 #endif
9411 #if defined(TARGET_NR_select)
9412     case TARGET_NR_select:
9413 #if defined(TARGET_WANT_NI_OLD_SELECT)
9414         /* some architectures used to have old_select here
9415          * but now ENOSYS it.
9416          */
9417         ret = -TARGET_ENOSYS;
9418 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9419         ret = do_old_select(arg1);
9420 #else
9421         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9422 #endif
9423         return ret;
9424 #endif
9425 #ifdef TARGET_NR_pselect6
9426     case TARGET_NR_pselect6:
9427         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9428 #endif
9429 #ifdef TARGET_NR_pselect6_time64
9430     case TARGET_NR_pselect6_time64:
9431         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9432 #endif
9433 #ifdef TARGET_NR_symlink
9434     case TARGET_NR_symlink:
9435         {
9436             void *p2;
9437             p = lock_user_string(arg1);
9438             p2 = lock_user_string(arg2);
9439             if (!p || !p2)
9440                 ret = -TARGET_EFAULT;
9441             else
9442                 ret = get_errno(symlink(p, p2));
9443             unlock_user(p2, arg2, 0);
9444             unlock_user(p, arg1, 0);
9445         }
9446         return ret;
9447 #endif
9448 #if defined(TARGET_NR_symlinkat)
9449     case TARGET_NR_symlinkat:
9450         {
9451             void *p2;
9452             p  = lock_user_string(arg1);
9453             p2 = lock_user_string(arg3);
9454             if (!p || !p2)
9455                 ret = -TARGET_EFAULT;
9456             else
9457                 ret = get_errno(symlinkat(p, arg2, p2));
9458             unlock_user(p2, arg3, 0);
9459             unlock_user(p, arg1, 0);
9460         }
9461         return ret;
9462 #endif
9463 #ifdef TARGET_NR_readlink
9464     case TARGET_NR_readlink:
9465         {
9466             void *p2;
9467             p = lock_user_string(arg1);
9468             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9469             if (!p || !p2) {
9470                 ret = -TARGET_EFAULT;
9471             } else if (!arg3) {
9472                 /* Short circuit this for the magic exe check. */
9473                 ret = -TARGET_EINVAL;
9474             } else if (is_proc_myself((const char *)p, "exe")) {
9475                 char real[PATH_MAX], *temp;
9476                 temp = realpath(exec_path, real);
9477                 /* Return value is # of bytes that we wrote to the buffer. */
9478                 if (temp == NULL) {
9479                     ret = get_errno(-1);
9480                 } else {
9481                     /* Don't worry about sign mismatch as earlier mapping
9482                      * logic would have thrown a bad address error. */
9483                     ret = MIN(strlen(real), arg3);
9484                     /* We cannot NUL terminate the string. */
9485                     memcpy(p2, real, ret);
9486                 }
9487             } else {
9488                 ret = get_errno(readlink(path(p), p2, arg3));
9489             }
9490             unlock_user(p2, arg2, ret);
9491             unlock_user(p, arg1, 0);
9492         }
9493         return ret;
9494 #endif
9495 #if defined(TARGET_NR_readlinkat)
9496     case TARGET_NR_readlinkat:
9497         {
9498             void *p2;
9499             p  = lock_user_string(arg2);
9500             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9501             if (!p || !p2) {
9502                 ret = -TARGET_EFAULT;
9503             } else if (is_proc_myself((const char *)p, "exe")) {
9504                 char real[PATH_MAX], *temp;
9505                 temp = realpath(exec_path, real);
9506                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9507                 snprintf((char *)p2, arg4, "%s", real);
9508             } else {
9509                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9510             }
9511             unlock_user(p2, arg3, ret);
9512             unlock_user(p, arg2, 0);
9513         }
9514         return ret;
9515 #endif
9516 #ifdef TARGET_NR_swapon
9517     case TARGET_NR_swapon:
9518         if (!(p = lock_user_string(arg1)))
9519             return -TARGET_EFAULT;
9520         ret = get_errno(swapon(p, arg2));
9521         unlock_user(p, arg1, 0);
9522         return ret;
9523 #endif
9524     case TARGET_NR_reboot:
9525         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9526            /* arg4 must be ignored in all other cases */
9527            p = lock_user_string(arg4);
9528            if (!p) {
9529                return -TARGET_EFAULT;
9530            }
9531            ret = get_errno(reboot(arg1, arg2, arg3, p));
9532            unlock_user(p, arg4, 0);
9533         } else {
9534            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9535         }
9536         return ret;
9537 #ifdef TARGET_NR_mmap
9538     case TARGET_NR_mmap:
9539 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9540     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9541     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9542     || defined(TARGET_S390X)
9543         {
9544             abi_ulong *v;
9545             abi_ulong v1, v2, v3, v4, v5, v6;
9546             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9547                 return -TARGET_EFAULT;
9548             v1 = tswapal(v[0]);
9549             v2 = tswapal(v[1]);
9550             v3 = tswapal(v[2]);
9551             v4 = tswapal(v[3]);
9552             v5 = tswapal(v[4]);
9553             v6 = tswapal(v[5]);
9554             unlock_user(v, arg1, 0);
9555             ret = get_errno(target_mmap(v1, v2, v3,
9556                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9557                                         v5, v6));
9558         }
9559 #else
9560         /* mmap pointers are always untagged */
9561         ret = get_errno(target_mmap(arg1, arg2, arg3,
9562                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9563                                     arg5,
9564                                     arg6));
9565 #endif
9566         return ret;
9567 #endif
9568 #ifdef TARGET_NR_mmap2
9569     case TARGET_NR_mmap2:
9570 #ifndef MMAP_SHIFT
9571 #define MMAP_SHIFT 12
9572 #endif
9573         ret = target_mmap(arg1, arg2, arg3,
9574                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9575                           arg5, arg6 << MMAP_SHIFT);
9576         return get_errno(ret);
9577 #endif
9578     case TARGET_NR_munmap:
9579         arg1 = cpu_untagged_addr(cpu, arg1);
9580         return get_errno(target_munmap(arg1, arg2));
9581     case TARGET_NR_mprotect:
9582         arg1 = cpu_untagged_addr(cpu, arg1);
9583         {
9584             TaskState *ts = cpu->opaque;
9585             /* Special hack to detect libc making the stack executable.  */
9586             if ((arg3 & PROT_GROWSDOWN)
9587                 && arg1 >= ts->info->stack_limit
9588                 && arg1 <= ts->info->start_stack) {
9589                 arg3 &= ~PROT_GROWSDOWN;
9590                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9591                 arg1 = ts->info->stack_limit;
9592             }
9593         }
9594         return get_errno(target_mprotect(arg1, arg2, arg3));
9595 #ifdef TARGET_NR_mremap
9596     case TARGET_NR_mremap:
9597         arg1 = cpu_untagged_addr(cpu, arg1);
9598         /* mremap new_addr (arg5) is always untagged */
9599         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9600 #endif
9601         /* ??? msync/mlock/munlock are broken for softmmu.  */
9602 #ifdef TARGET_NR_msync
9603     case TARGET_NR_msync:
9604         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9605 #endif
9606 #ifdef TARGET_NR_mlock
9607     case TARGET_NR_mlock:
9608         return get_errno(mlock(g2h(cpu, arg1), arg2));
9609 #endif
9610 #ifdef TARGET_NR_munlock
9611     case TARGET_NR_munlock:
9612         return get_errno(munlock(g2h(cpu, arg1), arg2));
9613 #endif
9614 #ifdef TARGET_NR_mlockall
9615     case TARGET_NR_mlockall:
9616         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9617 #endif
9618 #ifdef TARGET_NR_munlockall
9619     case TARGET_NR_munlockall:
9620         return get_errno(munlockall());
9621 #endif
9622 #ifdef TARGET_NR_truncate
9623     case TARGET_NR_truncate:
9624         if (!(p = lock_user_string(arg1)))
9625             return -TARGET_EFAULT;
9626         ret = get_errno(truncate(p, arg2));
9627         unlock_user(p, arg1, 0);
9628         return ret;
9629 #endif
9630 #ifdef TARGET_NR_ftruncate
9631     case TARGET_NR_ftruncate:
9632         return get_errno(ftruncate(arg1, arg2));
9633 #endif
9634     case TARGET_NR_fchmod:
9635         return get_errno(fchmod(arg1, arg2));
9636 #if defined(TARGET_NR_fchmodat)
9637     case TARGET_NR_fchmodat:
9638         if (!(p = lock_user_string(arg2)))
9639             return -TARGET_EFAULT;
9640         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9641         unlock_user(p, arg2, 0);
9642         return ret;
9643 #endif
9644     case TARGET_NR_getpriority:
9645         /* Note that negative values are valid for getpriority, so we must
9646            differentiate based on errno settings.  */
9647         errno = 0;
9648         ret = getpriority(arg1, arg2);
9649         if (ret == -1 && errno != 0) {
9650             return -host_to_target_errno(errno);
9651         }
9652 #ifdef TARGET_ALPHA
9653         /* Return value is the unbiased priority.  Signal no error.  */
9654         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9655 #else
9656         /* Return value is a biased priority to avoid negative numbers.  */
9657         ret = 20 - ret;
9658 #endif
9659         return ret;
9660     case TARGET_NR_setpriority:
9661         return get_errno(setpriority(arg1, arg2, arg3));
9662 #ifdef TARGET_NR_statfs
9663     case TARGET_NR_statfs:
9664         if (!(p = lock_user_string(arg1))) {
9665             return -TARGET_EFAULT;
9666         }
9667         ret = get_errno(statfs(path(p), &stfs));
9668         unlock_user(p, arg1, 0);
9669     convert_statfs:
9670         if (!is_error(ret)) {
9671             struct target_statfs *target_stfs;
9672 
9673             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9674                 return -TARGET_EFAULT;
9675             __put_user(stfs.f_type, &target_stfs->f_type);
9676             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9677             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9678             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9679             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9680             __put_user(stfs.f_files, &target_stfs->f_files);
9681             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9682             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9683             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9684             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9685             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9686 #ifdef _STATFS_F_FLAGS
9687             __put_user(stfs.f_flags, &target_stfs->f_flags);
9688 #else
9689             __put_user(0, &target_stfs->f_flags);
9690 #endif
9691             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9692             unlock_user_struct(target_stfs, arg2, 1);
9693         }
9694         return ret;
9695 #endif
9696 #ifdef TARGET_NR_fstatfs
9697     case TARGET_NR_fstatfs:
9698         ret = get_errno(fstatfs(arg1, &stfs));
9699         goto convert_statfs;
9700 #endif
9701 #ifdef TARGET_NR_statfs64
9702     case TARGET_NR_statfs64:
9703         if (!(p = lock_user_string(arg1))) {
9704             return -TARGET_EFAULT;
9705         }
9706         ret = get_errno(statfs(path(p), &stfs));
9707         unlock_user(p, arg1, 0);
9708     convert_statfs64:
9709         if (!is_error(ret)) {
9710             struct target_statfs64 *target_stfs;
9711 
9712             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9713                 return -TARGET_EFAULT;
9714             __put_user(stfs.f_type, &target_stfs->f_type);
9715             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9716             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9717             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9718             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9719             __put_user(stfs.f_files, &target_stfs->f_files);
9720             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9721             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9722             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9723             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9724             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9725 #ifdef _STATFS_F_FLAGS
9726             __put_user(stfs.f_flags, &target_stfs->f_flags);
9727 #else
9728             __put_user(0, &target_stfs->f_flags);
9729 #endif
9730             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9731             unlock_user_struct(target_stfs, arg3, 1);
9732         }
9733         return ret;
9734     case TARGET_NR_fstatfs64:
9735         ret = get_errno(fstatfs(arg1, &stfs));
9736         goto convert_statfs64;
9737 #endif
9738 #ifdef TARGET_NR_socketcall
9739     case TARGET_NR_socketcall:
9740         return do_socketcall(arg1, arg2);
9741 #endif
9742 #ifdef TARGET_NR_accept
9743     case TARGET_NR_accept:
9744         return do_accept4(arg1, arg2, arg3, 0);
9745 #endif
9746 #ifdef TARGET_NR_accept4
9747     case TARGET_NR_accept4:
9748         return do_accept4(arg1, arg2, arg3, arg4);
9749 #endif
9750 #ifdef TARGET_NR_bind
9751     case TARGET_NR_bind:
9752         return do_bind(arg1, arg2, arg3);
9753 #endif
9754 #ifdef TARGET_NR_connect
9755     case TARGET_NR_connect:
9756         return do_connect(arg1, arg2, arg3);
9757 #endif
9758 #ifdef TARGET_NR_getpeername
9759     case TARGET_NR_getpeername:
9760         return do_getpeername(arg1, arg2, arg3);
9761 #endif
9762 #ifdef TARGET_NR_getsockname
9763     case TARGET_NR_getsockname:
9764         return do_getsockname(arg1, arg2, arg3);
9765 #endif
9766 #ifdef TARGET_NR_getsockopt
9767     case TARGET_NR_getsockopt:
9768         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9769 #endif
9770 #ifdef TARGET_NR_listen
9771     case TARGET_NR_listen:
9772         return get_errno(listen(arg1, arg2));
9773 #endif
9774 #ifdef TARGET_NR_recv
9775     case TARGET_NR_recv:
9776         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9777 #endif
9778 #ifdef TARGET_NR_recvfrom
9779     case TARGET_NR_recvfrom:
9780         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9781 #endif
9782 #ifdef TARGET_NR_recvmsg
9783     case TARGET_NR_recvmsg:
9784         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9785 #endif
9786 #ifdef TARGET_NR_send
9787     case TARGET_NR_send:
9788         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9789 #endif
9790 #ifdef TARGET_NR_sendmsg
9791     case TARGET_NR_sendmsg:
9792         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9793 #endif
9794 #ifdef TARGET_NR_sendmmsg
9795     case TARGET_NR_sendmmsg:
9796         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9797 #endif
9798 #ifdef TARGET_NR_recvmmsg
9799     case TARGET_NR_recvmmsg:
9800         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9801 #endif
9802 #ifdef TARGET_NR_sendto
9803     case TARGET_NR_sendto:
9804         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9805 #endif
9806 #ifdef TARGET_NR_shutdown
9807     case TARGET_NR_shutdown:
9808         return get_errno(shutdown(arg1, arg2));
9809 #endif
9810 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9811     case TARGET_NR_getrandom:
9812         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9813         if (!p) {
9814             return -TARGET_EFAULT;
9815         }
9816         ret = get_errno(getrandom(p, arg2, arg3));
9817         unlock_user(p, arg1, ret);
9818         return ret;
9819 #endif
9820 #ifdef TARGET_NR_socket
9821     case TARGET_NR_socket:
9822         return do_socket(arg1, arg2, arg3);
9823 #endif
9824 #ifdef TARGET_NR_socketpair
9825     case TARGET_NR_socketpair:
9826         return do_socketpair(arg1, arg2, arg3, arg4);
9827 #endif
9828 #ifdef TARGET_NR_setsockopt
9829     case TARGET_NR_setsockopt:
9830         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9831 #endif
9832 #if defined(TARGET_NR_syslog)
9833     case TARGET_NR_syslog:
9834         {
9835             int len = arg2;
9836 
9837             switch (arg1) {
9838             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9839             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9840             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9841             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9842             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9843             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9844             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9845             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9846                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9847             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9848             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9849             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9850                 {
9851                     if (len < 0) {
9852                         return -TARGET_EINVAL;
9853                     }
9854                     if (len == 0) {
9855                         return 0;
9856                     }
9857                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9858                     if (!p) {
9859                         return -TARGET_EFAULT;
9860                     }
9861                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9862                     unlock_user(p, arg2, arg3);
9863                 }
9864                 return ret;
9865             default:
9866                 return -TARGET_EINVAL;
9867             }
9868         }
9869         break;
9870 #endif
9871     case TARGET_NR_setitimer:
9872         {
9873             struct itimerval value, ovalue, *pvalue;
9874 
9875             if (arg2) {
9876                 pvalue = &value;
9877                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9878                     || copy_from_user_timeval(&pvalue->it_value,
9879                                               arg2 + sizeof(struct target_timeval)))
9880                     return -TARGET_EFAULT;
9881             } else {
9882                 pvalue = NULL;
9883             }
9884             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9885             if (!is_error(ret) && arg3) {
9886                 if (copy_to_user_timeval(arg3,
9887                                          &ovalue.it_interval)
9888                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9889                                             &ovalue.it_value))
9890                     return -TARGET_EFAULT;
9891             }
9892         }
9893         return ret;
9894     case TARGET_NR_getitimer:
9895         {
9896             struct itimerval value;
9897 
9898             ret = get_errno(getitimer(arg1, &value));
9899             if (!is_error(ret) && arg2) {
9900                 if (copy_to_user_timeval(arg2,
9901                                          &value.it_interval)
9902                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9903                                             &value.it_value))
9904                     return -TARGET_EFAULT;
9905             }
9906         }
9907         return ret;
9908 #ifdef TARGET_NR_stat
9909     case TARGET_NR_stat:
9910         if (!(p = lock_user_string(arg1))) {
9911             return -TARGET_EFAULT;
9912         }
9913         ret = get_errno(stat(path(p), &st));
9914         unlock_user(p, arg1, 0);
9915         goto do_stat;
9916 #endif
9917 #ifdef TARGET_NR_lstat
9918     case TARGET_NR_lstat:
9919         if (!(p = lock_user_string(arg1))) {
9920             return -TARGET_EFAULT;
9921         }
9922         ret = get_errno(lstat(path(p), &st));
9923         unlock_user(p, arg1, 0);
9924         goto do_stat;
9925 #endif
9926 #ifdef TARGET_NR_fstat
9927     case TARGET_NR_fstat:
9928         {
9929             ret = get_errno(fstat(arg1, &st));
9930 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9931         do_stat:
9932 #endif
9933             if (!is_error(ret)) {
9934                 struct target_stat *target_st;
9935 
9936                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9937                     return -TARGET_EFAULT;
9938                 memset(target_st, 0, sizeof(*target_st));
9939                 __put_user(st.st_dev, &target_st->st_dev);
9940                 __put_user(st.st_ino, &target_st->st_ino);
9941                 __put_user(st.st_mode, &target_st->st_mode);
9942                 __put_user(st.st_uid, &target_st->st_uid);
9943                 __put_user(st.st_gid, &target_st->st_gid);
9944                 __put_user(st.st_nlink, &target_st->st_nlink);
9945                 __put_user(st.st_rdev, &target_st->st_rdev);
9946                 __put_user(st.st_size, &target_st->st_size);
9947                 __put_user(st.st_blksize, &target_st->st_blksize);
9948                 __put_user(st.st_blocks, &target_st->st_blocks);
9949                 __put_user(st.st_atime, &target_st->target_st_atime);
9950                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9951                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9952 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9953                 __put_user(st.st_atim.tv_nsec,
9954                            &target_st->target_st_atime_nsec);
9955                 __put_user(st.st_mtim.tv_nsec,
9956                            &target_st->target_st_mtime_nsec);
9957                 __put_user(st.st_ctim.tv_nsec,
9958                            &target_st->target_st_ctime_nsec);
9959 #endif
9960                 unlock_user_struct(target_st, arg2, 1);
9961             }
9962         }
9963         return ret;
9964 #endif
9965     case TARGET_NR_vhangup:
9966         return get_errno(vhangup());
9967 #ifdef TARGET_NR_syscall
9968     case TARGET_NR_syscall:
9969         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9970                           arg6, arg7, arg8, 0);
9971 #endif
9972 #if defined(TARGET_NR_wait4)
9973     case TARGET_NR_wait4:
9974         {
9975             int status;
9976             abi_long status_ptr = arg2;
9977             struct rusage rusage, *rusage_ptr;
9978             abi_ulong target_rusage = arg4;
9979             abi_long rusage_err;
9980             if (target_rusage)
9981                 rusage_ptr = &rusage;
9982             else
9983                 rusage_ptr = NULL;
9984             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9985             if (!is_error(ret)) {
9986                 if (status_ptr && ret) {
9987                     status = host_to_target_waitstatus(status);
9988                     if (put_user_s32(status, status_ptr))
9989                         return -TARGET_EFAULT;
9990                 }
9991                 if (target_rusage) {
9992                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9993                     if (rusage_err) {
9994                         ret = rusage_err;
9995                     }
9996                 }
9997             }
9998         }
9999         return ret;
10000 #endif
10001 #ifdef TARGET_NR_swapoff
10002     case TARGET_NR_swapoff:
10003         if (!(p = lock_user_string(arg1)))
10004             return -TARGET_EFAULT;
10005         ret = get_errno(swapoff(p));
10006         unlock_user(p, arg1, 0);
10007         return ret;
10008 #endif
10009     case TARGET_NR_sysinfo:
10010         {
10011             struct target_sysinfo *target_value;
10012             struct sysinfo value;
10013             ret = get_errno(sysinfo(&value));
10014             if (!is_error(ret) && arg1)
10015             {
10016                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10017                     return -TARGET_EFAULT;
10018                 __put_user(value.uptime, &target_value->uptime);
10019                 __put_user(value.loads[0], &target_value->loads[0]);
10020                 __put_user(value.loads[1], &target_value->loads[1]);
10021                 __put_user(value.loads[2], &target_value->loads[2]);
10022                 __put_user(value.totalram, &target_value->totalram);
10023                 __put_user(value.freeram, &target_value->freeram);
10024                 __put_user(value.sharedram, &target_value->sharedram);
10025                 __put_user(value.bufferram, &target_value->bufferram);
10026                 __put_user(value.totalswap, &target_value->totalswap);
10027                 __put_user(value.freeswap, &target_value->freeswap);
10028                 __put_user(value.procs, &target_value->procs);
10029                 __put_user(value.totalhigh, &target_value->totalhigh);
10030                 __put_user(value.freehigh, &target_value->freehigh);
10031                 __put_user(value.mem_unit, &target_value->mem_unit);
10032                 unlock_user_struct(target_value, arg1, 1);
10033             }
10034         }
10035         return ret;
10036 #ifdef TARGET_NR_ipc
10037     case TARGET_NR_ipc:
10038         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10039 #endif
10040 #ifdef TARGET_NR_semget
10041     case TARGET_NR_semget:
10042         return get_errno(semget(arg1, arg2, arg3));
10043 #endif
10044 #ifdef TARGET_NR_semop
10045     case TARGET_NR_semop:
10046         return do_semtimedop(arg1, arg2, arg3, 0, false);
10047 #endif
10048 #ifdef TARGET_NR_semtimedop
10049     case TARGET_NR_semtimedop:
10050         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10051 #endif
10052 #ifdef TARGET_NR_semtimedop_time64
10053     case TARGET_NR_semtimedop_time64:
10054         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10055 #endif
10056 #ifdef TARGET_NR_semctl
10057     case TARGET_NR_semctl:
10058         return do_semctl(arg1, arg2, arg3, arg4);
10059 #endif
10060 #ifdef TARGET_NR_msgctl
10061     case TARGET_NR_msgctl:
10062         return do_msgctl(arg1, arg2, arg3);
10063 #endif
10064 #ifdef TARGET_NR_msgget
10065     case TARGET_NR_msgget:
10066         return get_errno(msgget(arg1, arg2));
10067 #endif
10068 #ifdef TARGET_NR_msgrcv
10069     case TARGET_NR_msgrcv:
10070         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10071 #endif
10072 #ifdef TARGET_NR_msgsnd
10073     case TARGET_NR_msgsnd:
10074         return do_msgsnd(arg1, arg2, arg3, arg4);
10075 #endif
10076 #ifdef TARGET_NR_shmget
10077     case TARGET_NR_shmget:
10078         return get_errno(shmget(arg1, arg2, arg3));
10079 #endif
10080 #ifdef TARGET_NR_shmctl
10081     case TARGET_NR_shmctl:
10082         return do_shmctl(arg1, arg2, arg3);
10083 #endif
10084 #ifdef TARGET_NR_shmat
10085     case TARGET_NR_shmat:
10086         return do_shmat(cpu_env, arg1, arg2, arg3);
10087 #endif
10088 #ifdef TARGET_NR_shmdt
10089     case TARGET_NR_shmdt:
10090         return do_shmdt(arg1);
10091 #endif
10092     case TARGET_NR_fsync:
10093         return get_errno(fsync(arg1));
10094     case TARGET_NR_clone:
10095         /* Linux manages to have three different orderings for its
10096          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10097          * match the kernel's CONFIG_CLONE_* settings.
10098          * Microblaze is further special in that it uses a sixth
10099          * implicit argument to clone for the TLS pointer.
10100          */
10101 #if defined(TARGET_MICROBLAZE)
10102         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10103 #elif defined(TARGET_CLONE_BACKWARDS)
10104         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10105 #elif defined(TARGET_CLONE_BACKWARDS2)
10106         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10107 #else
10108         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10109 #endif
10110         return ret;
10111 #ifdef __NR_exit_group
10112         /* new thread calls */
10113     case TARGET_NR_exit_group:
10114         preexit_cleanup(cpu_env, arg1);
10115         return get_errno(exit_group(arg1));
10116 #endif
10117     case TARGET_NR_setdomainname:
10118         if (!(p = lock_user_string(arg1)))
10119             return -TARGET_EFAULT;
10120         ret = get_errno(setdomainname(p, arg2));
10121         unlock_user(p, arg1, 0);
10122         return ret;
10123     case TARGET_NR_uname:
10124         /* no need to transcode because we use the linux syscall */
10125         {
10126             struct new_utsname * buf;
10127 
10128             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10129                 return -TARGET_EFAULT;
10130             ret = get_errno(sys_uname(buf));
10131             if (!is_error(ret)) {
10132                 /* Overwrite the native machine name with whatever is being
10133                    emulated. */
10134                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10135                           sizeof(buf->machine));
10136                 /* Allow the user to override the reported release.  */
10137                 if (qemu_uname_release && *qemu_uname_release) {
10138                     g_strlcpy(buf->release, qemu_uname_release,
10139                               sizeof(buf->release));
10140                 }
10141             }
10142             unlock_user_struct(buf, arg1, 1);
10143         }
10144         return ret;
10145 #ifdef TARGET_I386
10146     case TARGET_NR_modify_ldt:
10147         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10148 #if !defined(TARGET_X86_64)
10149     case TARGET_NR_vm86:
10150         return do_vm86(cpu_env, arg1, arg2);
10151 #endif
10152 #endif
10153 #if defined(TARGET_NR_adjtimex)
10154     case TARGET_NR_adjtimex:
10155         {
10156             struct timex host_buf;
10157 
10158             if (target_to_host_timex(&host_buf, arg1) != 0) {
10159                 return -TARGET_EFAULT;
10160             }
10161             ret = get_errno(adjtimex(&host_buf));
10162             if (!is_error(ret)) {
10163                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10164                     return -TARGET_EFAULT;
10165                 }
10166             }
10167         }
10168         return ret;
10169 #endif
10170 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10171     case TARGET_NR_clock_adjtime:
10172         {
10173             struct timex htx, *phtx = &htx;
10174 
10175             if (target_to_host_timex(phtx, arg2) != 0) {
10176                 return -TARGET_EFAULT;
10177             }
10178             ret = get_errno(clock_adjtime(arg1, phtx));
10179             if (!is_error(ret) && phtx) {
10180                 if (host_to_target_timex(arg2, phtx) != 0) {
10181                     return -TARGET_EFAULT;
10182                 }
10183             }
10184         }
10185         return ret;
10186 #endif
10187 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10188     case TARGET_NR_clock_adjtime64:
10189         {
10190             struct timex htx;
10191 
10192             if (target_to_host_timex64(&htx, arg2) != 0) {
10193                 return -TARGET_EFAULT;
10194             }
10195             ret = get_errno(clock_adjtime(arg1, &htx));
10196             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10197                     return -TARGET_EFAULT;
10198             }
10199         }
10200         return ret;
10201 #endif
10202     case TARGET_NR_getpgid:
10203         return get_errno(getpgid(arg1));
10204     case TARGET_NR_fchdir:
10205         return get_errno(fchdir(arg1));
10206     case TARGET_NR_personality:
10207         return get_errno(personality(arg1));
10208 #ifdef TARGET_NR__llseek /* Not on alpha */
10209     case TARGET_NR__llseek:
10210         {
10211             int64_t res;
10212 #if !defined(__NR_llseek)
10213             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10214             if (res == -1) {
10215                 ret = get_errno(res);
10216             } else {
10217                 ret = 0;
10218             }
10219 #else
10220             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10221 #endif
10222             if ((ret == 0) && put_user_s64(res, arg4)) {
10223                 return -TARGET_EFAULT;
10224             }
10225         }
10226         return ret;
10227 #endif
10228 #ifdef TARGET_NR_getdents
10229     case TARGET_NR_getdents:
10230 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10231 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10232         {
10233             struct target_dirent *target_dirp;
10234             struct linux_dirent *dirp;
10235             abi_long count = arg3;
10236 
10237             dirp = g_try_malloc(count);
10238             if (!dirp) {
10239                 return -TARGET_ENOMEM;
10240             }
10241 
10242             ret = get_errno(sys_getdents(arg1, dirp, count));
10243             if (!is_error(ret)) {
10244                 struct linux_dirent *de;
10245 		struct target_dirent *tde;
10246                 int len = ret;
10247                 int reclen, treclen;
10248 		int count1, tnamelen;
10249 
10250 		count1 = 0;
10251                 de = dirp;
10252                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10253                     return -TARGET_EFAULT;
10254 		tde = target_dirp;
10255                 while (len > 0) {
10256                     reclen = de->d_reclen;
10257                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10258                     assert(tnamelen >= 0);
10259                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10260                     assert(count1 + treclen <= count);
10261                     tde->d_reclen = tswap16(treclen);
10262                     tde->d_ino = tswapal(de->d_ino);
10263                     tde->d_off = tswapal(de->d_off);
10264                     memcpy(tde->d_name, de->d_name, tnamelen);
10265                     de = (struct linux_dirent *)((char *)de + reclen);
10266                     len -= reclen;
10267                     tde = (struct target_dirent *)((char *)tde + treclen);
10268 		    count1 += treclen;
10269                 }
10270 		ret = count1;
10271                 unlock_user(target_dirp, arg2, ret);
10272             }
10273             g_free(dirp);
10274         }
10275 #else
10276         {
10277             struct linux_dirent *dirp;
10278             abi_long count = arg3;
10279 
10280             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10281                 return -TARGET_EFAULT;
10282             ret = get_errno(sys_getdents(arg1, dirp, count));
10283             if (!is_error(ret)) {
10284                 struct linux_dirent *de;
10285                 int len = ret;
10286                 int reclen;
10287                 de = dirp;
10288                 while (len > 0) {
10289                     reclen = de->d_reclen;
10290                     if (reclen > len)
10291                         break;
10292                     de->d_reclen = tswap16(reclen);
10293                     tswapls(&de->d_ino);
10294                     tswapls(&de->d_off);
10295                     de = (struct linux_dirent *)((char *)de + reclen);
10296                     len -= reclen;
10297                 }
10298             }
10299             unlock_user(dirp, arg2, ret);
10300         }
10301 #endif
10302 #else
10303         /* Implement getdents in terms of getdents64 */
10304         {
10305             struct linux_dirent64 *dirp;
10306             abi_long count = arg3;
10307 
10308             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10309             if (!dirp) {
10310                 return -TARGET_EFAULT;
10311             }
10312             ret = get_errno(sys_getdents64(arg1, dirp, count));
10313             if (!is_error(ret)) {
10314                 /* Convert the dirent64 structs to target dirent.  We do this
10315                  * in-place, since we can guarantee that a target_dirent is no
10316                  * larger than a dirent64; however this means we have to be
10317                  * careful to read everything before writing in the new format.
10318                  */
10319                 struct linux_dirent64 *de;
10320                 struct target_dirent *tde;
10321                 int len = ret;
10322                 int tlen = 0;
10323 
10324                 de = dirp;
10325                 tde = (struct target_dirent *)dirp;
10326                 while (len > 0) {
10327                     int namelen, treclen;
10328                     int reclen = de->d_reclen;
10329                     uint64_t ino = de->d_ino;
10330                     int64_t off = de->d_off;
10331                     uint8_t type = de->d_type;
10332 
10333                     namelen = strlen(de->d_name);
10334                     treclen = offsetof(struct target_dirent, d_name)
10335                         + namelen + 2;
10336                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10337 
10338                     memmove(tde->d_name, de->d_name, namelen + 1);
10339                     tde->d_ino = tswapal(ino);
10340                     tde->d_off = tswapal(off);
10341                     tde->d_reclen = tswap16(treclen);
10342                     /* The target_dirent type is in what was formerly a padding
10343                      * byte at the end of the structure:
10344                      */
10345                     *(((char *)tde) + treclen - 1) = type;
10346 
10347                     de = (struct linux_dirent64 *)((char *)de + reclen);
10348                     tde = (struct target_dirent *)((char *)tde + treclen);
10349                     len -= reclen;
10350                     tlen += treclen;
10351                 }
10352                 ret = tlen;
10353             }
10354             unlock_user(dirp, arg2, ret);
10355         }
10356 #endif
10357         return ret;
10358 #endif /* TARGET_NR_getdents */
10359 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10360     case TARGET_NR_getdents64:
10361         {
10362             struct linux_dirent64 *dirp;
10363             abi_long count = arg3;
10364             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10365                 return -TARGET_EFAULT;
10366             ret = get_errno(sys_getdents64(arg1, dirp, count));
10367             if (!is_error(ret)) {
10368                 struct linux_dirent64 *de;
10369                 int len = ret;
10370                 int reclen;
10371                 de = dirp;
10372                 while (len > 0) {
10373                     reclen = de->d_reclen;
10374                     if (reclen > len)
10375                         break;
10376                     de->d_reclen = tswap16(reclen);
10377                     tswap64s((uint64_t *)&de->d_ino);
10378                     tswap64s((uint64_t *)&de->d_off);
10379                     de = (struct linux_dirent64 *)((char *)de + reclen);
10380                     len -= reclen;
10381                 }
10382             }
10383             unlock_user(dirp, arg2, ret);
10384         }
10385         return ret;
10386 #endif /* TARGET_NR_getdents64 */
10387 #if defined(TARGET_NR__newselect)
10388     case TARGET_NR__newselect:
10389         return do_select(arg1, arg2, arg3, arg4, arg5);
10390 #endif
10391 #ifdef TARGET_NR_poll
10392     case TARGET_NR_poll:
10393         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10394 #endif
10395 #ifdef TARGET_NR_ppoll
10396     case TARGET_NR_ppoll:
10397         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10398 #endif
10399 #ifdef TARGET_NR_ppoll_time64
10400     case TARGET_NR_ppoll_time64:
10401         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10402 #endif
10403     case TARGET_NR_flock:
10404         /* NOTE: the flock constant seems to be the same for every
10405            Linux platform */
10406         return get_errno(safe_flock(arg1, arg2));
10407     case TARGET_NR_readv:
10408         {
10409             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10410             if (vec != NULL) {
10411                 ret = get_errno(safe_readv(arg1, vec, arg3));
10412                 unlock_iovec(vec, arg2, arg3, 1);
10413             } else {
10414                 ret = -host_to_target_errno(errno);
10415             }
10416         }
10417         return ret;
10418     case TARGET_NR_writev:
10419         {
10420             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10421             if (vec != NULL) {
10422                 ret = get_errno(safe_writev(arg1, vec, arg3));
10423                 unlock_iovec(vec, arg2, arg3, 0);
10424             } else {
10425                 ret = -host_to_target_errno(errno);
10426             }
10427         }
10428         return ret;
10429 #if defined(TARGET_NR_preadv)
10430     case TARGET_NR_preadv:
10431         {
10432             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10433             if (vec != NULL) {
10434                 unsigned long low, high;
10435 
10436                 target_to_host_low_high(arg4, arg5, &low, &high);
10437                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10438                 unlock_iovec(vec, arg2, arg3, 1);
10439             } else {
10440                 ret = -host_to_target_errno(errno);
10441            }
10442         }
10443         return ret;
10444 #endif
10445 #if defined(TARGET_NR_pwritev)
10446     case TARGET_NR_pwritev:
10447         {
10448             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10449             if (vec != NULL) {
10450                 unsigned long low, high;
10451 
10452                 target_to_host_low_high(arg4, arg5, &low, &high);
10453                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10454                 unlock_iovec(vec, arg2, arg3, 0);
10455             } else {
10456                 ret = -host_to_target_errno(errno);
10457            }
10458         }
10459         return ret;
10460 #endif
10461     case TARGET_NR_getsid:
10462         return get_errno(getsid(arg1));
10463 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10464     case TARGET_NR_fdatasync:
10465         return get_errno(fdatasync(arg1));
10466 #endif
10467     case TARGET_NR_sched_getaffinity:
10468         {
10469             unsigned int mask_size;
10470             unsigned long *mask;
10471 
10472             /*
10473              * sched_getaffinity needs multiples of ulong, so need to take
10474              * care of mismatches between target ulong and host ulong sizes.
10475              */
10476             if (arg2 & (sizeof(abi_ulong) - 1)) {
10477                 return -TARGET_EINVAL;
10478             }
10479             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10480 
10481             mask = alloca(mask_size);
10482             memset(mask, 0, mask_size);
10483             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10484 
10485             if (!is_error(ret)) {
10486                 if (ret > arg2) {
10487                     /* More data returned than the caller's buffer will fit.
10488                      * This only happens if sizeof(abi_long) < sizeof(long)
10489                      * and the caller passed us a buffer holding an odd number
10490                      * of abi_longs. If the host kernel is actually using the
10491                      * extra 4 bytes then fail EINVAL; otherwise we can just
10492                      * ignore them and only copy the interesting part.
10493                      */
10494                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10495                     if (numcpus > arg2 * 8) {
10496                         return -TARGET_EINVAL;
10497                     }
10498                     ret = arg2;
10499                 }
10500 
10501                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10502                     return -TARGET_EFAULT;
10503                 }
10504             }
10505         }
10506         return ret;
10507     case TARGET_NR_sched_setaffinity:
10508         {
10509             unsigned int mask_size;
10510             unsigned long *mask;
10511 
10512             /*
10513              * sched_setaffinity needs multiples of ulong, so need to take
10514              * care of mismatches between target ulong and host ulong sizes.
10515              */
10516             if (arg2 & (sizeof(abi_ulong) - 1)) {
10517                 return -TARGET_EINVAL;
10518             }
10519             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10520             mask = alloca(mask_size);
10521 
10522             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10523             if (ret) {
10524                 return ret;
10525             }
10526 
10527             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10528         }
10529     case TARGET_NR_getcpu:
10530         {
10531             unsigned cpu, node;
10532             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10533                                        arg2 ? &node : NULL,
10534                                        NULL));
10535             if (is_error(ret)) {
10536                 return ret;
10537             }
10538             if (arg1 && put_user_u32(cpu, arg1)) {
10539                 return -TARGET_EFAULT;
10540             }
10541             if (arg2 && put_user_u32(node, arg2)) {
10542                 return -TARGET_EFAULT;
10543             }
10544         }
10545         return ret;
10546     case TARGET_NR_sched_setparam:
10547         {
10548             struct sched_param *target_schp;
10549             struct sched_param schp;
10550 
10551             if (arg2 == 0) {
10552                 return -TARGET_EINVAL;
10553             }
10554             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10555                 return -TARGET_EFAULT;
10556             schp.sched_priority = tswap32(target_schp->sched_priority);
10557             unlock_user_struct(target_schp, arg2, 0);
10558             return get_errno(sched_setparam(arg1, &schp));
10559         }
10560     case TARGET_NR_sched_getparam:
10561         {
10562             struct sched_param *target_schp;
10563             struct sched_param schp;
10564 
10565             if (arg2 == 0) {
10566                 return -TARGET_EINVAL;
10567             }
10568             ret = get_errno(sched_getparam(arg1, &schp));
10569             if (!is_error(ret)) {
10570                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10571                     return -TARGET_EFAULT;
10572                 target_schp->sched_priority = tswap32(schp.sched_priority);
10573                 unlock_user_struct(target_schp, arg2, 1);
10574             }
10575         }
10576         return ret;
10577     case TARGET_NR_sched_setscheduler:
10578         {
10579             struct sched_param *target_schp;
10580             struct sched_param schp;
10581             if (arg3 == 0) {
10582                 return -TARGET_EINVAL;
10583             }
10584             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10585                 return -TARGET_EFAULT;
10586             schp.sched_priority = tswap32(target_schp->sched_priority);
10587             unlock_user_struct(target_schp, arg3, 0);
10588             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10589         }
10590     case TARGET_NR_sched_getscheduler:
10591         return get_errno(sched_getscheduler(arg1));
10592     case TARGET_NR_sched_yield:
10593         return get_errno(sched_yield());
10594     case TARGET_NR_sched_get_priority_max:
10595         return get_errno(sched_get_priority_max(arg1));
10596     case TARGET_NR_sched_get_priority_min:
10597         return get_errno(sched_get_priority_min(arg1));
10598 #ifdef TARGET_NR_sched_rr_get_interval
10599     case TARGET_NR_sched_rr_get_interval:
10600         {
10601             struct timespec ts;
10602             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10603             if (!is_error(ret)) {
10604                 ret = host_to_target_timespec(arg2, &ts);
10605             }
10606         }
10607         return ret;
10608 #endif
10609 #ifdef TARGET_NR_sched_rr_get_interval_time64
10610     case TARGET_NR_sched_rr_get_interval_time64:
10611         {
10612             struct timespec ts;
10613             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10614             if (!is_error(ret)) {
10615                 ret = host_to_target_timespec64(arg2, &ts);
10616             }
10617         }
10618         return ret;
10619 #endif
10620 #if defined(TARGET_NR_nanosleep)
10621     case TARGET_NR_nanosleep:
10622         {
10623             struct timespec req, rem;
10624             target_to_host_timespec(&req, arg1);
10625             ret = get_errno(safe_nanosleep(&req, &rem));
10626             if (is_error(ret) && arg2) {
10627                 host_to_target_timespec(arg2, &rem);
10628             }
10629         }
10630         return ret;
10631 #endif
10632     case TARGET_NR_prctl:
10633         switch (arg1) {
10634         case PR_GET_PDEATHSIG:
10635         {
10636             int deathsig;
10637             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10638             if (!is_error(ret) && arg2
10639                 && put_user_s32(deathsig, arg2)) {
10640                 return -TARGET_EFAULT;
10641             }
10642             return ret;
10643         }
10644 #ifdef PR_GET_NAME
10645         case PR_GET_NAME:
10646         {
10647             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10648             if (!name) {
10649                 return -TARGET_EFAULT;
10650             }
10651             ret = get_errno(prctl(arg1, (unsigned long)name,
10652                                   arg3, arg4, arg5));
10653             unlock_user(name, arg2, 16);
10654             return ret;
10655         }
10656         case PR_SET_NAME:
10657         {
10658             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10659             if (!name) {
10660                 return -TARGET_EFAULT;
10661             }
10662             ret = get_errno(prctl(arg1, (unsigned long)name,
10663                                   arg3, arg4, arg5));
10664             unlock_user(name, arg2, 0);
10665             return ret;
10666         }
10667 #endif
10668 #ifdef TARGET_MIPS
10669         case TARGET_PR_GET_FP_MODE:
10670         {
10671             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10672             ret = 0;
10673             if (env->CP0_Status & (1 << CP0St_FR)) {
10674                 ret |= TARGET_PR_FP_MODE_FR;
10675             }
10676             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10677                 ret |= TARGET_PR_FP_MODE_FRE;
10678             }
10679             return ret;
10680         }
10681         case TARGET_PR_SET_FP_MODE:
10682         {
10683             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10684             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10685             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10686             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10687             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10688 
10689             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10690                                             TARGET_PR_FP_MODE_FRE;
10691 
10692             /* If nothing to change, return right away, successfully.  */
10693             if (old_fr == new_fr && old_fre == new_fre) {
10694                 return 0;
10695             }
10696             /* Check the value is valid */
10697             if (arg2 & ~known_bits) {
10698                 return -TARGET_EOPNOTSUPP;
10699             }
10700             /* Setting FRE without FR is not supported.  */
10701             if (new_fre && !new_fr) {
10702                 return -TARGET_EOPNOTSUPP;
10703             }
10704             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10705                 /* FR1 is not supported */
10706                 return -TARGET_EOPNOTSUPP;
10707             }
10708             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10709                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10710                 /* cannot set FR=0 */
10711                 return -TARGET_EOPNOTSUPP;
10712             }
10713             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10714                 /* Cannot set FRE=1 */
10715                 return -TARGET_EOPNOTSUPP;
10716             }
10717 
10718             int i;
10719             fpr_t *fpr = env->active_fpu.fpr;
10720             for (i = 0; i < 32 ; i += 2) {
10721                 if (!old_fr && new_fr) {
10722                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10723                 } else if (old_fr && !new_fr) {
10724                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10725                 }
10726             }
10727 
10728             if (new_fr) {
10729                 env->CP0_Status |= (1 << CP0St_FR);
10730                 env->hflags |= MIPS_HFLAG_F64;
10731             } else {
10732                 env->CP0_Status &= ~(1 << CP0St_FR);
10733                 env->hflags &= ~MIPS_HFLAG_F64;
10734             }
10735             if (new_fre) {
10736                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10737                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10738                     env->hflags |= MIPS_HFLAG_FRE;
10739                 }
10740             } else {
10741                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10742                 env->hflags &= ~MIPS_HFLAG_FRE;
10743             }
10744 
10745             return 0;
10746         }
10747 #endif /* MIPS */
10748 #ifdef TARGET_AARCH64
10749         case TARGET_PR_SVE_SET_VL:
10750             /*
10751              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10752              * PR_SVE_VL_INHERIT.  Note the kernel definition
10753              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10754              * even though the current architectural maximum is VQ=16.
10755              */
10756             ret = -TARGET_EINVAL;
10757             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10758                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10759                 CPUARMState *env = cpu_env;
10760                 ARMCPU *cpu = env_archcpu(env);
10761                 uint32_t vq, old_vq;
10762 
10763                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10764                 vq = MAX(arg2 / 16, 1);
10765                 vq = MIN(vq, cpu->sve_max_vq);
10766 
10767                 if (vq < old_vq) {
10768                     aarch64_sve_narrow_vq(env, vq);
10769                 }
10770                 env->vfp.zcr_el[1] = vq - 1;
10771                 arm_rebuild_hflags(env);
10772                 ret = vq * 16;
10773             }
10774             return ret;
10775         case TARGET_PR_SVE_GET_VL:
10776             ret = -TARGET_EINVAL;
10777             {
10778                 ARMCPU *cpu = env_archcpu(cpu_env);
10779                 if (cpu_isar_feature(aa64_sve, cpu)) {
10780                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10781                 }
10782             }
10783             return ret;
10784         case TARGET_PR_PAC_RESET_KEYS:
10785             {
10786                 CPUARMState *env = cpu_env;
10787                 ARMCPU *cpu = env_archcpu(env);
10788 
10789                 if (arg3 || arg4 || arg5) {
10790                     return -TARGET_EINVAL;
10791                 }
10792                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10793                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10794                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10795                                TARGET_PR_PAC_APGAKEY);
10796                     int ret = 0;
10797                     Error *err = NULL;
10798 
10799                     if (arg2 == 0) {
10800                         arg2 = all;
10801                     } else if (arg2 & ~all) {
10802                         return -TARGET_EINVAL;
10803                     }
10804                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10805                         ret |= qemu_guest_getrandom(&env->keys.apia,
10806                                                     sizeof(ARMPACKey), &err);
10807                     }
10808                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10809                         ret |= qemu_guest_getrandom(&env->keys.apib,
10810                                                     sizeof(ARMPACKey), &err);
10811                     }
10812                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10813                         ret |= qemu_guest_getrandom(&env->keys.apda,
10814                                                     sizeof(ARMPACKey), &err);
10815                     }
10816                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10817                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10818                                                     sizeof(ARMPACKey), &err);
10819                     }
10820                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10821                         ret |= qemu_guest_getrandom(&env->keys.apga,
10822                                                     sizeof(ARMPACKey), &err);
10823                     }
10824                     if (ret != 0) {
10825                         /*
10826                          * Some unknown failure in the crypto.  The best
10827                          * we can do is log it and fail the syscall.
10828                          * The real syscall cannot fail this way.
10829                          */
10830                         qemu_log_mask(LOG_UNIMP,
10831                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10832                                       error_get_pretty(err));
10833                         error_free(err);
10834                         return -TARGET_EIO;
10835                     }
10836                     return 0;
10837                 }
10838             }
10839             return -TARGET_EINVAL;
10840         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10841             {
10842                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10843                 CPUARMState *env = cpu_env;
10844                 ARMCPU *cpu = env_archcpu(env);
10845 
10846                 if (cpu_isar_feature(aa64_mte, cpu)) {
10847                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10848                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10849                 }
10850 
10851                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10852                     return -TARGET_EINVAL;
10853                 }
10854                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10855 
10856                 if (cpu_isar_feature(aa64_mte, cpu)) {
10857                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10858                     case TARGET_PR_MTE_TCF_NONE:
10859                     case TARGET_PR_MTE_TCF_SYNC:
10860                     case TARGET_PR_MTE_TCF_ASYNC:
10861                         break;
10862                     default:
10863                         return -EINVAL;
10864                     }
10865 
10866                     /*
10867                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10868                      * Note that the syscall values are consistent with hw.
10869                      */
10870                     env->cp15.sctlr_el[1] =
10871                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10872                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10873 
10874                     /*
10875                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10876                      * Note that the syscall uses an include mask,
10877                      * and hardware uses an exclude mask -- invert.
10878                      */
10879                     env->cp15.gcr_el1 =
10880                         deposit64(env->cp15.gcr_el1, 0, 16,
10881                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10882                     arm_rebuild_hflags(env);
10883                 }
10884                 return 0;
10885             }
10886         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10887             {
10888                 abi_long ret = 0;
10889                 CPUARMState *env = cpu_env;
10890                 ARMCPU *cpu = env_archcpu(env);
10891 
10892                 if (arg2 || arg3 || arg4 || arg5) {
10893                     return -TARGET_EINVAL;
10894                 }
10895                 if (env->tagged_addr_enable) {
10896                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10897                 }
10898                 if (cpu_isar_feature(aa64_mte, cpu)) {
10899                     /* See above. */
10900                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10901                             << TARGET_PR_MTE_TCF_SHIFT);
10902                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10903                                     ~env->cp15.gcr_el1);
10904                 }
10905                 return ret;
10906             }
10907 #endif /* AARCH64 */
10908         case PR_GET_SECCOMP:
10909         case PR_SET_SECCOMP:
10910             /* Disable seccomp to prevent the target disabling syscalls we
10911              * need. */
10912             return -TARGET_EINVAL;
10913         default:
10914             /* Most prctl options have no pointer arguments */
10915             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10916         }
10917         break;
10918 #ifdef TARGET_NR_arch_prctl
10919     case TARGET_NR_arch_prctl:
10920         return do_arch_prctl(cpu_env, arg1, arg2);
10921 #endif
10922 #ifdef TARGET_NR_pread64
10923     case TARGET_NR_pread64:
10924         if (regpairs_aligned(cpu_env, num)) {
10925             arg4 = arg5;
10926             arg5 = arg6;
10927         }
10928         if (arg2 == 0 && arg3 == 0) {
10929             /* Special-case NULL buffer and zero length, which should succeed */
10930             p = 0;
10931         } else {
10932             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10933             if (!p) {
10934                 return -TARGET_EFAULT;
10935             }
10936         }
10937         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10938         unlock_user(p, arg2, ret);
10939         return ret;
10940     case TARGET_NR_pwrite64:
10941         if (regpairs_aligned(cpu_env, num)) {
10942             arg4 = arg5;
10943             arg5 = arg6;
10944         }
10945         if (arg2 == 0 && arg3 == 0) {
10946             /* Special-case NULL buffer and zero length, which should succeed */
10947             p = 0;
10948         } else {
10949             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10950             if (!p) {
10951                 return -TARGET_EFAULT;
10952             }
10953         }
10954         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10955         unlock_user(p, arg2, 0);
10956         return ret;
10957 #endif
10958     case TARGET_NR_getcwd:
10959         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10960             return -TARGET_EFAULT;
10961         ret = get_errno(sys_getcwd1(p, arg2));
10962         unlock_user(p, arg1, ret);
10963         return ret;
10964     case TARGET_NR_capget:
10965     case TARGET_NR_capset:
10966     {
10967         struct target_user_cap_header *target_header;
10968         struct target_user_cap_data *target_data = NULL;
10969         struct __user_cap_header_struct header;
10970         struct __user_cap_data_struct data[2];
10971         struct __user_cap_data_struct *dataptr = NULL;
10972         int i, target_datalen;
10973         int data_items = 1;
10974 
10975         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10976             return -TARGET_EFAULT;
10977         }
10978         header.version = tswap32(target_header->version);
10979         header.pid = tswap32(target_header->pid);
10980 
10981         if (header.version != _LINUX_CAPABILITY_VERSION) {
10982             /* Version 2 and up takes pointer to two user_data structs */
10983             data_items = 2;
10984         }
10985 
10986         target_datalen = sizeof(*target_data) * data_items;
10987 
10988         if (arg2) {
10989             if (num == TARGET_NR_capget) {
10990                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10991             } else {
10992                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10993             }
10994             if (!target_data) {
10995                 unlock_user_struct(target_header, arg1, 0);
10996                 return -TARGET_EFAULT;
10997             }
10998 
10999             if (num == TARGET_NR_capset) {
11000                 for (i = 0; i < data_items; i++) {
11001                     data[i].effective = tswap32(target_data[i].effective);
11002                     data[i].permitted = tswap32(target_data[i].permitted);
11003                     data[i].inheritable = tswap32(target_data[i].inheritable);
11004                 }
11005             }
11006 
11007             dataptr = data;
11008         }
11009 
11010         if (num == TARGET_NR_capget) {
11011             ret = get_errno(capget(&header, dataptr));
11012         } else {
11013             ret = get_errno(capset(&header, dataptr));
11014         }
11015 
11016         /* The kernel always updates version for both capget and capset */
11017         target_header->version = tswap32(header.version);
11018         unlock_user_struct(target_header, arg1, 1);
11019 
11020         if (arg2) {
11021             if (num == TARGET_NR_capget) {
11022                 for (i = 0; i < data_items; i++) {
11023                     target_data[i].effective = tswap32(data[i].effective);
11024                     target_data[i].permitted = tswap32(data[i].permitted);
11025                     target_data[i].inheritable = tswap32(data[i].inheritable);
11026                 }
11027                 unlock_user(target_data, arg2, target_datalen);
11028             } else {
11029                 unlock_user(target_data, arg2, 0);
11030             }
11031         }
11032         return ret;
11033     }
11034     case TARGET_NR_sigaltstack:
11035         return do_sigaltstack(arg1, arg2, cpu_env);
11036 
11037 #ifdef CONFIG_SENDFILE
11038 #ifdef TARGET_NR_sendfile
11039     case TARGET_NR_sendfile:
11040     {
11041         off_t *offp = NULL;
11042         off_t off;
11043         if (arg3) {
11044             ret = get_user_sal(off, arg3);
11045             if (is_error(ret)) {
11046                 return ret;
11047             }
11048             offp = &off;
11049         }
11050         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11051         if (!is_error(ret) && arg3) {
11052             abi_long ret2 = put_user_sal(off, arg3);
11053             if (is_error(ret2)) {
11054                 ret = ret2;
11055             }
11056         }
11057         return ret;
11058     }
11059 #endif
11060 #ifdef TARGET_NR_sendfile64
11061     case TARGET_NR_sendfile64:
11062     {
11063         off_t *offp = NULL;
11064         off_t off;
11065         if (arg3) {
11066             ret = get_user_s64(off, arg3);
11067             if (is_error(ret)) {
11068                 return ret;
11069             }
11070             offp = &off;
11071         }
11072         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11073         if (!is_error(ret) && arg3) {
11074             abi_long ret2 = put_user_s64(off, arg3);
11075             if (is_error(ret2)) {
11076                 ret = ret2;
11077             }
11078         }
11079         return ret;
11080     }
11081 #endif
11082 #endif
11083 #ifdef TARGET_NR_vfork
11084     case TARGET_NR_vfork:
11085         return get_errno(do_fork(cpu_env,
11086                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11087                          0, 0, 0, 0));
11088 #endif
11089 #ifdef TARGET_NR_ugetrlimit
11090     case TARGET_NR_ugetrlimit:
11091     {
11092 	struct rlimit rlim;
11093 	int resource = target_to_host_resource(arg1);
11094 	ret = get_errno(getrlimit(resource, &rlim));
11095 	if (!is_error(ret)) {
11096 	    struct target_rlimit *target_rlim;
11097             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11098                 return -TARGET_EFAULT;
11099 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11100 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11101             unlock_user_struct(target_rlim, arg2, 1);
11102 	}
11103         return ret;
11104     }
11105 #endif
11106 #ifdef TARGET_NR_truncate64
11107     case TARGET_NR_truncate64:
11108         if (!(p = lock_user_string(arg1)))
11109             return -TARGET_EFAULT;
11110 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11111         unlock_user(p, arg1, 0);
11112         return ret;
11113 #endif
11114 #ifdef TARGET_NR_ftruncate64
11115     case TARGET_NR_ftruncate64:
11116         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11117 #endif
11118 #ifdef TARGET_NR_stat64
11119     case TARGET_NR_stat64:
11120         if (!(p = lock_user_string(arg1))) {
11121             return -TARGET_EFAULT;
11122         }
11123         ret = get_errno(stat(path(p), &st));
11124         unlock_user(p, arg1, 0);
11125         if (!is_error(ret))
11126             ret = host_to_target_stat64(cpu_env, arg2, &st);
11127         return ret;
11128 #endif
11129 #ifdef TARGET_NR_lstat64
11130     case TARGET_NR_lstat64:
11131         if (!(p = lock_user_string(arg1))) {
11132             return -TARGET_EFAULT;
11133         }
11134         ret = get_errno(lstat(path(p), &st));
11135         unlock_user(p, arg1, 0);
11136         if (!is_error(ret))
11137             ret = host_to_target_stat64(cpu_env, arg2, &st);
11138         return ret;
11139 #endif
11140 #ifdef TARGET_NR_fstat64
11141     case TARGET_NR_fstat64:
11142         ret = get_errno(fstat(arg1, &st));
11143         if (!is_error(ret))
11144             ret = host_to_target_stat64(cpu_env, arg2, &st);
11145         return ret;
11146 #endif
11147 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11148 #ifdef TARGET_NR_fstatat64
11149     case TARGET_NR_fstatat64:
11150 #endif
11151 #ifdef TARGET_NR_newfstatat
11152     case TARGET_NR_newfstatat:
11153 #endif
11154         if (!(p = lock_user_string(arg2))) {
11155             return -TARGET_EFAULT;
11156         }
11157         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11158         unlock_user(p, arg2, 0);
11159         if (!is_error(ret))
11160             ret = host_to_target_stat64(cpu_env, arg3, &st);
11161         return ret;
11162 #endif
11163 #if defined(TARGET_NR_statx)
11164     case TARGET_NR_statx:
11165         {
11166             struct target_statx *target_stx;
11167             int dirfd = arg1;
11168             int flags = arg3;
11169 
11170             p = lock_user_string(arg2);
11171             if (p == NULL) {
11172                 return -TARGET_EFAULT;
11173             }
11174 #if defined(__NR_statx)
11175             {
11176                 /*
11177                  * It is assumed that struct statx is architecture independent.
11178                  */
11179                 struct target_statx host_stx;
11180                 int mask = arg4;
11181 
11182                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11183                 if (!is_error(ret)) {
11184                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11185                         unlock_user(p, arg2, 0);
11186                         return -TARGET_EFAULT;
11187                     }
11188                 }
11189 
11190                 if (ret != -TARGET_ENOSYS) {
11191                     unlock_user(p, arg2, 0);
11192                     return ret;
11193                 }
11194             }
11195 #endif
11196             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11197             unlock_user(p, arg2, 0);
11198 
11199             if (!is_error(ret)) {
11200                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11201                     return -TARGET_EFAULT;
11202                 }
11203                 memset(target_stx, 0, sizeof(*target_stx));
11204                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11205                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11206                 __put_user(st.st_ino, &target_stx->stx_ino);
11207                 __put_user(st.st_mode, &target_stx->stx_mode);
11208                 __put_user(st.st_uid, &target_stx->stx_uid);
11209                 __put_user(st.st_gid, &target_stx->stx_gid);
11210                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11211                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11212                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11213                 __put_user(st.st_size, &target_stx->stx_size);
11214                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11215                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11216                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11217                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11218                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11219                 unlock_user_struct(target_stx, arg5, 1);
11220             }
11221         }
11222         return ret;
11223 #endif
11224 #ifdef TARGET_NR_lchown
11225     case TARGET_NR_lchown:
11226         if (!(p = lock_user_string(arg1)))
11227             return -TARGET_EFAULT;
11228         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11229         unlock_user(p, arg1, 0);
11230         return ret;
11231 #endif
11232 #ifdef TARGET_NR_getuid
11233     case TARGET_NR_getuid:
11234         return get_errno(high2lowuid(getuid()));
11235 #endif
11236 #ifdef TARGET_NR_getgid
11237     case TARGET_NR_getgid:
11238         return get_errno(high2lowgid(getgid()));
11239 #endif
11240 #ifdef TARGET_NR_geteuid
11241     case TARGET_NR_geteuid:
11242         return get_errno(high2lowuid(geteuid()));
11243 #endif
11244 #ifdef TARGET_NR_getegid
11245     case TARGET_NR_getegid:
11246         return get_errno(high2lowgid(getegid()));
11247 #endif
11248     case TARGET_NR_setreuid:
11249         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11250     case TARGET_NR_setregid:
11251         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11252     case TARGET_NR_getgroups:
11253         {
11254             int gidsetsize = arg1;
11255             target_id *target_grouplist;
11256             gid_t *grouplist;
11257             int i;
11258 
11259             grouplist = alloca(gidsetsize * sizeof(gid_t));
11260             ret = get_errno(getgroups(gidsetsize, grouplist));
11261             if (gidsetsize == 0)
11262                 return ret;
11263             if (!is_error(ret)) {
11264                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11265                 if (!target_grouplist)
11266                     return -TARGET_EFAULT;
11267                 for(i = 0;i < ret; i++)
11268                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11269                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11270             }
11271         }
11272         return ret;
11273     case TARGET_NR_setgroups:
11274         {
11275             int gidsetsize = arg1;
11276             target_id *target_grouplist;
11277             gid_t *grouplist = NULL;
11278             int i;
11279             if (gidsetsize) {
11280                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11281                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11282                 if (!target_grouplist) {
11283                     return -TARGET_EFAULT;
11284                 }
11285                 for (i = 0; i < gidsetsize; i++) {
11286                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11287                 }
11288                 unlock_user(target_grouplist, arg2, 0);
11289             }
11290             return get_errno(setgroups(gidsetsize, grouplist));
11291         }
11292     case TARGET_NR_fchown:
11293         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11294 #if defined(TARGET_NR_fchownat)
11295     case TARGET_NR_fchownat:
11296         if (!(p = lock_user_string(arg2)))
11297             return -TARGET_EFAULT;
11298         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11299                                  low2highgid(arg4), arg5));
11300         unlock_user(p, arg2, 0);
11301         return ret;
11302 #endif
11303 #ifdef TARGET_NR_setresuid
11304     case TARGET_NR_setresuid:
11305         return get_errno(sys_setresuid(low2highuid(arg1),
11306                                        low2highuid(arg2),
11307                                        low2highuid(arg3)));
11308 #endif
11309 #ifdef TARGET_NR_getresuid
11310     case TARGET_NR_getresuid:
11311         {
11312             uid_t ruid, euid, suid;
11313             ret = get_errno(getresuid(&ruid, &euid, &suid));
11314             if (!is_error(ret)) {
11315                 if (put_user_id(high2lowuid(ruid), arg1)
11316                     || put_user_id(high2lowuid(euid), arg2)
11317                     || put_user_id(high2lowuid(suid), arg3))
11318                     return -TARGET_EFAULT;
11319             }
11320         }
11321         return ret;
11322 #endif
11323 #ifdef TARGET_NR_getresgid
11324     case TARGET_NR_setresgid:
11325         return get_errno(sys_setresgid(low2highgid(arg1),
11326                                        low2highgid(arg2),
11327                                        low2highgid(arg3)));
11328 #endif
11329 #ifdef TARGET_NR_getresgid
11330     case TARGET_NR_getresgid:
11331         {
11332             gid_t rgid, egid, sgid;
11333             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11334             if (!is_error(ret)) {
11335                 if (put_user_id(high2lowgid(rgid), arg1)
11336                     || put_user_id(high2lowgid(egid), arg2)
11337                     || put_user_id(high2lowgid(sgid), arg3))
11338                     return -TARGET_EFAULT;
11339             }
11340         }
11341         return ret;
11342 #endif
11343 #ifdef TARGET_NR_chown
11344     case TARGET_NR_chown:
11345         if (!(p = lock_user_string(arg1)))
11346             return -TARGET_EFAULT;
11347         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11348         unlock_user(p, arg1, 0);
11349         return ret;
11350 #endif
11351     case TARGET_NR_setuid:
11352         return get_errno(sys_setuid(low2highuid(arg1)));
11353     case TARGET_NR_setgid:
11354         return get_errno(sys_setgid(low2highgid(arg1)));
11355     case TARGET_NR_setfsuid:
11356         return get_errno(setfsuid(arg1));
11357     case TARGET_NR_setfsgid:
11358         return get_errno(setfsgid(arg1));
11359 
11360 #ifdef TARGET_NR_lchown32
11361     case TARGET_NR_lchown32:
11362         if (!(p = lock_user_string(arg1)))
11363             return -TARGET_EFAULT;
11364         ret = get_errno(lchown(p, arg2, arg3));
11365         unlock_user(p, arg1, 0);
11366         return ret;
11367 #endif
11368 #ifdef TARGET_NR_getuid32
11369     case TARGET_NR_getuid32:
11370         return get_errno(getuid());
11371 #endif
11372 
11373 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11374    /* Alpha specific */
11375     case TARGET_NR_getxuid:
11376          {
11377             uid_t euid;
11378             euid=geteuid();
11379             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11380          }
11381         return get_errno(getuid());
11382 #endif
11383 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11384    /* Alpha specific */
11385     case TARGET_NR_getxgid:
11386          {
11387             uid_t egid;
11388             egid=getegid();
11389             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11390          }
11391         return get_errno(getgid());
11392 #endif
11393 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11394     /* Alpha specific */
11395     case TARGET_NR_osf_getsysinfo:
11396         ret = -TARGET_EOPNOTSUPP;
11397         switch (arg1) {
11398           case TARGET_GSI_IEEE_FP_CONTROL:
11399             {
11400                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11401                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11402 
11403                 swcr &= ~SWCR_STATUS_MASK;
11404                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11405 
11406                 if (put_user_u64 (swcr, arg2))
11407                         return -TARGET_EFAULT;
11408                 ret = 0;
11409             }
11410             break;
11411 
11412           /* case GSI_IEEE_STATE_AT_SIGNAL:
11413              -- Not implemented in linux kernel.
11414              case GSI_UACPROC:
11415              -- Retrieves current unaligned access state; not much used.
11416              case GSI_PROC_TYPE:
11417              -- Retrieves implver information; surely not used.
11418              case GSI_GET_HWRPB:
11419              -- Grabs a copy of the HWRPB; surely not used.
11420           */
11421         }
11422         return ret;
11423 #endif
11424 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11425     /* Alpha specific */
11426     case TARGET_NR_osf_setsysinfo:
11427         ret = -TARGET_EOPNOTSUPP;
11428         switch (arg1) {
11429           case TARGET_SSI_IEEE_FP_CONTROL:
11430             {
11431                 uint64_t swcr, fpcr;
11432 
11433                 if (get_user_u64 (swcr, arg2)) {
11434                     return -TARGET_EFAULT;
11435                 }
11436 
11437                 /*
11438                  * The kernel calls swcr_update_status to update the
11439                  * status bits from the fpcr at every point that it
11440                  * could be queried.  Therefore, we store the status
11441                  * bits only in FPCR.
11442                  */
11443                 ((CPUAlphaState *)cpu_env)->swcr
11444                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11445 
11446                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11447                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11448                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11449                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11450                 ret = 0;
11451             }
11452             break;
11453 
11454           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11455             {
11456                 uint64_t exc, fpcr, fex;
11457 
11458                 if (get_user_u64(exc, arg2)) {
11459                     return -TARGET_EFAULT;
11460                 }
11461                 exc &= SWCR_STATUS_MASK;
11462                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11463 
11464                 /* Old exceptions are not signaled.  */
11465                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11466                 fex = exc & ~fex;
11467                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11468                 fex &= ((CPUArchState *)cpu_env)->swcr;
11469 
11470                 /* Update the hardware fpcr.  */
11471                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11472                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11473 
11474                 if (fex) {
11475                     int si_code = TARGET_FPE_FLTUNK;
11476                     target_siginfo_t info;
11477 
11478                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11479                         si_code = TARGET_FPE_FLTUND;
11480                     }
11481                     if (fex & SWCR_TRAP_ENABLE_INE) {
11482                         si_code = TARGET_FPE_FLTRES;
11483                     }
11484                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11485                         si_code = TARGET_FPE_FLTUND;
11486                     }
11487                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11488                         si_code = TARGET_FPE_FLTOVF;
11489                     }
11490                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11491                         si_code = TARGET_FPE_FLTDIV;
11492                     }
11493                     if (fex & SWCR_TRAP_ENABLE_INV) {
11494                         si_code = TARGET_FPE_FLTINV;
11495                     }
11496 
11497                     info.si_signo = SIGFPE;
11498                     info.si_errno = 0;
11499                     info.si_code = si_code;
11500                     info._sifields._sigfault._addr
11501                         = ((CPUArchState *)cpu_env)->pc;
11502                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11503                                  QEMU_SI_FAULT, &info);
11504                 }
11505                 ret = 0;
11506             }
11507             break;
11508 
11509           /* case SSI_NVPAIRS:
11510              -- Used with SSIN_UACPROC to enable unaligned accesses.
11511              case SSI_IEEE_STATE_AT_SIGNAL:
11512              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11513              -- Not implemented in linux kernel
11514           */
11515         }
11516         return ret;
11517 #endif
11518 #ifdef TARGET_NR_osf_sigprocmask
11519     /* Alpha specific.  */
11520     case TARGET_NR_osf_sigprocmask:
11521         {
11522             abi_ulong mask;
11523             int how;
11524             sigset_t set, oldset;
11525 
11526             switch(arg1) {
11527             case TARGET_SIG_BLOCK:
11528                 how = SIG_BLOCK;
11529                 break;
11530             case TARGET_SIG_UNBLOCK:
11531                 how = SIG_UNBLOCK;
11532                 break;
11533             case TARGET_SIG_SETMASK:
11534                 how = SIG_SETMASK;
11535                 break;
11536             default:
11537                 return -TARGET_EINVAL;
11538             }
11539             mask = arg2;
11540             target_to_host_old_sigset(&set, &mask);
11541             ret = do_sigprocmask(how, &set, &oldset);
11542             if (!ret) {
11543                 host_to_target_old_sigset(&mask, &oldset);
11544                 ret = mask;
11545             }
11546         }
11547         return ret;
11548 #endif
11549 
11550 #ifdef TARGET_NR_getgid32
11551     case TARGET_NR_getgid32:
11552         return get_errno(getgid());
11553 #endif
11554 #ifdef TARGET_NR_geteuid32
11555     case TARGET_NR_geteuid32:
11556         return get_errno(geteuid());
11557 #endif
11558 #ifdef TARGET_NR_getegid32
11559     case TARGET_NR_getegid32:
11560         return get_errno(getegid());
11561 #endif
11562 #ifdef TARGET_NR_setreuid32
11563     case TARGET_NR_setreuid32:
11564         return get_errno(setreuid(arg1, arg2));
11565 #endif
11566 #ifdef TARGET_NR_setregid32
11567     case TARGET_NR_setregid32:
11568         return get_errno(setregid(arg1, arg2));
11569 #endif
11570 #ifdef TARGET_NR_getgroups32
11571     case TARGET_NR_getgroups32:
11572         {
11573             int gidsetsize = arg1;
11574             uint32_t *target_grouplist;
11575             gid_t *grouplist;
11576             int i;
11577 
11578             grouplist = alloca(gidsetsize * sizeof(gid_t));
11579             ret = get_errno(getgroups(gidsetsize, grouplist));
11580             if (gidsetsize == 0)
11581                 return ret;
11582             if (!is_error(ret)) {
11583                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11584                 if (!target_grouplist) {
11585                     return -TARGET_EFAULT;
11586                 }
11587                 for(i = 0;i < ret; i++)
11588                     target_grouplist[i] = tswap32(grouplist[i]);
11589                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11590             }
11591         }
11592         return ret;
11593 #endif
11594 #ifdef TARGET_NR_setgroups32
11595     case TARGET_NR_setgroups32:
11596         {
11597             int gidsetsize = arg1;
11598             uint32_t *target_grouplist;
11599             gid_t *grouplist;
11600             int i;
11601 
11602             grouplist = alloca(gidsetsize * sizeof(gid_t));
11603             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11604             if (!target_grouplist) {
11605                 return -TARGET_EFAULT;
11606             }
11607             for(i = 0;i < gidsetsize; i++)
11608                 grouplist[i] = tswap32(target_grouplist[i]);
11609             unlock_user(target_grouplist, arg2, 0);
11610             return get_errno(setgroups(gidsetsize, grouplist));
11611         }
11612 #endif
11613 #ifdef TARGET_NR_fchown32
11614     case TARGET_NR_fchown32:
11615         return get_errno(fchown(arg1, arg2, arg3));
11616 #endif
11617 #ifdef TARGET_NR_setresuid32
11618     case TARGET_NR_setresuid32:
11619         return get_errno(sys_setresuid(arg1, arg2, arg3));
11620 #endif
11621 #ifdef TARGET_NR_getresuid32
11622     case TARGET_NR_getresuid32:
11623         {
11624             uid_t ruid, euid, suid;
11625             ret = get_errno(getresuid(&ruid, &euid, &suid));
11626             if (!is_error(ret)) {
11627                 if (put_user_u32(ruid, arg1)
11628                     || put_user_u32(euid, arg2)
11629                     || put_user_u32(suid, arg3))
11630                     return -TARGET_EFAULT;
11631             }
11632         }
11633         return ret;
11634 #endif
11635 #ifdef TARGET_NR_setresgid32
11636     case TARGET_NR_setresgid32:
11637         return get_errno(sys_setresgid(arg1, arg2, arg3));
11638 #endif
11639 #ifdef TARGET_NR_getresgid32
11640     case TARGET_NR_getresgid32:
11641         {
11642             gid_t rgid, egid, sgid;
11643             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11644             if (!is_error(ret)) {
11645                 if (put_user_u32(rgid, arg1)
11646                     || put_user_u32(egid, arg2)
11647                     || put_user_u32(sgid, arg3))
11648                     return -TARGET_EFAULT;
11649             }
11650         }
11651         return ret;
11652 #endif
11653 #ifdef TARGET_NR_chown32
11654     case TARGET_NR_chown32:
11655         if (!(p = lock_user_string(arg1)))
11656             return -TARGET_EFAULT;
11657         ret = get_errno(chown(p, arg2, arg3));
11658         unlock_user(p, arg1, 0);
11659         return ret;
11660 #endif
11661 #ifdef TARGET_NR_setuid32
11662     case TARGET_NR_setuid32:
11663         return get_errno(sys_setuid(arg1));
11664 #endif
11665 #ifdef TARGET_NR_setgid32
11666     case TARGET_NR_setgid32:
11667         return get_errno(sys_setgid(arg1));
11668 #endif
11669 #ifdef TARGET_NR_setfsuid32
11670     case TARGET_NR_setfsuid32:
11671         return get_errno(setfsuid(arg1));
11672 #endif
11673 #ifdef TARGET_NR_setfsgid32
11674     case TARGET_NR_setfsgid32:
11675         return get_errno(setfsgid(arg1));
11676 #endif
11677 #ifdef TARGET_NR_mincore
11678     case TARGET_NR_mincore:
11679         {
11680             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11681             if (!a) {
11682                 return -TARGET_ENOMEM;
11683             }
11684             p = lock_user_string(arg3);
11685             if (!p) {
11686                 ret = -TARGET_EFAULT;
11687             } else {
11688                 ret = get_errno(mincore(a, arg2, p));
11689                 unlock_user(p, arg3, ret);
11690             }
11691             unlock_user(a, arg1, 0);
11692         }
11693         return ret;
11694 #endif
11695 #ifdef TARGET_NR_arm_fadvise64_64
11696     case TARGET_NR_arm_fadvise64_64:
11697         /* arm_fadvise64_64 looks like fadvise64_64 but
11698          * with different argument order: fd, advice, offset, len
11699          * rather than the usual fd, offset, len, advice.
11700          * Note that offset and len are both 64-bit so appear as
11701          * pairs of 32-bit registers.
11702          */
11703         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11704                             target_offset64(arg5, arg6), arg2);
11705         return -host_to_target_errno(ret);
11706 #endif
11707 
11708 #if TARGET_ABI_BITS == 32
11709 
11710 #ifdef TARGET_NR_fadvise64_64
11711     case TARGET_NR_fadvise64_64:
11712 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11713         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11714         ret = arg2;
11715         arg2 = arg3;
11716         arg3 = arg4;
11717         arg4 = arg5;
11718         arg5 = arg6;
11719         arg6 = ret;
11720 #else
11721         /* 6 args: fd, offset (high, low), len (high, low), advice */
11722         if (regpairs_aligned(cpu_env, num)) {
11723             /* offset is in (3,4), len in (5,6) and advice in 7 */
11724             arg2 = arg3;
11725             arg3 = arg4;
11726             arg4 = arg5;
11727             arg5 = arg6;
11728             arg6 = arg7;
11729         }
11730 #endif
11731         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11732                             target_offset64(arg4, arg5), arg6);
11733         return -host_to_target_errno(ret);
11734 #endif
11735 
11736 #ifdef TARGET_NR_fadvise64
11737     case TARGET_NR_fadvise64:
11738         /* 5 args: fd, offset (high, low), len, advice */
11739         if (regpairs_aligned(cpu_env, num)) {
11740             /* offset is in (3,4), len in 5 and advice in 6 */
11741             arg2 = arg3;
11742             arg3 = arg4;
11743             arg4 = arg5;
11744             arg5 = arg6;
11745         }
11746         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11747         return -host_to_target_errno(ret);
11748 #endif
11749 
11750 #else /* not a 32-bit ABI */
11751 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11752 #ifdef TARGET_NR_fadvise64_64
11753     case TARGET_NR_fadvise64_64:
11754 #endif
11755 #ifdef TARGET_NR_fadvise64
11756     case TARGET_NR_fadvise64:
11757 #endif
11758 #ifdef TARGET_S390X
11759         switch (arg4) {
11760         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11761         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11762         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11763         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11764         default: break;
11765         }
11766 #endif
11767         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11768 #endif
11769 #endif /* end of 64-bit ABI fadvise handling */
11770 
11771 #ifdef TARGET_NR_madvise
11772     case TARGET_NR_madvise:
11773         /* A straight passthrough may not be safe because qemu sometimes
11774            turns private file-backed mappings into anonymous mappings.
11775            This will break MADV_DONTNEED.
11776            This is a hint, so ignoring and returning success is ok.  */
11777         return 0;
11778 #endif
11779 #ifdef TARGET_NR_fcntl64
11780     case TARGET_NR_fcntl64:
11781     {
11782         int cmd;
11783         struct flock64 fl;
11784         from_flock64_fn *copyfrom = copy_from_user_flock64;
11785         to_flock64_fn *copyto = copy_to_user_flock64;
11786 
11787 #ifdef TARGET_ARM
11788         if (!((CPUARMState *)cpu_env)->eabi) {
11789             copyfrom = copy_from_user_oabi_flock64;
11790             copyto = copy_to_user_oabi_flock64;
11791         }
11792 #endif
11793 
11794         cmd = target_to_host_fcntl_cmd(arg2);
11795         if (cmd == -TARGET_EINVAL) {
11796             return cmd;
11797         }
11798 
11799         switch(arg2) {
11800         case TARGET_F_GETLK64:
11801             ret = copyfrom(&fl, arg3);
11802             if (ret) {
11803                 break;
11804             }
11805             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11806             if (ret == 0) {
11807                 ret = copyto(arg3, &fl);
11808             }
11809 	    break;
11810 
11811         case TARGET_F_SETLK64:
11812         case TARGET_F_SETLKW64:
11813             ret = copyfrom(&fl, arg3);
11814             if (ret) {
11815                 break;
11816             }
11817             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11818 	    break;
11819         default:
11820             ret = do_fcntl(arg1, arg2, arg3);
11821             break;
11822         }
11823         return ret;
11824     }
11825 #endif
11826 #ifdef TARGET_NR_cacheflush
11827     case TARGET_NR_cacheflush:
11828         /* self-modifying code is handled automatically, so nothing needed */
11829         return 0;
11830 #endif
11831 #ifdef TARGET_NR_getpagesize
11832     case TARGET_NR_getpagesize:
11833         return TARGET_PAGE_SIZE;
11834 #endif
11835     case TARGET_NR_gettid:
11836         return get_errno(sys_gettid());
11837 #ifdef TARGET_NR_readahead
11838     case TARGET_NR_readahead:
11839 #if TARGET_ABI_BITS == 32
11840         if (regpairs_aligned(cpu_env, num)) {
11841             arg2 = arg3;
11842             arg3 = arg4;
11843             arg4 = arg5;
11844         }
11845         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11846 #else
11847         ret = get_errno(readahead(arg1, arg2, arg3));
11848 #endif
11849         return ret;
11850 #endif
11851 #ifdef CONFIG_ATTR
11852 #ifdef TARGET_NR_setxattr
11853     case TARGET_NR_listxattr:
11854     case TARGET_NR_llistxattr:
11855     {
11856         void *p, *b = 0;
11857         if (arg2) {
11858             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11859             if (!b) {
11860                 return -TARGET_EFAULT;
11861             }
11862         }
11863         p = lock_user_string(arg1);
11864         if (p) {
11865             if (num == TARGET_NR_listxattr) {
11866                 ret = get_errno(listxattr(p, b, arg3));
11867             } else {
11868                 ret = get_errno(llistxattr(p, b, arg3));
11869             }
11870         } else {
11871             ret = -TARGET_EFAULT;
11872         }
11873         unlock_user(p, arg1, 0);
11874         unlock_user(b, arg2, arg3);
11875         return ret;
11876     }
11877     case TARGET_NR_flistxattr:
11878     {
11879         void *b = 0;
11880         if (arg2) {
11881             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11882             if (!b) {
11883                 return -TARGET_EFAULT;
11884             }
11885         }
11886         ret = get_errno(flistxattr(arg1, b, arg3));
11887         unlock_user(b, arg2, arg3);
11888         return ret;
11889     }
11890     case TARGET_NR_setxattr:
11891     case TARGET_NR_lsetxattr:
11892         {
11893             void *p, *n, *v = 0;
11894             if (arg3) {
11895                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11896                 if (!v) {
11897                     return -TARGET_EFAULT;
11898                 }
11899             }
11900             p = lock_user_string(arg1);
11901             n = lock_user_string(arg2);
11902             if (p && n) {
11903                 if (num == TARGET_NR_setxattr) {
11904                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11905                 } else {
11906                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11907                 }
11908             } else {
11909                 ret = -TARGET_EFAULT;
11910             }
11911             unlock_user(p, arg1, 0);
11912             unlock_user(n, arg2, 0);
11913             unlock_user(v, arg3, 0);
11914         }
11915         return ret;
11916     case TARGET_NR_fsetxattr:
11917         {
11918             void *n, *v = 0;
11919             if (arg3) {
11920                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11921                 if (!v) {
11922                     return -TARGET_EFAULT;
11923                 }
11924             }
11925             n = lock_user_string(arg2);
11926             if (n) {
11927                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11928             } else {
11929                 ret = -TARGET_EFAULT;
11930             }
11931             unlock_user(n, arg2, 0);
11932             unlock_user(v, arg3, 0);
11933         }
11934         return ret;
11935     case TARGET_NR_getxattr:
11936     case TARGET_NR_lgetxattr:
11937         {
11938             void *p, *n, *v = 0;
11939             if (arg3) {
11940                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11941                 if (!v) {
11942                     return -TARGET_EFAULT;
11943                 }
11944             }
11945             p = lock_user_string(arg1);
11946             n = lock_user_string(arg2);
11947             if (p && n) {
11948                 if (num == TARGET_NR_getxattr) {
11949                     ret = get_errno(getxattr(p, n, v, arg4));
11950                 } else {
11951                     ret = get_errno(lgetxattr(p, n, v, arg4));
11952                 }
11953             } else {
11954                 ret = -TARGET_EFAULT;
11955             }
11956             unlock_user(p, arg1, 0);
11957             unlock_user(n, arg2, 0);
11958             unlock_user(v, arg3, arg4);
11959         }
11960         return ret;
11961     case TARGET_NR_fgetxattr:
11962         {
11963             void *n, *v = 0;
11964             if (arg3) {
11965                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11966                 if (!v) {
11967                     return -TARGET_EFAULT;
11968                 }
11969             }
11970             n = lock_user_string(arg2);
11971             if (n) {
11972                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11973             } else {
11974                 ret = -TARGET_EFAULT;
11975             }
11976             unlock_user(n, arg2, 0);
11977             unlock_user(v, arg3, arg4);
11978         }
11979         return ret;
11980     case TARGET_NR_removexattr:
11981     case TARGET_NR_lremovexattr:
11982         {
11983             void *p, *n;
11984             p = lock_user_string(arg1);
11985             n = lock_user_string(arg2);
11986             if (p && n) {
11987                 if (num == TARGET_NR_removexattr) {
11988                     ret = get_errno(removexattr(p, n));
11989                 } else {
11990                     ret = get_errno(lremovexattr(p, n));
11991                 }
11992             } else {
11993                 ret = -TARGET_EFAULT;
11994             }
11995             unlock_user(p, arg1, 0);
11996             unlock_user(n, arg2, 0);
11997         }
11998         return ret;
11999     case TARGET_NR_fremovexattr:
12000         {
12001             void *n;
12002             n = lock_user_string(arg2);
12003             if (n) {
12004                 ret = get_errno(fremovexattr(arg1, n));
12005             } else {
12006                 ret = -TARGET_EFAULT;
12007             }
12008             unlock_user(n, arg2, 0);
12009         }
12010         return ret;
12011 #endif
12012 #endif /* CONFIG_ATTR */
12013 #ifdef TARGET_NR_set_thread_area
12014     case TARGET_NR_set_thread_area:
12015 #if defined(TARGET_MIPS)
12016       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12017       return 0;
12018 #elif defined(TARGET_CRIS)
12019       if (arg1 & 0xff)
12020           ret = -TARGET_EINVAL;
12021       else {
12022           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12023           ret = 0;
12024       }
12025       return ret;
12026 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12027       return do_set_thread_area(cpu_env, arg1);
12028 #elif defined(TARGET_M68K)
12029       {
12030           TaskState *ts = cpu->opaque;
12031           ts->tp_value = arg1;
12032           return 0;
12033       }
12034 #else
12035       return -TARGET_ENOSYS;
12036 #endif
12037 #endif
12038 #ifdef TARGET_NR_get_thread_area
12039     case TARGET_NR_get_thread_area:
12040 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12041         return do_get_thread_area(cpu_env, arg1);
12042 #elif defined(TARGET_M68K)
12043         {
12044             TaskState *ts = cpu->opaque;
12045             return ts->tp_value;
12046         }
12047 #else
12048         return -TARGET_ENOSYS;
12049 #endif
12050 #endif
12051 #ifdef TARGET_NR_getdomainname
12052     case TARGET_NR_getdomainname:
12053         return -TARGET_ENOSYS;
12054 #endif
12055 
12056 #ifdef TARGET_NR_clock_settime
12057     case TARGET_NR_clock_settime:
12058     {
12059         struct timespec ts;
12060 
12061         ret = target_to_host_timespec(&ts, arg2);
12062         if (!is_error(ret)) {
12063             ret = get_errno(clock_settime(arg1, &ts));
12064         }
12065         return ret;
12066     }
12067 #endif
12068 #ifdef TARGET_NR_clock_settime64
12069     case TARGET_NR_clock_settime64:
12070     {
12071         struct timespec ts;
12072 
12073         ret = target_to_host_timespec64(&ts, arg2);
12074         if (!is_error(ret)) {
12075             ret = get_errno(clock_settime(arg1, &ts));
12076         }
12077         return ret;
12078     }
12079 #endif
12080 #ifdef TARGET_NR_clock_gettime
12081     case TARGET_NR_clock_gettime:
12082     {
12083         struct timespec ts;
12084         ret = get_errno(clock_gettime(arg1, &ts));
12085         if (!is_error(ret)) {
12086             ret = host_to_target_timespec(arg2, &ts);
12087         }
12088         return ret;
12089     }
12090 #endif
12091 #ifdef TARGET_NR_clock_gettime64
12092     case TARGET_NR_clock_gettime64:
12093     {
12094         struct timespec ts;
12095         ret = get_errno(clock_gettime(arg1, &ts));
12096         if (!is_error(ret)) {
12097             ret = host_to_target_timespec64(arg2, &ts);
12098         }
12099         return ret;
12100     }
12101 #endif
12102 #ifdef TARGET_NR_clock_getres
12103     case TARGET_NR_clock_getres:
12104     {
12105         struct timespec ts;
12106         ret = get_errno(clock_getres(arg1, &ts));
12107         if (!is_error(ret)) {
12108             host_to_target_timespec(arg2, &ts);
12109         }
12110         return ret;
12111     }
12112 #endif
12113 #ifdef TARGET_NR_clock_getres_time64
12114     case TARGET_NR_clock_getres_time64:
12115     {
12116         struct timespec ts;
12117         ret = get_errno(clock_getres(arg1, &ts));
12118         if (!is_error(ret)) {
12119             host_to_target_timespec64(arg2, &ts);
12120         }
12121         return ret;
12122     }
12123 #endif
12124 #ifdef TARGET_NR_clock_nanosleep
12125     case TARGET_NR_clock_nanosleep:
12126     {
12127         struct timespec ts;
12128         if (target_to_host_timespec(&ts, arg3)) {
12129             return -TARGET_EFAULT;
12130         }
12131         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12132                                              &ts, arg4 ? &ts : NULL));
12133         /*
12134          * if the call is interrupted by a signal handler, it fails
12135          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12136          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12137          */
12138         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12139             host_to_target_timespec(arg4, &ts)) {
12140               return -TARGET_EFAULT;
12141         }
12142 
12143         return ret;
12144     }
12145 #endif
12146 #ifdef TARGET_NR_clock_nanosleep_time64
12147     case TARGET_NR_clock_nanosleep_time64:
12148     {
12149         struct timespec ts;
12150 
12151         if (target_to_host_timespec64(&ts, arg3)) {
12152             return -TARGET_EFAULT;
12153         }
12154 
12155         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12156                                              &ts, arg4 ? &ts : NULL));
12157 
12158         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12159             host_to_target_timespec64(arg4, &ts)) {
12160             return -TARGET_EFAULT;
12161         }
12162         return ret;
12163     }
12164 #endif
12165 
12166 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12167     case TARGET_NR_set_tid_address:
12168         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12169 #endif
12170 
12171     case TARGET_NR_tkill:
12172         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12173 
12174     case TARGET_NR_tgkill:
12175         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12176                          target_to_host_signal(arg3)));
12177 
12178 #ifdef TARGET_NR_set_robust_list
12179     case TARGET_NR_set_robust_list:
12180     case TARGET_NR_get_robust_list:
12181         /* The ABI for supporting robust futexes has userspace pass
12182          * the kernel a pointer to a linked list which is updated by
12183          * userspace after the syscall; the list is walked by the kernel
12184          * when the thread exits. Since the linked list in QEMU guest
12185          * memory isn't a valid linked list for the host and we have
12186          * no way to reliably intercept the thread-death event, we can't
12187          * support these. Silently return ENOSYS so that guest userspace
12188          * falls back to a non-robust futex implementation (which should
12189          * be OK except in the corner case of the guest crashing while
12190          * holding a mutex that is shared with another process via
12191          * shared memory).
12192          */
12193         return -TARGET_ENOSYS;
12194 #endif
12195 
12196 #if defined(TARGET_NR_utimensat)
12197     case TARGET_NR_utimensat:
12198         {
12199             struct timespec *tsp, ts[2];
12200             if (!arg3) {
12201                 tsp = NULL;
12202             } else {
12203                 if (target_to_host_timespec(ts, arg3)) {
12204                     return -TARGET_EFAULT;
12205                 }
12206                 if (target_to_host_timespec(ts + 1, arg3 +
12207                                             sizeof(struct target_timespec))) {
12208                     return -TARGET_EFAULT;
12209                 }
12210                 tsp = ts;
12211             }
12212             if (!arg2)
12213                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12214             else {
12215                 if (!(p = lock_user_string(arg2))) {
12216                     return -TARGET_EFAULT;
12217                 }
12218                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12219                 unlock_user(p, arg2, 0);
12220             }
12221         }
12222         return ret;
12223 #endif
12224 #ifdef TARGET_NR_utimensat_time64
12225     case TARGET_NR_utimensat_time64:
12226         {
12227             struct timespec *tsp, ts[2];
12228             if (!arg3) {
12229                 tsp = NULL;
12230             } else {
12231                 if (target_to_host_timespec64(ts, arg3)) {
12232                     return -TARGET_EFAULT;
12233                 }
12234                 if (target_to_host_timespec64(ts + 1, arg3 +
12235                                      sizeof(struct target__kernel_timespec))) {
12236                     return -TARGET_EFAULT;
12237                 }
12238                 tsp = ts;
12239             }
12240             if (!arg2)
12241                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12242             else {
12243                 p = lock_user_string(arg2);
12244                 if (!p) {
12245                     return -TARGET_EFAULT;
12246                 }
12247                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12248                 unlock_user(p, arg2, 0);
12249             }
12250         }
12251         return ret;
12252 #endif
12253 #ifdef TARGET_NR_futex
12254     case TARGET_NR_futex:
12255         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12256 #endif
12257 #ifdef TARGET_NR_futex_time64
12258     case TARGET_NR_futex_time64:
12259         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12260 #endif
12261 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12262     case TARGET_NR_inotify_init:
12263         ret = get_errno(sys_inotify_init());
12264         if (ret >= 0) {
12265             fd_trans_register(ret, &target_inotify_trans);
12266         }
12267         return ret;
12268 #endif
12269 #ifdef CONFIG_INOTIFY1
12270 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12271     case TARGET_NR_inotify_init1:
12272         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12273                                           fcntl_flags_tbl)));
12274         if (ret >= 0) {
12275             fd_trans_register(ret, &target_inotify_trans);
12276         }
12277         return ret;
12278 #endif
12279 #endif
12280 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12281     case TARGET_NR_inotify_add_watch:
12282         p = lock_user_string(arg2);
12283         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12284         unlock_user(p, arg2, 0);
12285         return ret;
12286 #endif
12287 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12288     case TARGET_NR_inotify_rm_watch:
12289         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12290 #endif
12291 
12292 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12293     case TARGET_NR_mq_open:
12294         {
12295             struct mq_attr posix_mq_attr;
12296             struct mq_attr *pposix_mq_attr;
12297             int host_flags;
12298 
12299             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12300             pposix_mq_attr = NULL;
12301             if (arg4) {
12302                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12303                     return -TARGET_EFAULT;
12304                 }
12305                 pposix_mq_attr = &posix_mq_attr;
12306             }
12307             p = lock_user_string(arg1 - 1);
12308             if (!p) {
12309                 return -TARGET_EFAULT;
12310             }
12311             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12312             unlock_user (p, arg1, 0);
12313         }
12314         return ret;
12315 
12316     case TARGET_NR_mq_unlink:
12317         p = lock_user_string(arg1 - 1);
12318         if (!p) {
12319             return -TARGET_EFAULT;
12320         }
12321         ret = get_errno(mq_unlink(p));
12322         unlock_user (p, arg1, 0);
12323         return ret;
12324 
12325 #ifdef TARGET_NR_mq_timedsend
12326     case TARGET_NR_mq_timedsend:
12327         {
12328             struct timespec ts;
12329 
12330             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12331             if (arg5 != 0) {
12332                 if (target_to_host_timespec(&ts, arg5)) {
12333                     return -TARGET_EFAULT;
12334                 }
12335                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12336                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12337                     return -TARGET_EFAULT;
12338                 }
12339             } else {
12340                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12341             }
12342             unlock_user (p, arg2, arg3);
12343         }
12344         return ret;
12345 #endif
12346 #ifdef TARGET_NR_mq_timedsend_time64
12347     case TARGET_NR_mq_timedsend_time64:
12348         {
12349             struct timespec ts;
12350 
12351             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12352             if (arg5 != 0) {
12353                 if (target_to_host_timespec64(&ts, arg5)) {
12354                     return -TARGET_EFAULT;
12355                 }
12356                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12357                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12358                     return -TARGET_EFAULT;
12359                 }
12360             } else {
12361                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12362             }
12363             unlock_user(p, arg2, arg3);
12364         }
12365         return ret;
12366 #endif
12367 
12368 #ifdef TARGET_NR_mq_timedreceive
12369     case TARGET_NR_mq_timedreceive:
12370         {
12371             struct timespec ts;
12372             unsigned int prio;
12373 
12374             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12375             if (arg5 != 0) {
12376                 if (target_to_host_timespec(&ts, arg5)) {
12377                     return -TARGET_EFAULT;
12378                 }
12379                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12380                                                      &prio, &ts));
12381                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12382                     return -TARGET_EFAULT;
12383                 }
12384             } else {
12385                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12386                                                      &prio, NULL));
12387             }
12388             unlock_user (p, arg2, arg3);
12389             if (arg4 != 0)
12390                 put_user_u32(prio, arg4);
12391         }
12392         return ret;
12393 #endif
12394 #ifdef TARGET_NR_mq_timedreceive_time64
12395     case TARGET_NR_mq_timedreceive_time64:
12396         {
12397             struct timespec ts;
12398             unsigned int prio;
12399 
12400             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12401             if (arg5 != 0) {
12402                 if (target_to_host_timespec64(&ts, arg5)) {
12403                     return -TARGET_EFAULT;
12404                 }
12405                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12406                                                      &prio, &ts));
12407                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12408                     return -TARGET_EFAULT;
12409                 }
12410             } else {
12411                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12412                                                      &prio, NULL));
12413             }
12414             unlock_user(p, arg2, arg3);
12415             if (arg4 != 0) {
12416                 put_user_u32(prio, arg4);
12417             }
12418         }
12419         return ret;
12420 #endif
12421 
12422     /* Not implemented for now... */
12423 /*     case TARGET_NR_mq_notify: */
12424 /*         break; */
12425 
12426     case TARGET_NR_mq_getsetattr:
12427         {
12428             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12429             ret = 0;
12430             if (arg2 != 0) {
12431                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12432                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12433                                            &posix_mq_attr_out));
12434             } else if (arg3 != 0) {
12435                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12436             }
12437             if (ret == 0 && arg3 != 0) {
12438                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12439             }
12440         }
12441         return ret;
12442 #endif
12443 
12444 #ifdef CONFIG_SPLICE
12445 #ifdef TARGET_NR_tee
12446     case TARGET_NR_tee:
12447         {
12448             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12449         }
12450         return ret;
12451 #endif
12452 #ifdef TARGET_NR_splice
12453     case TARGET_NR_splice:
12454         {
12455             loff_t loff_in, loff_out;
12456             loff_t *ploff_in = NULL, *ploff_out = NULL;
12457             if (arg2) {
12458                 if (get_user_u64(loff_in, arg2)) {
12459                     return -TARGET_EFAULT;
12460                 }
12461                 ploff_in = &loff_in;
12462             }
12463             if (arg4) {
12464                 if (get_user_u64(loff_out, arg4)) {
12465                     return -TARGET_EFAULT;
12466                 }
12467                 ploff_out = &loff_out;
12468             }
12469             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12470             if (arg2) {
12471                 if (put_user_u64(loff_in, arg2)) {
12472                     return -TARGET_EFAULT;
12473                 }
12474             }
12475             if (arg4) {
12476                 if (put_user_u64(loff_out, arg4)) {
12477                     return -TARGET_EFAULT;
12478                 }
12479             }
12480         }
12481         return ret;
12482 #endif
12483 #ifdef TARGET_NR_vmsplice
12484 	case TARGET_NR_vmsplice:
12485         {
12486             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12487             if (vec != NULL) {
12488                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12489                 unlock_iovec(vec, arg2, arg3, 0);
12490             } else {
12491                 ret = -host_to_target_errno(errno);
12492             }
12493         }
12494         return ret;
12495 #endif
12496 #endif /* CONFIG_SPLICE */
12497 #ifdef CONFIG_EVENTFD
12498 #if defined(TARGET_NR_eventfd)
12499     case TARGET_NR_eventfd:
12500         ret = get_errno(eventfd(arg1, 0));
12501         if (ret >= 0) {
12502             fd_trans_register(ret, &target_eventfd_trans);
12503         }
12504         return ret;
12505 #endif
12506 #if defined(TARGET_NR_eventfd2)
12507     case TARGET_NR_eventfd2:
12508     {
12509         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12510         if (arg2 & TARGET_O_NONBLOCK) {
12511             host_flags |= O_NONBLOCK;
12512         }
12513         if (arg2 & TARGET_O_CLOEXEC) {
12514             host_flags |= O_CLOEXEC;
12515         }
12516         ret = get_errno(eventfd(arg1, host_flags));
12517         if (ret >= 0) {
12518             fd_trans_register(ret, &target_eventfd_trans);
12519         }
12520         return ret;
12521     }
12522 #endif
12523 #endif /* CONFIG_EVENTFD  */
12524 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12525     case TARGET_NR_fallocate:
12526 #if TARGET_ABI_BITS == 32
12527         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12528                                   target_offset64(arg5, arg6)));
12529 #else
12530         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12531 #endif
12532         return ret;
12533 #endif
12534 #if defined(CONFIG_SYNC_FILE_RANGE)
12535 #if defined(TARGET_NR_sync_file_range)
12536     case TARGET_NR_sync_file_range:
12537 #if TARGET_ABI_BITS == 32
12538 #if defined(TARGET_MIPS)
12539         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12540                                         target_offset64(arg5, arg6), arg7));
12541 #else
12542         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12543                                         target_offset64(arg4, arg5), arg6));
12544 #endif /* !TARGET_MIPS */
12545 #else
12546         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12547 #endif
12548         return ret;
12549 #endif
12550 #if defined(TARGET_NR_sync_file_range2) || \
12551     defined(TARGET_NR_arm_sync_file_range)
12552 #if defined(TARGET_NR_sync_file_range2)
12553     case TARGET_NR_sync_file_range2:
12554 #endif
12555 #if defined(TARGET_NR_arm_sync_file_range)
12556     case TARGET_NR_arm_sync_file_range:
12557 #endif
12558         /* This is like sync_file_range but the arguments are reordered */
12559 #if TARGET_ABI_BITS == 32
12560         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12561                                         target_offset64(arg5, arg6), arg2));
12562 #else
12563         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12564 #endif
12565         return ret;
12566 #endif
12567 #endif
12568 #if defined(TARGET_NR_signalfd4)
12569     case TARGET_NR_signalfd4:
12570         return do_signalfd4(arg1, arg2, arg4);
12571 #endif
12572 #if defined(TARGET_NR_signalfd)
12573     case TARGET_NR_signalfd:
12574         return do_signalfd4(arg1, arg2, 0);
12575 #endif
12576 #if defined(CONFIG_EPOLL)
12577 #if defined(TARGET_NR_epoll_create)
12578     case TARGET_NR_epoll_create:
12579         return get_errno(epoll_create(arg1));
12580 #endif
12581 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12582     case TARGET_NR_epoll_create1:
12583         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12584 #endif
12585 #if defined(TARGET_NR_epoll_ctl)
12586     case TARGET_NR_epoll_ctl:
12587     {
12588         struct epoll_event ep;
12589         struct epoll_event *epp = 0;
12590         if (arg4) {
12591             if (arg2 != EPOLL_CTL_DEL) {
12592                 struct target_epoll_event *target_ep;
12593                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12594                     return -TARGET_EFAULT;
12595                 }
12596                 ep.events = tswap32(target_ep->events);
12597                 /*
12598                  * The epoll_data_t union is just opaque data to the kernel,
12599                  * so we transfer all 64 bits across and need not worry what
12600                  * actual data type it is.
12601                  */
12602                 ep.data.u64 = tswap64(target_ep->data.u64);
12603                 unlock_user_struct(target_ep, arg4, 0);
12604             }
12605             /*
12606              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12607              * non-null pointer, even though this argument is ignored.
12608              *
12609              */
12610             epp = &ep;
12611         }
12612         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12613     }
12614 #endif
12615 
12616 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12617 #if defined(TARGET_NR_epoll_wait)
12618     case TARGET_NR_epoll_wait:
12619 #endif
12620 #if defined(TARGET_NR_epoll_pwait)
12621     case TARGET_NR_epoll_pwait:
12622 #endif
12623     {
12624         struct target_epoll_event *target_ep;
12625         struct epoll_event *ep;
12626         int epfd = arg1;
12627         int maxevents = arg3;
12628         int timeout = arg4;
12629 
12630         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12631             return -TARGET_EINVAL;
12632         }
12633 
12634         target_ep = lock_user(VERIFY_WRITE, arg2,
12635                               maxevents * sizeof(struct target_epoll_event), 1);
12636         if (!target_ep) {
12637             return -TARGET_EFAULT;
12638         }
12639 
12640         ep = g_try_new(struct epoll_event, maxevents);
12641         if (!ep) {
12642             unlock_user(target_ep, arg2, 0);
12643             return -TARGET_ENOMEM;
12644         }
12645 
12646         switch (num) {
12647 #if defined(TARGET_NR_epoll_pwait)
12648         case TARGET_NR_epoll_pwait:
12649         {
12650             target_sigset_t *target_set;
12651             sigset_t _set, *set = &_set;
12652 
12653             if (arg5) {
12654                 if (arg6 != sizeof(target_sigset_t)) {
12655                     ret = -TARGET_EINVAL;
12656                     break;
12657                 }
12658 
12659                 target_set = lock_user(VERIFY_READ, arg5,
12660                                        sizeof(target_sigset_t), 1);
12661                 if (!target_set) {
12662                     ret = -TARGET_EFAULT;
12663                     break;
12664                 }
12665                 target_to_host_sigset(set, target_set);
12666                 unlock_user(target_set, arg5, 0);
12667             } else {
12668                 set = NULL;
12669             }
12670 
12671             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12672                                              set, SIGSET_T_SIZE));
12673             break;
12674         }
12675 #endif
12676 #if defined(TARGET_NR_epoll_wait)
12677         case TARGET_NR_epoll_wait:
12678             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12679                                              NULL, 0));
12680             break;
12681 #endif
12682         default:
12683             ret = -TARGET_ENOSYS;
12684         }
12685         if (!is_error(ret)) {
12686             int i;
12687             for (i = 0; i < ret; i++) {
12688                 target_ep[i].events = tswap32(ep[i].events);
12689                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12690             }
12691             unlock_user(target_ep, arg2,
12692                         ret * sizeof(struct target_epoll_event));
12693         } else {
12694             unlock_user(target_ep, arg2, 0);
12695         }
12696         g_free(ep);
12697         return ret;
12698     }
12699 #endif
12700 #endif
12701 #ifdef TARGET_NR_prlimit64
12702     case TARGET_NR_prlimit64:
12703     {
12704         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12705         struct target_rlimit64 *target_rnew, *target_rold;
12706         struct host_rlimit64 rnew, rold, *rnewp = 0;
12707         int resource = target_to_host_resource(arg2);
12708 
12709         if (arg3 && (resource != RLIMIT_AS &&
12710                      resource != RLIMIT_DATA &&
12711                      resource != RLIMIT_STACK)) {
12712             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12713                 return -TARGET_EFAULT;
12714             }
12715             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12716             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12717             unlock_user_struct(target_rnew, arg3, 0);
12718             rnewp = &rnew;
12719         }
12720 
12721         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12722         if (!is_error(ret) && arg4) {
12723             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12724                 return -TARGET_EFAULT;
12725             }
12726             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12727             target_rold->rlim_max = tswap64(rold.rlim_max);
12728             unlock_user_struct(target_rold, arg4, 1);
12729         }
12730         return ret;
12731     }
12732 #endif
12733 #ifdef TARGET_NR_gethostname
12734     case TARGET_NR_gethostname:
12735     {
12736         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12737         if (name) {
12738             ret = get_errno(gethostname(name, arg2));
12739             unlock_user(name, arg1, arg2);
12740         } else {
12741             ret = -TARGET_EFAULT;
12742         }
12743         return ret;
12744     }
12745 #endif
12746 #ifdef TARGET_NR_atomic_cmpxchg_32
12747     case TARGET_NR_atomic_cmpxchg_32:
12748     {
12749         /* should use start_exclusive from main.c */
12750         abi_ulong mem_value;
12751         if (get_user_u32(mem_value, arg6)) {
12752             target_siginfo_t info;
12753             info.si_signo = SIGSEGV;
12754             info.si_errno = 0;
12755             info.si_code = TARGET_SEGV_MAPERR;
12756             info._sifields._sigfault._addr = arg6;
12757             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12758                          QEMU_SI_FAULT, &info);
12759             ret = 0xdeadbeef;
12760 
12761         }
12762         if (mem_value == arg2)
12763             put_user_u32(arg1, arg6);
12764         return mem_value;
12765     }
12766 #endif
12767 #ifdef TARGET_NR_atomic_barrier
12768     case TARGET_NR_atomic_barrier:
12769         /* Like the kernel implementation and the
12770            qemu arm barrier, no-op this? */
12771         return 0;
12772 #endif
12773 
12774 #ifdef TARGET_NR_timer_create
12775     case TARGET_NR_timer_create:
12776     {
12777         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12778 
12779         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12780 
12781         int clkid = arg1;
12782         int timer_index = next_free_host_timer();
12783 
12784         if (timer_index < 0) {
12785             ret = -TARGET_EAGAIN;
12786         } else {
12787             timer_t *phtimer = g_posix_timers  + timer_index;
12788 
12789             if (arg2) {
12790                 phost_sevp = &host_sevp;
12791                 ret = target_to_host_sigevent(phost_sevp, arg2);
12792                 if (ret != 0) {
12793                     return ret;
12794                 }
12795             }
12796 
12797             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12798             if (ret) {
12799                 phtimer = NULL;
12800             } else {
12801                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12802                     return -TARGET_EFAULT;
12803                 }
12804             }
12805         }
12806         return ret;
12807     }
12808 #endif
12809 
12810 #ifdef TARGET_NR_timer_settime
12811     case TARGET_NR_timer_settime:
12812     {
12813         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12814          * struct itimerspec * old_value */
12815         target_timer_t timerid = get_timer_id(arg1);
12816 
12817         if (timerid < 0) {
12818             ret = timerid;
12819         } else if (arg3 == 0) {
12820             ret = -TARGET_EINVAL;
12821         } else {
12822             timer_t htimer = g_posix_timers[timerid];
12823             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12824 
12825             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12826                 return -TARGET_EFAULT;
12827             }
12828             ret = get_errno(
12829                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12830             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12831                 return -TARGET_EFAULT;
12832             }
12833         }
12834         return ret;
12835     }
12836 #endif
12837 
12838 #ifdef TARGET_NR_timer_settime64
12839     case TARGET_NR_timer_settime64:
12840     {
12841         target_timer_t timerid = get_timer_id(arg1);
12842 
12843         if (timerid < 0) {
12844             ret = timerid;
12845         } else if (arg3 == 0) {
12846             ret = -TARGET_EINVAL;
12847         } else {
12848             timer_t htimer = g_posix_timers[timerid];
12849             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12850 
12851             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12852                 return -TARGET_EFAULT;
12853             }
12854             ret = get_errno(
12855                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12856             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12857                 return -TARGET_EFAULT;
12858             }
12859         }
12860         return ret;
12861     }
12862 #endif
12863 
12864 #ifdef TARGET_NR_timer_gettime
12865     case TARGET_NR_timer_gettime:
12866     {
12867         /* args: timer_t timerid, struct itimerspec *curr_value */
12868         target_timer_t timerid = get_timer_id(arg1);
12869 
12870         if (timerid < 0) {
12871             ret = timerid;
12872         } else if (!arg2) {
12873             ret = -TARGET_EFAULT;
12874         } else {
12875             timer_t htimer = g_posix_timers[timerid];
12876             struct itimerspec hspec;
12877             ret = get_errno(timer_gettime(htimer, &hspec));
12878 
12879             if (host_to_target_itimerspec(arg2, &hspec)) {
12880                 ret = -TARGET_EFAULT;
12881             }
12882         }
12883         return ret;
12884     }
12885 #endif
12886 
12887 #ifdef TARGET_NR_timer_gettime64
12888     case TARGET_NR_timer_gettime64:
12889     {
12890         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12891         target_timer_t timerid = get_timer_id(arg1);
12892 
12893         if (timerid < 0) {
12894             ret = timerid;
12895         } else if (!arg2) {
12896             ret = -TARGET_EFAULT;
12897         } else {
12898             timer_t htimer = g_posix_timers[timerid];
12899             struct itimerspec hspec;
12900             ret = get_errno(timer_gettime(htimer, &hspec));
12901 
12902             if (host_to_target_itimerspec64(arg2, &hspec)) {
12903                 ret = -TARGET_EFAULT;
12904             }
12905         }
12906         return ret;
12907     }
12908 #endif
12909 
12910 #ifdef TARGET_NR_timer_getoverrun
12911     case TARGET_NR_timer_getoverrun:
12912     {
12913         /* args: timer_t timerid */
12914         target_timer_t timerid = get_timer_id(arg1);
12915 
12916         if (timerid < 0) {
12917             ret = timerid;
12918         } else {
12919             timer_t htimer = g_posix_timers[timerid];
12920             ret = get_errno(timer_getoverrun(htimer));
12921         }
12922         return ret;
12923     }
12924 #endif
12925 
12926 #ifdef TARGET_NR_timer_delete
12927     case TARGET_NR_timer_delete:
12928     {
12929         /* args: timer_t timerid */
12930         target_timer_t timerid = get_timer_id(arg1);
12931 
12932         if (timerid < 0) {
12933             ret = timerid;
12934         } else {
12935             timer_t htimer = g_posix_timers[timerid];
12936             ret = get_errno(timer_delete(htimer));
12937             g_posix_timers[timerid] = 0;
12938         }
12939         return ret;
12940     }
12941 #endif
12942 
12943 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12944     case TARGET_NR_timerfd_create:
12945         return get_errno(timerfd_create(arg1,
12946                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12947 #endif
12948 
12949 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12950     case TARGET_NR_timerfd_gettime:
12951         {
12952             struct itimerspec its_curr;
12953 
12954             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12955 
12956             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12957                 return -TARGET_EFAULT;
12958             }
12959         }
12960         return ret;
12961 #endif
12962 
12963 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12964     case TARGET_NR_timerfd_gettime64:
12965         {
12966             struct itimerspec its_curr;
12967 
12968             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12969 
12970             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12971                 return -TARGET_EFAULT;
12972             }
12973         }
12974         return ret;
12975 #endif
12976 
12977 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12978     case TARGET_NR_timerfd_settime:
12979         {
12980             struct itimerspec its_new, its_old, *p_new;
12981 
12982             if (arg3) {
12983                 if (target_to_host_itimerspec(&its_new, arg3)) {
12984                     return -TARGET_EFAULT;
12985                 }
12986                 p_new = &its_new;
12987             } else {
12988                 p_new = NULL;
12989             }
12990 
12991             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12992 
12993             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12994                 return -TARGET_EFAULT;
12995             }
12996         }
12997         return ret;
12998 #endif
12999 
13000 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13001     case TARGET_NR_timerfd_settime64:
13002         {
13003             struct itimerspec its_new, its_old, *p_new;
13004 
13005             if (arg3) {
13006                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13007                     return -TARGET_EFAULT;
13008                 }
13009                 p_new = &its_new;
13010             } else {
13011                 p_new = NULL;
13012             }
13013 
13014             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13015 
13016             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13017                 return -TARGET_EFAULT;
13018             }
13019         }
13020         return ret;
13021 #endif
13022 
13023 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13024     case TARGET_NR_ioprio_get:
13025         return get_errno(ioprio_get(arg1, arg2));
13026 #endif
13027 
13028 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13029     case TARGET_NR_ioprio_set:
13030         return get_errno(ioprio_set(arg1, arg2, arg3));
13031 #endif
13032 
13033 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13034     case TARGET_NR_setns:
13035         return get_errno(setns(arg1, arg2));
13036 #endif
13037 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13038     case TARGET_NR_unshare:
13039         return get_errno(unshare(arg1));
13040 #endif
13041 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13042     case TARGET_NR_kcmp:
13043         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13044 #endif
13045 #ifdef TARGET_NR_swapcontext
13046     case TARGET_NR_swapcontext:
13047         /* PowerPC specific.  */
13048         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13049 #endif
13050 #ifdef TARGET_NR_memfd_create
13051     case TARGET_NR_memfd_create:
13052         p = lock_user_string(arg1);
13053         if (!p) {
13054             return -TARGET_EFAULT;
13055         }
13056         ret = get_errno(memfd_create(p, arg2));
13057         fd_trans_unregister(ret);
13058         unlock_user(p, arg1, 0);
13059         return ret;
13060 #endif
13061 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13062     case TARGET_NR_membarrier:
13063         return get_errno(membarrier(arg1, arg2));
13064 #endif
13065 
13066 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13067     case TARGET_NR_copy_file_range:
13068         {
13069             loff_t inoff, outoff;
13070             loff_t *pinoff = NULL, *poutoff = NULL;
13071 
13072             if (arg2) {
13073                 if (get_user_u64(inoff, arg2)) {
13074                     return -TARGET_EFAULT;
13075                 }
13076                 pinoff = &inoff;
13077             }
13078             if (arg4) {
13079                 if (get_user_u64(outoff, arg4)) {
13080                     return -TARGET_EFAULT;
13081                 }
13082                 poutoff = &outoff;
13083             }
13084             /* Do not sign-extend the count parameter. */
13085             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13086                                                  (abi_ulong)arg5, arg6));
13087             if (!is_error(ret) && ret > 0) {
13088                 if (arg2) {
13089                     if (put_user_u64(inoff, arg2)) {
13090                         return -TARGET_EFAULT;
13091                     }
13092                 }
13093                 if (arg4) {
13094                     if (put_user_u64(outoff, arg4)) {
13095                         return -TARGET_EFAULT;
13096                     }
13097                 }
13098             }
13099         }
13100         return ret;
13101 #endif
13102 
13103 #if defined(TARGET_NR_pivot_root)
13104     case TARGET_NR_pivot_root:
13105         {
13106             void *p2;
13107             p = lock_user_string(arg1); /* new_root */
13108             p2 = lock_user_string(arg2); /* put_old */
13109             if (!p || !p2) {
13110                 ret = -TARGET_EFAULT;
13111             } else {
13112                 ret = get_errno(pivot_root(p, p2));
13113             }
13114             unlock_user(p2, arg2, 0);
13115             unlock_user(p, arg1, 0);
13116         }
13117         return ret;
13118 #endif
13119 
13120     default:
13121         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13122         return -TARGET_ENOSYS;
13123     }
13124     return ret;
13125 }
13126 
13127 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13128                     abi_long arg2, abi_long arg3, abi_long arg4,
13129                     abi_long arg5, abi_long arg6, abi_long arg7,
13130                     abi_long arg8)
13131 {
13132     CPUState *cpu = env_cpu(cpu_env);
13133     abi_long ret;
13134 
13135 #ifdef DEBUG_ERESTARTSYS
13136     /* Debug-only code for exercising the syscall-restart code paths
13137      * in the per-architecture cpu main loops: restart every syscall
13138      * the guest makes once before letting it through.
13139      */
13140     {
13141         static bool flag;
13142         flag = !flag;
13143         if (flag) {
13144             return -TARGET_ERESTARTSYS;
13145         }
13146     }
13147 #endif
13148 
13149     record_syscall_start(cpu, num, arg1,
13150                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13151 
13152     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13153         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13154     }
13155 
13156     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13157                       arg5, arg6, arg7, arg8);
13158 
13159     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13160         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13161                           arg3, arg4, arg5, arg6);
13162     }
13163 
13164     record_syscall_return(cpu, num, ret);
13165     return ret;
13166 }
13167