xref: /openbmc/qemu/linux-user/syscall.c (revision fd08ddb9)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "qapi/error.h"
140 #include "fd-trans.h"
141 #include "tcg/tcg.h"
142 
143 #ifndef CLONE_IO
144 #define CLONE_IO                0x80000000      /* Clone io context */
145 #endif
146 
147 /* We can't directly call the host clone syscall, because this will
148  * badly confuse libc (breaking mutexes, for example). So we must
149  * divide clone flags into:
150  *  * flag combinations that look like pthread_create()
151  *  * flag combinations that look like fork()
152  *  * flags we can implement within QEMU itself
153  *  * flags we can't support and will return an error for
154  */
155 /* For thread creation, all these flags must be present; for
156  * fork, none must be present.
157  */
158 #define CLONE_THREAD_FLAGS                              \
159     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
160      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
161 
162 /* These flags are ignored:
163  * CLONE_DETACHED is now ignored by the kernel;
164  * CLONE_IO is just an optimisation hint to the I/O scheduler
165  */
166 #define CLONE_IGNORED_FLAGS                     \
167     (CLONE_DETACHED | CLONE_IO)
168 
169 /* Flags for fork which we can implement within QEMU itself */
170 #define CLONE_OPTIONAL_FORK_FLAGS               \
171     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
172      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
173 
174 /* Flags for thread creation which we can implement within QEMU itself */
175 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
176     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
177      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
178 
179 #define CLONE_INVALID_FORK_FLAGS                                        \
180     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
181 
182 #define CLONE_INVALID_THREAD_FLAGS                                      \
183     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
184        CLONE_IGNORED_FLAGS))
185 
186 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
187  * have almost all been allocated. We cannot support any of
188  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
189  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
190  * The checks against the invalid thread masks above will catch these.
191  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
192  */
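
/*
 * For reference: NPTL's pthread_create() in current glibc passes
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * i.e. CLONE_THREAD_FLAGS plus a subset of CLONE_OPTIONAL_THREAD_FLAGS,
 * while glibc's fork() typically passes only the exit signal plus
 * CLONE_CHILD_SETTID/CLONE_CHILD_CLEARTID, so both satisfy the checks above.
 */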
193 
194 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
195  * once. This exercises the codepaths for restart.
196  */
197 //#define DEBUG_ERESTARTSYS
198 
199 //#include <linux/msdos_fs.h>
200 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
201 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
202 
203 #undef _syscall0
204 #undef _syscall1
205 #undef _syscall2
206 #undef _syscall3
207 #undef _syscall4
208 #undef _syscall5
209 #undef _syscall6
210 
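/*
 * The _syscallN() macros below define minimal static wrappers that invoke
 * the raw host syscall via syscall(__NR_name, ...), bypassing any libc
 * wrapper.  Like syscall(2) they return -1 and set errno on failure.
 */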
211 #define _syscall0(type,name)		\
212 static type name (void)			\
213 {					\
214 	return syscall(__NR_##name);	\
215 }
216 
217 #define _syscall1(type,name,type1,arg1)		\
218 static type name (type1 arg1)			\
219 {						\
220 	return syscall(__NR_##name, arg1);	\
221 }
222 
223 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
224 static type name (type1 arg1,type2 arg2)		\
225 {							\
226 	return syscall(__NR_##name, arg1, arg2);	\
227 }
228 
229 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3)		\
231 {								\
232 	return syscall(__NR_##name, arg1, arg2, arg3);		\
233 }
234 
235 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
236 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
237 {										\
238 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
239 }
240 
241 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
242 		  type5,arg5)							\
243 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
244 {										\
245 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
246 }
247 
248 
249 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
250 		  type5,arg5,type6,arg6)					\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
252                   type6 arg6)							\
253 {										\
254 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
255 }
256 
257 
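/*
 * The _syscallN() macros derive the syscall number from the wrapper name,
 * so alias __NR_sys_xxx to the host __NR_xxx.  This lets us emit wrappers
 * called sys_xxx() whose names do not collide with the libc functions.
 */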
258 #define __NR_sys_uname __NR_uname
259 #define __NR_sys_getcwd1 __NR_getcwd
260 #define __NR_sys_getdents __NR_getdents
261 #define __NR_sys_getdents64 __NR_getdents64
262 #define __NR_sys_getpriority __NR_getpriority
263 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
264 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
265 #define __NR_sys_syslog __NR_syslog
266 #if defined(__NR_futex)
267 # define __NR_sys_futex __NR_futex
268 #endif
269 #if defined(__NR_futex_time64)
270 # define __NR_sys_futex_time64 __NR_futex_time64
271 #endif
272 #define __NR_sys_inotify_init __NR_inotify_init
273 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
274 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate getdents with getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_getcpu __NR_getcpu
341 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
342 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
343           void *, arg);
344 _syscall2(int, capget, struct __user_cap_header_struct *, header,
345           struct __user_cap_data_struct *, data);
346 _syscall2(int, capset, struct __user_cap_header_struct *, header,
347           struct __user_cap_data_struct *, data);
348 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
349 _syscall2(int, ioprio_get, int, which, int, who)
350 #endif
351 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
352 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
353 #endif
354 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
355 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
356 #endif
357 
358 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
359 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
360           unsigned long, idx1, unsigned long, idx2)
361 #endif
362 
363 /*
364  * It is assumed that struct statx is architecture independent.
365  */
366 #if defined(TARGET_NR_statx) && defined(__NR_statx)
367 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
368           unsigned int, mask, struct target_statx *, statxbuf)
369 #endif
370 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
371 _syscall2(int, membarrier, int, cmd, int, flags)
372 #endif
373 
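/*
 * Each entry is { target_mask, target_bits, host_mask, host_bits }: an
 * open() flag whose masked guest value equals target_bits is translated
 * to host_bits, and the reverse mapping is applied when converting host
 * flags back to the guest.
 */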
374 static const bitmask_transtbl fcntl_flags_tbl[] = {
375   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
376   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
377   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
378   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
379   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
380   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
381   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
382   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
383   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
384   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
385   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
386   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
387   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
388 #if defined(O_DIRECT)
389   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
390 #endif
391 #if defined(O_NOATIME)
392   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
393 #endif
394 #if defined(O_CLOEXEC)
395   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
396 #endif
397 #if defined(O_PATH)
398   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
399 #endif
400 #if defined(O_TMPFILE)
401   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
402 #endif
403   /* Don't terminate the list prematurely on 64-bit host+guest.  */
404 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
405   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
406 #endif
407   { 0, 0, 0, 0 }
408 };
409 
410 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
411 
412 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
413 #if defined(__NR_utimensat)
414 #define __NR_sys_utimensat __NR_utimensat
415 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
416           const struct timespec *,tsp,int,flags)
417 #else
418 static int sys_utimensat(int dirfd, const char *pathname,
419                          const struct timespec times[2], int flags)
420 {
421     errno = ENOSYS;
422     return -1;
423 }
424 #endif
425 #endif /* TARGET_NR_utimensat */
426 
427 #ifdef TARGET_NR_renameat2
428 #if defined(__NR_renameat2)
429 #define __NR_sys_renameat2 __NR_renameat2
430 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
431           const char *, new, unsigned int, flags)
432 #else
433 static int sys_renameat2(int oldfd, const char *old,
434                          int newfd, const char *new, int flags)
435 {
436     if (flags == 0) {
437         return renameat(oldfd, old, newfd, new);
438     }
439     errno = ENOSYS;
440     return -1;
441 }
442 #endif
443 #endif /* TARGET_NR_renameat2 */
444 
445 #ifdef CONFIG_INOTIFY
446 #include <sys/inotify.h>
447 
448 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
449 static int sys_inotify_init(void)
450 {
451   return (inotify_init());
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
455 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
456 {
457   return (inotify_add_watch(fd, pathname, mask));
458 }
459 #endif
460 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
461 static int sys_inotify_rm_watch(int fd, int32_t wd)
462 {
463   return (inotify_rm_watch(fd, wd));
464 }
465 #endif
466 #ifdef CONFIG_INOTIFY1
467 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
468 static int sys_inotify_init1(int flags)
469 {
470   return (inotify_init1(flags));
471 }
472 #endif
473 #endif
474 #else
475 /* Userspace can usually survive runtime without inotify */
476 #undef TARGET_NR_inotify_init
477 #undef TARGET_NR_inotify_init1
478 #undef TARGET_NR_inotify_add_watch
479 #undef TARGET_NR_inotify_rm_watch
480 #endif /* CONFIG_INOTIFY  */
481 
482 #if defined(TARGET_NR_prlimit64)
483 #ifndef __NR_prlimit64
484 # define __NR_prlimit64 -1
485 #endif
486 #define __NR_sys_prlimit64 __NR_prlimit64
487 /* The glibc rlimit structure may not be that used by the underlying syscall */
488 struct host_rlimit64 {
489     uint64_t rlim_cur;
490     uint64_t rlim_max;
491 };
492 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
493           const struct host_rlimit64 *, new_limit,
494           struct host_rlimit64 *, old_limit)
495 #endif
496 
497 
498 #if defined(TARGET_NR_timer_create)
499 /* Maximum of 32 active POSIX timers allowed at any one time. */
500 static timer_t g_posix_timers[32] = { 0, };
501 
502 static inline int next_free_host_timer(void)
503 {
504     int k;
505     /* FIXME: Does finding the next free slot require a lock? */
506     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
507         if (g_posix_timers[k] == 0) {
508             g_posix_timers[k] = (timer_t) 1;
509             return k;
510         }
511     }
512     return -1;
513 }
514 #endif
515 
516 static inline int host_to_target_errno(int host_errno)
517 {
518     switch (host_errno) {
519 #define E(X)  case X: return TARGET_##X;
520 #include "errnos.c.inc"
521 #undef E
522     default:
523         return host_errno;
524     }
525 }
526 
527 static inline int target_to_host_errno(int target_errno)
528 {
529     switch (target_errno) {
530 #define E(X)  case TARGET_##X: return X;
531 #include "errnos.c.inc"
532 #undef E
533     default:
534         return target_errno;
535     }
536 }
537 
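/*
 * Convert a host syscall result using the usual "-1 with errno set"
 * convention into the convention used throughout this file: errors are
 * returned as negative TARGET_Exxx values, successes pass through.
 */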
538 static inline abi_long get_errno(abi_long ret)
539 {
540     if (ret == -1)
541         return -host_to_target_errno(errno);
542     else
543         return ret;
544 }
545 
546 const char *target_strerror(int err)
547 {
548     if (err == TARGET_ERESTARTSYS) {
549         return "To be restarted";
550     }
551     if (err == TARGET_QEMU_ESIGRETURN) {
552         return "Successful exit from sigreturn";
553     }
554 
555     return strerror(target_to_host_errno(err));
556 }
557 
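/*
 * The safe_syscallN() macros generate safe_xxx() wrappers around
 * safe_syscall().  Unlike the raw _syscallN() wrappers above, these
 * cooperate with guest signal handling so that a blocking syscall which
 * is interrupted by a guest signal can be restarted properly; see
 * safe-syscall.h for the exact contract.
 */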
558 #define safe_syscall0(type, name) \
559 static type safe_##name(void) \
560 { \
561     return safe_syscall(__NR_##name); \
562 }
563 
564 #define safe_syscall1(type, name, type1, arg1) \
565 static type safe_##name(type1 arg1) \
566 { \
567     return safe_syscall(__NR_##name, arg1); \
568 }
569 
570 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
571 static type safe_##name(type1 arg1, type2 arg2) \
572 { \
573     return safe_syscall(__NR_##name, arg1, arg2); \
574 }
575 
576 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
577 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
578 { \
579     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
580 }
581 
582 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
583     type4, arg4) \
584 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
585 { \
586     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
587 }
588 
589 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
590     type4, arg4, type5, arg5) \
591 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
592     type5 arg5) \
593 { \
594     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
595 }
596 
597 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
598     type4, arg4, type5, arg5, type6, arg6) \
599 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
600     type5 arg5, type6 arg6) \
601 { \
602     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
603 }
604 
605 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
606 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
607 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
608               int, flags, mode_t, mode)
609 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
610 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
611               struct rusage *, rusage)
612 #endif
613 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
614               int, options, struct rusage *, rusage)
615 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
616 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
617     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
618 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
619               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
620 #endif
621 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
622 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
623               struct timespec *, tsp, const sigset_t *, sigmask,
624               size_t, sigsetsize)
625 #endif
626 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
627               int, maxevents, int, timeout, const sigset_t *, sigmask,
628               size_t, sigsetsize)
629 #if defined(__NR_futex)
630 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
631               const struct timespec *,timeout,int *,uaddr2,int,val3)
632 #endif
633 #if defined(__NR_futex_time64)
634 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
635               const struct timespec *,timeout,int *,uaddr2,int,val3)
636 #endif
637 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
638 safe_syscall2(int, kill, pid_t, pid, int, sig)
639 safe_syscall2(int, tkill, int, tid, int, sig)
640 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
641 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
642 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
643 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
644               unsigned long, pos_l, unsigned long, pos_h)
645 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
646               unsigned long, pos_l, unsigned long, pos_h)
647 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
648               socklen_t, addrlen)
649 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
650               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
651 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
652               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
653 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
654 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
655 safe_syscall2(int, flock, int, fd, int, operation)
656 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
657 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
658               const struct timespec *, uts, size_t, sigsetsize)
659 #endif
660 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
661               int, flags)
662 #if defined(TARGET_NR_nanosleep)
663 safe_syscall2(int, nanosleep, const struct timespec *, req,
664               struct timespec *, rem)
665 #endif
666 #if defined(TARGET_NR_clock_nanosleep) || \
667     defined(TARGET_NR_clock_nanosleep_time64)
668 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
669               const struct timespec *, req, struct timespec *, rem)
670 #endif
671 #ifdef __NR_ipc
672 #ifdef __s390x__
673 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
674               void *, ptr)
675 #else
676 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
677               void *, ptr, long, fifth)
678 #endif
679 #endif
680 #ifdef __NR_msgsnd
681 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
682               int, flags)
683 #endif
684 #ifdef __NR_msgrcv
685 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
686               long, msgtype, int, flags)
687 #endif
688 #ifdef __NR_semtimedop
689 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
690               unsigned, nsops, const struct timespec *, timeout)
691 #endif
692 #if defined(TARGET_NR_mq_timedsend) || \
693     defined(TARGET_NR_mq_timedsend_time64)
694 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
695               size_t, len, unsigned, prio, const struct timespec *, timeout)
696 #endif
697 #if defined(TARGET_NR_mq_timedreceive) || \
698     defined(TARGET_NR_mq_timedreceive_time64)
699 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
700               size_t, len, unsigned *, prio, const struct timespec *, timeout)
701 #endif
702 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
703 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
704               int, outfd, loff_t *, poutoff, size_t, length,
705               unsigned int, flags)
706 #endif
707 
708 /* We do ioctl like this rather than via safe_syscall3 to preserve the
709  * "third argument might be integer or pointer or not present" behaviour of
710  * the libc function.
711  */
712 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
713 /* Similarly for fcntl. Note that callers must always:
714  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
715  *  use the flock64 struct rather than unsuffixed flock
716  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
717  */
718 #ifdef __NR_fcntl64
719 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
720 #else
721 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
722 #endif
723 
724 static inline int host_to_target_sock_type(int host_type)
725 {
726     int target_type;
727 
728     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
729     case SOCK_DGRAM:
730         target_type = TARGET_SOCK_DGRAM;
731         break;
732     case SOCK_STREAM:
733         target_type = TARGET_SOCK_STREAM;
734         break;
735     default:
736         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
737         break;
738     }
739 
740 #if defined(SOCK_CLOEXEC)
741     if (host_type & SOCK_CLOEXEC) {
742         target_type |= TARGET_SOCK_CLOEXEC;
743     }
744 #endif
745 
746 #if defined(SOCK_NONBLOCK)
747     if (host_type & SOCK_NONBLOCK) {
748         target_type |= TARGET_SOCK_NONBLOCK;
749     }
750 #endif
751 
752     return target_type;
753 }
754 
755 static abi_ulong target_brk;
756 static abi_ulong target_original_brk;
757 static abi_ulong brk_page;
758 
759 void target_set_brk(abi_ulong new_brk)
760 {
761     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
762     brk_page = HOST_PAGE_ALIGN(target_brk);
763 }
764 
765 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
766 #define DEBUGF_BRK(message, args...)
767 
768 /* do_brk() must return target values and target errnos. */
769 abi_long do_brk(abi_ulong new_brk)
770 {
771     abi_long mapped_addr;
772     abi_ulong new_alloc_size;
773 
774     /* brk pointers are always untagged */
775 
776     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
777 
778     if (!new_brk) {
779         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
780         return target_brk;
781     }
782     if (new_brk < target_original_brk) {
783         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
784                    target_brk);
785         return target_brk;
786     }
787 
788     /* If the new brk is less than the highest page reserved to the
789      * target heap allocation, set it and we're almost done...  */
790     if (new_brk <= brk_page) {
791         /* Heap contents are initialized to zero, as for anonymous
792          * mapped pages.  */
793         if (new_brk > target_brk) {
794             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
795         }
796         target_brk = new_brk;
797         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
798         return target_brk;
799     }
800 
801     /* We need to allocate more memory after the brk... Note that
802      * we don't use MAP_FIXED because that will map over the top of
803      * any existing mapping (like the one with the host libc or qemu
804      * itself); instead we treat "mapped but at wrong address" as
805      * a failure and unmap again.
806      */
807     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809                                         PROT_READ|PROT_WRITE,
810                                         MAP_ANON|MAP_PRIVATE, 0, 0));
811 
812     if (mapped_addr == brk_page) {
813         /* Heap contents are initialized to zero, as for anonymous
814          * mapped pages.  Technically the new pages are already
815          * initialized to zero since they *are* anonymous mapped
816          * pages, however we have to take care with the contents that
817          * come from the remaining part of the previous page: it may
818          * contain garbage data due to a previous heap usage (grown
819          * then shrunken).  */
820         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
821 
822         target_brk = new_brk;
823         brk_page = HOST_PAGE_ALIGN(target_brk);
824         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
825             target_brk);
826         return target_brk;
827     } else if (mapped_addr != -1) {
828         /* Mapped but at wrong address, meaning there wasn't actually
829          * enough space for this brk.
830          */
831         target_munmap(mapped_addr, new_alloc_size);
832         mapped_addr = -1;
833         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
834     } else {
836         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
837     }
838 
839 #if defined(TARGET_ALPHA)
840     /* We (partially) emulate OSF/1 on Alpha, which requires we
841        return a proper errno, not an unchanged brk value.  */
842     return -TARGET_ENOMEM;
843 #endif
844     /* For everything else, return the previous break. */
845     return target_brk;
846 }
847 
848 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
849     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
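/*
 * The guest fd_set is an array of abi_ulong words in guest byte order,
 * which need not match the host fd_set layout, so the bitmaps are copied
 * bit by bit in both directions below.
 */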
850 static inline abi_long copy_from_user_fdset(fd_set *fds,
851                                             abi_ulong target_fds_addr,
852                                             int n)
853 {
854     int i, nw, j, k;
855     abi_ulong b, *target_fds;
856 
857     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
858     if (!(target_fds = lock_user(VERIFY_READ,
859                                  target_fds_addr,
860                                  sizeof(abi_ulong) * nw,
861                                  1)))
862         return -TARGET_EFAULT;
863 
864     FD_ZERO(fds);
865     k = 0;
866     for (i = 0; i < nw; i++) {
867         /* grab the abi_ulong */
868         __get_user(b, &target_fds[i]);
869         for (j = 0; j < TARGET_ABI_BITS; j++) {
870             /* check the bit inside the abi_ulong */
871             if ((b >> j) & 1)
872                 FD_SET(k, fds);
873             k++;
874         }
875     }
876 
877     unlock_user(target_fds, target_fds_addr, 0);
878 
879     return 0;
880 }
881 
882 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
883                                                  abi_ulong target_fds_addr,
884                                                  int n)
885 {
886     if (target_fds_addr) {
887         if (copy_from_user_fdset(fds, target_fds_addr, n))
888             return -TARGET_EFAULT;
889         *fds_ptr = fds;
890     } else {
891         *fds_ptr = NULL;
892     }
893     return 0;
894 }
895 
896 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
897                                           const fd_set *fds,
898                                           int n)
899 {
900     int i, nw, j, k;
901     abi_long v;
902     abi_ulong *target_fds;
903 
904     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
905     if (!(target_fds = lock_user(VERIFY_WRITE,
906                                  target_fds_addr,
907                                  sizeof(abi_ulong) * nw,
908                                  0)))
909         return -TARGET_EFAULT;
910 
911     k = 0;
912     for (i = 0; i < nw; i++) {
913         v = 0;
914         for (j = 0; j < TARGET_ABI_BITS; j++) {
915             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
916             k++;
917         }
918         __put_user(v, &target_fds[i]);
919     }
920 
921     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
922 
923     return 0;
924 }
925 #endif
926 
927 #if defined(__alpha__)
928 #define HOST_HZ 1024
929 #else
930 #define HOST_HZ 100
931 #endif
932 
933 static inline abi_long host_to_target_clock_t(long ticks)
934 {
935 #if HOST_HZ == TARGET_HZ
936     return ticks;
937 #else
938     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
939 #endif
940 }
941 
942 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
943                                              const struct rusage *rusage)
944 {
945     struct target_rusage *target_rusage;
946 
947     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
948         return -TARGET_EFAULT;
949     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
950     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
951     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
952     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
953     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
954     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
955     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
956     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
957     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
958     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
959     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
960     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
961     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
962     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
963     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
964     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
965     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
966     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
967     unlock_user_struct(target_rusage, target_addr, 1);
968 
969     return 0;
970 }
971 
972 #ifdef TARGET_NR_setrlimit
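/*
 * Guest rlimit values that the host rlim_t cannot represent (and
 * TARGET_RLIM_INFINITY itself) are mapped to RLIM_INFINITY, so
 * "unlimited" survives the ABI conversion.
 */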
973 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
974 {
975     abi_ulong target_rlim_swap;
976     rlim_t result;
977 
978     target_rlim_swap = tswapal(target_rlim);
979     if (target_rlim_swap == TARGET_RLIM_INFINITY)
980         return RLIM_INFINITY;
981 
982     result = target_rlim_swap;
983     if (target_rlim_swap != (rlim_t)result)
984         return RLIM_INFINITY;
985 
986     return result;
987 }
988 #endif
989 
990 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
991 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
992 {
993     abi_ulong target_rlim_swap;
994     abi_ulong result;
995 
996     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
997         target_rlim_swap = TARGET_RLIM_INFINITY;
998     else
999         target_rlim_swap = rlim;
1000     result = tswapal(target_rlim_swap);
1001 
1002     return result;
1003 }
1004 #endif
1005 
1006 static inline int target_to_host_resource(int code)
1007 {
1008     switch (code) {
1009     case TARGET_RLIMIT_AS:
1010         return RLIMIT_AS;
1011     case TARGET_RLIMIT_CORE:
1012         return RLIMIT_CORE;
1013     case TARGET_RLIMIT_CPU:
1014         return RLIMIT_CPU;
1015     case TARGET_RLIMIT_DATA:
1016         return RLIMIT_DATA;
1017     case TARGET_RLIMIT_FSIZE:
1018         return RLIMIT_FSIZE;
1019     case TARGET_RLIMIT_LOCKS:
1020         return RLIMIT_LOCKS;
1021     case TARGET_RLIMIT_MEMLOCK:
1022         return RLIMIT_MEMLOCK;
1023     case TARGET_RLIMIT_MSGQUEUE:
1024         return RLIMIT_MSGQUEUE;
1025     case TARGET_RLIMIT_NICE:
1026         return RLIMIT_NICE;
1027     case TARGET_RLIMIT_NOFILE:
1028         return RLIMIT_NOFILE;
1029     case TARGET_RLIMIT_NPROC:
1030         return RLIMIT_NPROC;
1031     case TARGET_RLIMIT_RSS:
1032         return RLIMIT_RSS;
1033     case TARGET_RLIMIT_RTPRIO:
1034         return RLIMIT_RTPRIO;
1035     case TARGET_RLIMIT_SIGPENDING:
1036         return RLIMIT_SIGPENDING;
1037     case TARGET_RLIMIT_STACK:
1038         return RLIMIT_STACK;
1039     default:
1040         return code;
1041     }
1042 }
1043 
1044 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1045                                               abi_ulong target_tv_addr)
1046 {
1047     struct target_timeval *target_tv;
1048 
1049     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1050         return -TARGET_EFAULT;
1051     }
1052 
1053     __get_user(tv->tv_sec, &target_tv->tv_sec);
1054     __get_user(tv->tv_usec, &target_tv->tv_usec);
1055 
1056     unlock_user_struct(target_tv, target_tv_addr, 0);
1057 
1058     return 0;
1059 }
1060 
1061 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1062                                             const struct timeval *tv)
1063 {
1064     struct target_timeval *target_tv;
1065 
1066     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1067         return -TARGET_EFAULT;
1068     }
1069 
1070     __put_user(tv->tv_sec, &target_tv->tv_sec);
1071     __put_user(tv->tv_usec, &target_tv->tv_usec);
1072 
1073     unlock_user_struct(target_tv, target_tv_addr, 1);
1074 
1075     return 0;
1076 }
1077 
1078 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1079 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1080                                                 abi_ulong target_tv_addr)
1081 {
1082     struct target__kernel_sock_timeval *target_tv;
1083 
1084     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1085         return -TARGET_EFAULT;
1086     }
1087 
1088     __get_user(tv->tv_sec, &target_tv->tv_sec);
1089     __get_user(tv->tv_usec, &target_tv->tv_usec);
1090 
1091     unlock_user_struct(target_tv, target_tv_addr, 0);
1092 
1093     return 0;
1094 }
1095 #endif
1096 
1097 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1098                                               const struct timeval *tv)
1099 {
1100     struct target__kernel_sock_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __put_user(tv->tv_sec, &target_tv->tv_sec);
1107     __put_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 1);
1110 
1111     return 0;
1112 }
1113 
1114 #if defined(TARGET_NR_futex) || \
1115     defined(TARGET_NR_rt_sigtimedwait) || \
1116     defined(TARGET_NR_pselect6) || \
1117     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1118     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1119     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1120     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1121     defined(TARGET_NR_timer_settime) || \
1122     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1123 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1124                                                abi_ulong target_addr)
1125 {
1126     struct target_timespec *target_ts;
1127 
1128     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1129         return -TARGET_EFAULT;
1130     }
1131     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1132     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1133     unlock_user_struct(target_ts, target_addr, 0);
1134     return 0;
1135 }
1136 #endif
1137 
1138 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1139     defined(TARGET_NR_timer_settime64) || \
1140     defined(TARGET_NR_mq_timedsend_time64) || \
1141     defined(TARGET_NR_mq_timedreceive_time64) || \
1142     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1143     defined(TARGET_NR_clock_nanosleep_time64) || \
1144     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1145     defined(TARGET_NR_utimensat) || \
1146     defined(TARGET_NR_utimensat_time64) || \
1147     defined(TARGET_NR_semtimedop_time64) || \
1148     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1149 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1150                                                  abi_ulong target_addr)
1151 {
1152     struct target__kernel_timespec *target_ts;
1153 
1154     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1155         return -TARGET_EFAULT;
1156     }
1157     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1158     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1159     /* in 32bit mode, this drops the padding */
1160     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1161     unlock_user_struct(target_ts, target_addr, 0);
1162     return 0;
1163 }
1164 #endif
1165 
1166 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1167                                                struct timespec *host_ts)
1168 {
1169     struct target_timespec *target_ts;
1170 
1171     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1172         return -TARGET_EFAULT;
1173     }
1174     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1175     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1176     unlock_user_struct(target_ts, target_addr, 1);
1177     return 0;
1178 }
1179 
1180 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1181                                                  struct timespec *host_ts)
1182 {
1183     struct target__kernel_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 1);
1191     return 0;
1192 }
1193 
1194 #if defined(TARGET_NR_gettimeofday)
1195 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1196                                              struct timezone *tz)
1197 {
1198     struct target_timezone *target_tz;
1199 
1200     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203 
1204     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1205     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1206 
1207     unlock_user_struct(target_tz, target_tz_addr, 1);
1208 
1209     return 0;
1210 }
1211 #endif
1212 
1213 #if defined(TARGET_NR_settimeofday)
1214 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1215                                                abi_ulong target_tz_addr)
1216 {
1217     struct target_timezone *target_tz;
1218 
1219     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1220         return -TARGET_EFAULT;
1221     }
1222 
1223     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1224     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1225 
1226     unlock_user_struct(target_tz, target_tz_addr, 0);
1227 
1228     return 0;
1229 }
1230 #endif
1231 
1232 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1233 #include <mqueue.h>
1234 
1235 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1236                                               abi_ulong target_mq_attr_addr)
1237 {
1238     struct target_mq_attr *target_mq_attr;
1239 
1240     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1241                           target_mq_attr_addr, 1))
1242         return -TARGET_EFAULT;
1243 
1244     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1245     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1246     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1247     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1248 
1249     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1250 
1251     return 0;
1252 }
1253 
1254 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1255                                             const struct mq_attr *attr)
1256 {
1257     struct target_mq_attr *target_mq_attr;
1258 
1259     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1260                           target_mq_attr_addr, 0))
1261         return -TARGET_EFAULT;
1262 
1263     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1264     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1265     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1266     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1267 
1268     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1269 
1270     return 0;
1271 }
1272 #endif
1273 
1274 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1275 /* do_select() must return target values and target errnos. */
1276 static abi_long do_select(int n,
1277                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1278                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1279 {
1280     fd_set rfds, wfds, efds;
1281     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1282     struct timeval tv;
1283     struct timespec ts, *ts_ptr;
1284     abi_long ret;
1285 
1286     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1287     if (ret) {
1288         return ret;
1289     }
1290     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1291     if (ret) {
1292         return ret;
1293     }
1294     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1295     if (ret) {
1296         return ret;
1297     }
1298 
1299     if (target_tv_addr) {
1300         if (copy_from_user_timeval(&tv, target_tv_addr))
1301             return -TARGET_EFAULT;
1302         ts.tv_sec = tv.tv_sec;
1303         ts.tv_nsec = tv.tv_usec * 1000;
1304         ts_ptr = &ts;
1305     } else {
1306         ts_ptr = NULL;
1307     }
1308 
1309     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1310                                   ts_ptr, NULL));
1311 
1312     if (!is_error(ret)) {
1313         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1314             return -TARGET_EFAULT;
1315         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1316             return -TARGET_EFAULT;
1317         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1318             return -TARGET_EFAULT;
1319 
1320         if (target_tv_addr) {
1321             tv.tv_sec = ts.tv_sec;
1322             tv.tv_usec = ts.tv_nsec / 1000;
1323             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1324                 return -TARGET_EFAULT;
1325             }
1326         }
1327     }
1328 
1329     return ret;
1330 }
1331 
1332 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1333 static abi_long do_old_select(abi_ulong arg1)
1334 {
1335     struct target_sel_arg_struct *sel;
1336     abi_ulong inp, outp, exp, tvp;
1337     long nsel;
1338 
1339     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1340         return -TARGET_EFAULT;
1341     }
1342 
1343     nsel = tswapal(sel->n);
1344     inp = tswapal(sel->inp);
1345     outp = tswapal(sel->outp);
1346     exp = tswapal(sel->exp);
1347     tvp = tswapal(sel->tvp);
1348 
1349     unlock_user_struct(sel, arg1, 0);
1350 
1351     return do_select(nsel, inp, outp, exp, tvp);
1352 }
1353 #endif
1354 #endif
1355 
1356 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1357 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1358                             abi_long arg4, abi_long arg5, abi_long arg6,
1359                             bool time64)
1360 {
1361     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1362     fd_set rfds, wfds, efds;
1363     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1364     struct timespec ts, *ts_ptr;
1365     abi_long ret;
1366 
1367     /*
1368      * The 6th arg is actually two args smashed together,
1369      * so we cannot use the C library.
1370      */
1371     sigset_t set;
1372     struct {
1373         sigset_t *set;
1374         size_t size;
1375     } sig, *sig_ptr;
1376 
1377     abi_ulong arg_sigset, arg_sigsize, *arg7;
1378     target_sigset_t *target_sigset;
1379 
1380     n = arg1;
1381     rfd_addr = arg2;
1382     wfd_addr = arg3;
1383     efd_addr = arg4;
1384     ts_addr = arg5;
1385 
1386     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1387     if (ret) {
1388         return ret;
1389     }
1390     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1391     if (ret) {
1392         return ret;
1393     }
1394     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1395     if (ret) {
1396         return ret;
1397     }
1398 
1399     /*
1400      * This takes a timespec, and not a timeval, so we cannot
1401      * use the do_select() helper ...
1402      */
1403     if (ts_addr) {
1404         if (time64) {
1405             if (target_to_host_timespec64(&ts, ts_addr)) {
1406                 return -TARGET_EFAULT;
1407             }
1408         } else {
1409             if (target_to_host_timespec(&ts, ts_addr)) {
1410                 return -TARGET_EFAULT;
1411             }
1412         }
1413         ts_ptr = &ts;
1414     } else {
1415         ts_ptr = NULL;
1416     }
1417 
1418     /* Extract the two packed args for the sigset */
1419     if (arg6) {
1420         sig_ptr = &sig;
1421         sig.size = SIGSET_T_SIZE;
1422 
1423         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1424         if (!arg7) {
1425             return -TARGET_EFAULT;
1426         }
1427         arg_sigset = tswapal(arg7[0]);
1428         arg_sigsize = tswapal(arg7[1]);
1429         unlock_user(arg7, arg6, 0);
1430 
1431         if (arg_sigset) {
1432             sig.set = &set;
1433             if (arg_sigsize != sizeof(*target_sigset)) {
1434                 /* Like the kernel, we enforce correct size sigsets */
1435                 return -TARGET_EINVAL;
1436             }
1437             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1438                                       sizeof(*target_sigset), 1);
1439             if (!target_sigset) {
1440                 return -TARGET_EFAULT;
1441             }
1442             target_to_host_sigset(&set, target_sigset);
1443             unlock_user(target_sigset, arg_sigset, 0);
1444         } else {
1445             sig.set = NULL;
1446         }
1447     } else {
1448         sig_ptr = NULL;
1449     }
1450 
1451     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1452                                   ts_ptr, sig_ptr));
1453 
1454     if (!is_error(ret)) {
1455         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1456             return -TARGET_EFAULT;
1457         }
1458         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1459             return -TARGET_EFAULT;
1460         }
1461         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1462             return -TARGET_EFAULT;
1463         }
1464         if (time64) {
1465             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1466                 return -TARGET_EFAULT;
1467             }
1468         } else {
1469             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1470                 return -TARGET_EFAULT;
1471             }
1472         }
1473     }
1474     return ret;
1475 }
1476 #endif
1477 
1478 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1479     defined(TARGET_NR_ppoll_time64)
1480 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1481                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1482 {
1483     struct target_pollfd *target_pfd;
1484     unsigned int nfds = arg2;
1485     struct pollfd *pfd;
1486     unsigned int i;
1487     abi_long ret;
1488 
1489     pfd = NULL;
1490     target_pfd = NULL;
1491     if (nfds) {
1492         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1493             return -TARGET_EINVAL;
1494         }
1495         target_pfd = lock_user(VERIFY_WRITE, arg1,
1496                                sizeof(struct target_pollfd) * nfds, 1);
1497         if (!target_pfd) {
1498             return -TARGET_EFAULT;
1499         }
1500 
1501         pfd = alloca(sizeof(struct pollfd) * nfds);
1502         for (i = 0; i < nfds; i++) {
1503             pfd[i].fd = tswap32(target_pfd[i].fd);
1504             pfd[i].events = tswap16(target_pfd[i].events);
1505         }
1506     }
1507     if (ppoll) {
1508         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1509         target_sigset_t *target_set;
1510         sigset_t _set, *set = &_set;
1511 
1512         if (arg3) {
1513             if (time64) {
1514                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1515                     unlock_user(target_pfd, arg1, 0);
1516                     return -TARGET_EFAULT;
1517                 }
1518             } else {
1519                 if (target_to_host_timespec(timeout_ts, arg3)) {
1520                     unlock_user(target_pfd, arg1, 0);
1521                     return -TARGET_EFAULT;
1522                 }
1523             }
1524         } else {
1525             timeout_ts = NULL;
1526         }
1527 
1528         if (arg4) {
1529             if (arg5 != sizeof(target_sigset_t)) {
1530                 unlock_user(target_pfd, arg1, 0);
1531                 return -TARGET_EINVAL;
1532             }
1533 
1534             target_set = lock_user(VERIFY_READ, arg4,
1535                                    sizeof(target_sigset_t), 1);
1536             if (!target_set) {
1537                 unlock_user(target_pfd, arg1, 0);
1538                 return -TARGET_EFAULT;
1539             }
1540             target_to_host_sigset(set, target_set);
1541         } else {
1542             set = NULL;
1543         }
1544 
1545         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1546                                    set, SIGSET_T_SIZE));
1547 
1548         if (!is_error(ret) && arg3) {
1549             if (time64) {
1550                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (host_to_target_timespec(arg3, timeout_ts)) {
1555                     return -TARGET_EFAULT;
1556                 }
1557             }
1558         }
1559         if (arg4) {
1560             unlock_user(target_set, arg4, 0);
1561         }
1562     } else {
1563         struct timespec ts, *pts;
1564 
1565         if (arg3 >= 0) {
1566             /* Convert milliseconds to seconds and nanoseconds */
1567             ts.tv_sec = arg3 / 1000;
1568             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1569             pts = &ts;
1570         } else {
1571             /* A negative poll() timeout means "infinite" */
1572             pts = NULL;
1573         }
1574         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1575     }
1576 
1577     if (!is_error(ret)) {
1578         for (i = 0; i < nfds; i++) {
1579             target_pfd[i].revents = tswap16(pfd[i].revents);
1580         }
1581     }
1582     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1583     return ret;
1584 }
1585 #endif
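
/*
 * Guest-side usage sketch (illustrative only, not part of the emulator;
 * "sock" is a placeholder descriptor):
 *
 *     struct pollfd pfd = { .fd = sock, .events = POLLIN };
 *     poll(&pfd, 1, 1500);
 *
 * lands in the plain-poll branch above with arg3 == 1500, which is split
 * into ts.tv_sec = 1 and ts.tv_nsec = 500000000 before calling the host
 * safe_ppoll().  A negative timeout (e.g. poll(&pfd, 1, -1)) is passed on
 * as a NULL timespec, i.e. "wait forever".  A guest ppoll()/ppoll_time64
 * supplies its own timespec instead, converted by
 * target_to_host_timespec{,64}(), plus an optional signal mask that must
 * be exactly sizeof(target_sigset_t) bytes long.
 */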
1586 
1587 static abi_long do_pipe2(int host_pipe[], int flags)
1588 {
1589 #ifdef CONFIG_PIPE2
1590     return pipe2(host_pipe, flags);
1591 #else
1592     return -ENOSYS;
1593 #endif
1594 }
1595 
1596 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1597                         int flags, int is_pipe2)
1598 {
1599     int host_pipe[2];
1600     abi_long ret;
1601     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1602 
1603     if (is_error(ret))
1604         return get_errno(ret);
1605 
1606     /* Several targets have special calling conventions for the original
1607        pipe syscall, but these were not carried over to the pipe2 syscall.  */
1608     if (!is_pipe2) {
1609 #if defined(TARGET_ALPHA)
1610         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1611         return host_pipe[0];
1612 #elif defined(TARGET_MIPS)
1613         ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_SH4)
1616         ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
1617         return host_pipe[0];
1618 #elif defined(TARGET_SPARC)
1619         ((CPUSPARCState *)cpu_env)->regwptr[1] = host_pipe[1];
1620         return host_pipe[0];
1621 #endif
1622     }
1623 
1624     if (put_user_s32(host_pipe[0], pipedes)
1625         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1626         return -TARGET_EFAULT;
1627     return get_errno(ret);
1628 }
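
/*
 * Note on the pipe return conventions emulated above (illustrative only):
 * on most targets a guest
 *
 *     int fds[2];
 *     pipe(fds);
 *
 * gets both descriptors written back through the fds[] pointer.  On Alpha,
 * MIPS, SH4 and SPARC the original pipe syscall instead returns the read
 * end as the syscall result and the write end in a second register
 * (ir[IR_A4], active_tc.gpr[3], gregs[1] and regwptr[1] respectively),
 * which is what the target-specific branches do; pipe2() always uses the
 * pointer form.
 */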
1629 
1630 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1631                                               abi_ulong target_addr,
1632                                               socklen_t len)
1633 {
1634     struct target_ip_mreqn *target_smreqn;
1635 
1636     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1637     if (!target_smreqn)
1638         return -TARGET_EFAULT;
1639     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1640     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1641     if (len == sizeof(struct target_ip_mreqn))
1642         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1643     unlock_user(target_smreqn, target_addr, 0);
1644 
1645     return 0;
1646 }
1647 
1648 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1649                                                abi_ulong target_addr,
1650                                                socklen_t len)
1651 {
1652     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1653     sa_family_t sa_family;
1654     struct target_sockaddr *target_saddr;
1655 
1656     if (fd_trans_target_to_host_addr(fd)) {
1657         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1658     }
1659 
1660     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1661     if (!target_saddr)
1662         return -TARGET_EFAULT;
1663 
1664     sa_family = tswap16(target_saddr->sa_family);
1665 
1666     /* The caller might send an incomplete sun_path; sun_path
1667      * must be terminated by \0 (see the manual page), but
1668      * unfortunately it is quite common to specify the sockaddr_un
1669      * length as "strlen(x->sun_path)" when it should be
1670      * "strlen(...) + 1". We fix that up here if needed.
1671      * The Linux kernel has a similar feature.
1672      */
1673 
1674     if (sa_family == AF_UNIX) {
1675         if (len < unix_maxlen && len > 0) {
1676             char *cp = (char *)target_saddr;
1677 
1678             if (cp[len - 1] && !cp[len])
1679                 len++;
1680         }
1681         if (len > unix_maxlen)
1682             len = unix_maxlen;
1683     }
1684 
1685     memcpy(addr, target_saddr, len);
1686     addr->sa_family = sa_family;
1687     if (sa_family == AF_NETLINK) {
1688         struct sockaddr_nl *nladdr;
1689 
1690         nladdr = (struct sockaddr_nl *)addr;
1691         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1692         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1693     } else if (sa_family == AF_PACKET) {
1694         struct target_sockaddr_ll *lladdr;
1695 
1696         lladdr = (struct target_sockaddr_ll *)addr;
1697         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1698         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1699     }
1700     unlock_user(target_saddr, target_addr, 0);
1701 
1702     return 0;
1703 }
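
/*
 * Example of the AF_UNIX length fix-up above (guest view, illustrative
 * only; "fd" is a placeholder descriptor):
 *
 *     struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *     strcpy(sun.sun_path, "/tmp/sock");
 *     connect(fd, (struct sockaddr *)&sun,
 *             offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path));
 *
 * passes a length that omits the terminating '\0'.  If the byte just past
 * the supplied length happens to be zero, len is bumped by one so the host
 * connect() sees a properly terminated path; lengths larger than
 * sizeof(struct sockaddr_un) are clamped.
 */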
1704 
1705 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1706                                                struct sockaddr *addr,
1707                                                socklen_t len)
1708 {
1709     struct target_sockaddr *target_saddr;
1710 
1711     if (len == 0) {
1712         return 0;
1713     }
1714     assert(addr);
1715 
1716     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1717     if (!target_saddr)
1718         return -TARGET_EFAULT;
1719     memcpy(target_saddr, addr, len);
1720     if (len >= offsetof(struct target_sockaddr, sa_family) +
1721         sizeof(target_saddr->sa_family)) {
1722         target_saddr->sa_family = tswap16(addr->sa_family);
1723     }
1724     if (addr->sa_family == AF_NETLINK &&
1725         len >= sizeof(struct target_sockaddr_nl)) {
1726         struct target_sockaddr_nl *target_nl =
1727                (struct target_sockaddr_nl *)target_saddr;
1728         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1729         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1730     } else if (addr->sa_family == AF_PACKET) {
1731         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1732         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1733         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1734     } else if (addr->sa_family == AF_INET6 &&
1735                len >= sizeof(struct target_sockaddr_in6)) {
1736         struct target_sockaddr_in6 *target_in6 =
1737                (struct target_sockaddr_in6 *)target_saddr;
1738         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1739     }
1740     unlock_user(target_saddr, target_addr, len);
1741 
1742     return 0;
1743 }
1744 
1745 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1746                                            struct target_msghdr *target_msgh)
1747 {
1748     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1749     abi_long msg_controllen;
1750     abi_ulong target_cmsg_addr;
1751     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1752     socklen_t space = 0;
1753 
1754     msg_controllen = tswapal(target_msgh->msg_controllen);
1755     if (msg_controllen < sizeof (struct target_cmsghdr))
1756         goto the_end;
1757     target_cmsg_addr = tswapal(target_msgh->msg_control);
1758     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1759     target_cmsg_start = target_cmsg;
1760     if (!target_cmsg)
1761         return -TARGET_EFAULT;
1762 
1763     while (cmsg && target_cmsg) {
1764         void *data = CMSG_DATA(cmsg);
1765         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1766 
1767         int len = tswapal(target_cmsg->cmsg_len)
1768             - sizeof(struct target_cmsghdr);
1769 
1770         space += CMSG_SPACE(len);
1771         if (space > msgh->msg_controllen) {
1772             space -= CMSG_SPACE(len);
1773             /* This is a QEMU bug, since we allocated the payload
1774              * area ourselves (unlike overflow in host-to-target
1775              * conversion, which is just the guest giving us a buffer
1776              * that's too small). It can't happen for the payload types
1777              * we currently support; if it becomes an issue in future
1778              * we would need to improve our allocation strategy to
1779              * something more intelligent than "twice the size of the
1780              * target buffer we're reading from".
1781              */
1782             qemu_log_mask(LOG_UNIMP,
1783                           ("Unsupported ancillary data %d/%d: "
1784                            "unhandled msg size\n"),
1785                           tswap32(target_cmsg->cmsg_level),
1786                           tswap32(target_cmsg->cmsg_type));
1787             break;
1788         }
1789 
1790         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1791             cmsg->cmsg_level = SOL_SOCKET;
1792         } else {
1793             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1794         }
1795         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1796         cmsg->cmsg_len = CMSG_LEN(len);
1797 
1798         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1799             int *fd = (int *)data;
1800             int *target_fd = (int *)target_data;
1801             int i, numfds = len / sizeof(int);
1802 
1803             for (i = 0; i < numfds; i++) {
1804                 __get_user(fd[i], target_fd + i);
1805             }
1806         } else if (cmsg->cmsg_level == SOL_SOCKET
1807                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1808             struct ucred *cred = (struct ucred *)data;
1809             struct target_ucred *target_cred =
1810                 (struct target_ucred *)target_data;
1811 
1812             __get_user(cred->pid, &target_cred->pid);
1813             __get_user(cred->uid, &target_cred->uid);
1814             __get_user(cred->gid, &target_cred->gid);
1815         } else {
1816             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1817                           cmsg->cmsg_level, cmsg->cmsg_type);
1818             memcpy(data, target_data, len);
1819         }
1820 
1821         cmsg = CMSG_NXTHDR(msgh, cmsg);
1822         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1823                                          target_cmsg_start);
1824     }
1825     unlock_user(target_cmsg, target_cmsg_addr, 0);
1826  the_end:
1827     msgh->msg_controllen = space;
1828     return 0;
1829 }
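
/*
 * Guest-side sketch of the ancillary data converted above (illustrative
 * only; "sock" and "fd" are placeholder descriptors):
 *
 *     char dummy = 0, buf[CMSG_SPACE(sizeof(int))];
 *     struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *     struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                           .msg_control = buf,
 *                           .msg_controllen = sizeof(buf) };
 *     struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *     cm->cmsg_level = SOL_SOCKET;
 *     cm->cmsg_type = SCM_RIGHTS;
 *     cm->cmsg_len = CMSG_LEN(sizeof(int));
 *     memcpy(CMSG_DATA(cm), &fd, sizeof(int));
 *     sendmsg(sock, &msg, 0);
 *
 * The loop above byte-swaps each int in the SCM_RIGHTS payload into the
 * host cmsg, converts SCM_CREDENTIALS field by field, and copies any other
 * payload verbatim with a LOG_UNIMP warning.
 */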
1830 
1831 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1832                                            struct msghdr *msgh)
1833 {
1834     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1835     abi_long msg_controllen;
1836     abi_ulong target_cmsg_addr;
1837     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1838     socklen_t space = 0;
1839 
1840     msg_controllen = tswapal(target_msgh->msg_controllen);
1841     if (msg_controllen < sizeof (struct target_cmsghdr))
1842         goto the_end;
1843     target_cmsg_addr = tswapal(target_msgh->msg_control);
1844     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1845     target_cmsg_start = target_cmsg;
1846     if (!target_cmsg)
1847         return -TARGET_EFAULT;
1848 
1849     while (cmsg && target_cmsg) {
1850         void *data = CMSG_DATA(cmsg);
1851         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1852 
1853         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1854         int tgt_len, tgt_space;
1855 
1856         /* We never copy a half-header but may copy half-data;
1857          * this is Linux's behaviour in put_cmsg(). Note that
1858          * truncation here is a guest problem (which we report
1859          * to the guest via the CTRUNC bit), unlike truncation
1860          * in target_to_host_cmsg, which is a QEMU bug.
1861          */
1862         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1863             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1864             break;
1865         }
1866 
1867         if (cmsg->cmsg_level == SOL_SOCKET) {
1868             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1869         } else {
1870             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1871         }
1872         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1873 
1874         /* Payload types which need a different size of payload on
1875          * the target must adjust tgt_len here.
1876          */
1877         tgt_len = len;
1878         switch (cmsg->cmsg_level) {
1879         case SOL_SOCKET:
1880             switch (cmsg->cmsg_type) {
1881             case SO_TIMESTAMP:
1882                 tgt_len = sizeof(struct target_timeval);
1883                 break;
1884             default:
1885                 break;
1886             }
1887             break;
1888         default:
1889             break;
1890         }
1891 
1892         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1893             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1894             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1895         }
1896 
1897         /* We must now copy-and-convert len bytes of payload
1898          * into tgt_len bytes of destination space. Bear in mind
1899          * that in both source and destination we may be dealing
1900          * with a truncated value!
1901          */
1902         switch (cmsg->cmsg_level) {
1903         case SOL_SOCKET:
1904             switch (cmsg->cmsg_type) {
1905             case SCM_RIGHTS:
1906             {
1907                 int *fd = (int *)data;
1908                 int *target_fd = (int *)target_data;
1909                 int i, numfds = tgt_len / sizeof(int);
1910 
1911                 for (i = 0; i < numfds; i++) {
1912                     __put_user(fd[i], target_fd + i);
1913                 }
1914                 break;
1915             }
1916             case SO_TIMESTAMP:
1917             {
1918                 struct timeval *tv = (struct timeval *)data;
1919                 struct target_timeval *target_tv =
1920                     (struct target_timeval *)target_data;
1921 
1922                 if (len != sizeof(struct timeval) ||
1923                     tgt_len != sizeof(struct target_timeval)) {
1924                     goto unimplemented;
1925                 }
1926 
1927                 /* copy struct timeval to target */
1928                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1929                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1930                 break;
1931             }
1932             case SCM_CREDENTIALS:
1933             {
1934                 struct ucred *cred = (struct ucred *)data;
1935                 struct target_ucred *target_cred =
1936                     (struct target_ucred *)target_data;
1937 
1938                 __put_user(cred->pid, &target_cred->pid);
1939                 __put_user(cred->uid, &target_cred->uid);
1940                 __put_user(cred->gid, &target_cred->gid);
1941                 break;
1942             }
1943             default:
1944                 goto unimplemented;
1945             }
1946             break;
1947 
1948         case SOL_IP:
1949             switch (cmsg->cmsg_type) {
1950             case IP_TTL:
1951             {
1952                 uint32_t *v = (uint32_t *)data;
1953                 uint32_t *t_int = (uint32_t *)target_data;
1954 
1955                 if (len != sizeof(uint32_t) ||
1956                     tgt_len != sizeof(uint32_t)) {
1957                     goto unimplemented;
1958                 }
1959                 __put_user(*v, t_int);
1960                 break;
1961             }
1962             case IP_RECVERR:
1963             {
1964                 struct errhdr_t {
1965                    struct sock_extended_err ee;
1966                    struct sockaddr_in offender;
1967                 };
1968                 struct errhdr_t *errh = (struct errhdr_t *)data;
1969                 struct errhdr_t *target_errh =
1970                     (struct errhdr_t *)target_data;
1971 
1972                 if (len != sizeof(struct errhdr_t) ||
1973                     tgt_len != sizeof(struct errhdr_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1977                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1978                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1979                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1980                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1981                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1982                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1983                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1984                     (void *) &errh->offender, sizeof(errh->offender));
1985                 break;
1986             }
1987             default:
1988                 goto unimplemented;
1989             }
1990             break;
1991 
1992         case SOL_IPV6:
1993             switch (cmsg->cmsg_type) {
1994             case IPV6_HOPLIMIT:
1995             {
1996                 uint32_t *v = (uint32_t *)data;
1997                 uint32_t *t_int = (uint32_t *)target_data;
1998 
1999                 if (len != sizeof(uint32_t) ||
2000                     tgt_len != sizeof(uint32_t)) {
2001                     goto unimplemented;
2002                 }
2003                 __put_user(*v, t_int);
2004                 break;
2005             }
2006             case IPV6_RECVERR:
2007             {
2008                 struct errhdr6_t {
2009                    struct sock_extended_err ee;
2010                    struct sockaddr_in6 offender;
2011                 };
2012                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2013                 struct errhdr6_t *target_errh =
2014                     (struct errhdr6_t *)target_data;
2015 
2016                 if (len != sizeof(struct errhdr6_t) ||
2017                     tgt_len != sizeof(struct errhdr6_t)) {
2018                     goto unimplemented;
2019                 }
2020                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2021                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2022                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2023                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2024                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2025                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2026                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2027                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2028                     (void *) &errh->offender, sizeof(errh->offender));
2029                 break;
2030             }
2031             default:
2032                 goto unimplemented;
2033             }
2034             break;
2035 
2036         default:
2037         unimplemented:
2038             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2039                           cmsg->cmsg_level, cmsg->cmsg_type);
2040             memcpy(target_data, data, MIN(len, tgt_len));
2041             if (tgt_len > len) {
2042                 memset(target_data + len, 0, tgt_len - len);
2043             }
2044         }
2045 
2046         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2047         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2048         if (msg_controllen < tgt_space) {
2049             tgt_space = msg_controllen;
2050         }
2051         msg_controllen -= tgt_space;
2052         space += tgt_space;
2053         cmsg = CMSG_NXTHDR(msgh, cmsg);
2054         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2055                                          target_cmsg_start);
2056     }
2057     unlock_user(target_cmsg, target_cmsg_addr, space);
2058  the_end:
2059     target_msgh->msg_controllen = tswapal(space);
2060     return 0;
2061 }
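
/*
 * Worked example for the size adjustment above (illustrative only): for an
 * SO_TIMESTAMP message the host payload is sizeof(struct timeval) while the
 * guest expects sizeof(struct target_timeval) (typically 16 vs. 8 bytes for
 * a 32-bit guest on a 64-bit host), so tgt_len is rewritten before
 * TARGET_CMSG_LEN()/TARGET_CMSG_SPACE() are applied.  If the guest-supplied
 * control buffer cannot hold the converted header plus payload, the payload
 * is truncated and MSG_CTRUNC is set in msg_flags, mirroring the kernel's
 * put_cmsg() behaviour.
 */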
2062 
2063 /* do_setsockopt() must return target values and target errnos. */
2064 static abi_long do_setsockopt(int sockfd, int level, int optname,
2065                               abi_ulong optval_addr, socklen_t optlen)
2066 {
2067     abi_long ret;
2068     int val;
2069     struct ip_mreqn *ip_mreq;
2070     struct ip_mreq_source *ip_mreq_source;
2071 
2072     switch(level) {
2073     case SOL_TCP:
2074     case SOL_UDP:
2075         /* TCP and UDP options all take an 'int' value.  */
2076         if (optlen < sizeof(uint32_t))
2077             return -TARGET_EINVAL;
2078 
2079         if (get_user_u32(val, optval_addr))
2080             return -TARGET_EFAULT;
2081         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2082         break;
2083     case SOL_IP:
2084         switch(optname) {
2085         case IP_TOS:
2086         case IP_TTL:
2087         case IP_HDRINCL:
2088         case IP_ROUTER_ALERT:
2089         case IP_RECVOPTS:
2090         case IP_RETOPTS:
2091         case IP_PKTINFO:
2092         case IP_MTU_DISCOVER:
2093         case IP_RECVERR:
2094         case IP_RECVTTL:
2095         case IP_RECVTOS:
2096 #ifdef IP_FREEBIND
2097         case IP_FREEBIND:
2098 #endif
2099         case IP_MULTICAST_TTL:
2100         case IP_MULTICAST_LOOP:
2101             val = 0;
2102             if (optlen >= sizeof(uint32_t)) {
2103                 if (get_user_u32(val, optval_addr))
2104                     return -TARGET_EFAULT;
2105             } else if (optlen >= 1) {
2106                 if (get_user_u8(val, optval_addr))
2107                     return -TARGET_EFAULT;
2108             }
2109             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2110             break;
2111         case IP_ADD_MEMBERSHIP:
2112         case IP_DROP_MEMBERSHIP:
2113             if (optlen < sizeof (struct target_ip_mreq) ||
2114                 optlen > sizeof (struct target_ip_mreqn))
2115                 return -TARGET_EINVAL;
2116 
2117             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2118             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2119             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2120             break;
2121 
2122         case IP_BLOCK_SOURCE:
2123         case IP_UNBLOCK_SOURCE:
2124         case IP_ADD_SOURCE_MEMBERSHIP:
2125         case IP_DROP_SOURCE_MEMBERSHIP:
2126             if (optlen != sizeof (struct target_ip_mreq_source))
2127                 return -TARGET_EINVAL;
2128 
2129             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2130             if (!ip_mreq_source) {
2131                 return -TARGET_EFAULT;
2132             }
2133             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2134             unlock_user(ip_mreq_source, optval_addr, 0);
2135             break;
2136 
2137         default:
2138             goto unimplemented;
2139         }
2140         break;
2141     case SOL_IPV6:
2142         switch (optname) {
2143         case IPV6_MTU_DISCOVER:
2144         case IPV6_MTU:
2145         case IPV6_V6ONLY:
2146         case IPV6_RECVPKTINFO:
2147         case IPV6_UNICAST_HOPS:
2148         case IPV6_MULTICAST_HOPS:
2149         case IPV6_MULTICAST_LOOP:
2150         case IPV6_RECVERR:
2151         case IPV6_RECVHOPLIMIT:
2152         case IPV6_2292HOPLIMIT:
2153         case IPV6_CHECKSUM:
2154         case IPV6_ADDRFORM:
2155         case IPV6_2292PKTINFO:
2156         case IPV6_RECVTCLASS:
2157         case IPV6_RECVRTHDR:
2158         case IPV6_2292RTHDR:
2159         case IPV6_RECVHOPOPTS:
2160         case IPV6_2292HOPOPTS:
2161         case IPV6_RECVDSTOPTS:
2162         case IPV6_2292DSTOPTS:
2163         case IPV6_TCLASS:
2164         case IPV6_ADDR_PREFERENCES:
2165 #ifdef IPV6_RECVPATHMTU
2166         case IPV6_RECVPATHMTU:
2167 #endif
2168 #ifdef IPV6_TRANSPARENT
2169         case IPV6_TRANSPARENT:
2170 #endif
2171 #ifdef IPV6_FREEBIND
2172         case IPV6_FREEBIND:
2173 #endif
2174 #ifdef IPV6_RECVORIGDSTADDR
2175         case IPV6_RECVORIGDSTADDR:
2176 #endif
2177             val = 0;
2178             if (optlen < sizeof(uint32_t)) {
2179                 return -TARGET_EINVAL;
2180             }
2181             if (get_user_u32(val, optval_addr)) {
2182                 return -TARGET_EFAULT;
2183             }
2184             ret = get_errno(setsockopt(sockfd, level, optname,
2185                                        &val, sizeof(val)));
2186             break;
2187         case IPV6_PKTINFO:
2188         {
2189             struct in6_pktinfo pki;
2190 
2191             if (optlen < sizeof(pki)) {
2192                 return -TARGET_EINVAL;
2193             }
2194 
2195             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2196                 return -TARGET_EFAULT;
2197             }
2198 
2199             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2200 
2201             ret = get_errno(setsockopt(sockfd, level, optname,
2202                                        &pki, sizeof(pki)));
2203             break;
2204         }
2205         case IPV6_ADD_MEMBERSHIP:
2206         case IPV6_DROP_MEMBERSHIP:
2207         {
2208             struct ipv6_mreq ipv6mreq;
2209 
2210             if (optlen < sizeof(ipv6mreq)) {
2211                 return -TARGET_EINVAL;
2212             }
2213 
2214             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2215                 return -TARGET_EFAULT;
2216             }
2217 
2218             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2219 
2220             ret = get_errno(setsockopt(sockfd, level, optname,
2221                                        &ipv6mreq, sizeof(ipv6mreq)));
2222             break;
2223         }
2224         default:
2225             goto unimplemented;
2226         }
2227         break;
2228     case SOL_ICMPV6:
2229         switch (optname) {
2230         case ICMPV6_FILTER:
2231         {
2232             struct icmp6_filter icmp6f;
2233 
2234             if (optlen > sizeof(icmp6f)) {
2235                 optlen = sizeof(icmp6f);
2236             }
2237 
2238             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2239                 return -TARGET_EFAULT;
2240             }
2241 
2242             for (val = 0; val < 8; val++) {
2243                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2244             }
2245 
2246             ret = get_errno(setsockopt(sockfd, level, optname,
2247                                        &icmp6f, optlen));
2248             break;
2249         }
2250         default:
2251             goto unimplemented;
2252         }
2253         break;
2254     case SOL_RAW:
2255         switch (optname) {
2256         case ICMP_FILTER:
2257         case IPV6_CHECKSUM:
2258             /* These options take a u32 value.  */
2259             if (optlen < sizeof(uint32_t)) {
2260                 return -TARGET_EINVAL;
2261             }
2262 
2263             if (get_user_u32(val, optval_addr)) {
2264                 return -TARGET_EFAULT;
2265             }
2266             ret = get_errno(setsockopt(sockfd, level, optname,
2267                                        &val, sizeof(val)));
2268             break;
2269 
2270         default:
2271             goto unimplemented;
2272         }
2273         break;
2274 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2275     case SOL_ALG:
2276         switch (optname) {
2277         case ALG_SET_KEY:
2278         {
2279             char *alg_key = g_malloc(optlen);
2280 
2281             if (!alg_key) {
2282                 return -TARGET_ENOMEM;
2283             }
2284             if (copy_from_user(alg_key, optval_addr, optlen)) {
2285                 g_free(alg_key);
2286                 return -TARGET_EFAULT;
2287             }
2288             ret = get_errno(setsockopt(sockfd, level, optname,
2289                                        alg_key, optlen));
2290             g_free(alg_key);
2291             break;
2292         }
2293         case ALG_SET_AEAD_AUTHSIZE:
2294         {
2295             ret = get_errno(setsockopt(sockfd, level, optname,
2296                                        NULL, optlen));
2297             break;
2298         }
2299         default:
2300             goto unimplemented;
2301         }
2302         break;
2303 #endif
2304     case TARGET_SOL_SOCKET:
2305         switch (optname) {
2306         case TARGET_SO_RCVTIMEO:
2307         {
2308                 struct timeval tv;
2309 
2310                 optname = SO_RCVTIMEO;
2311 
2312 set_timeout:
2313                 if (optlen != sizeof(struct target_timeval)) {
2314                     return -TARGET_EINVAL;
2315                 }
2316 
2317                 if (copy_from_user_timeval(&tv, optval_addr)) {
2318                     return -TARGET_EFAULT;
2319                 }
2320 
2321                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2322                                 &tv, sizeof(tv)));
2323                 return ret;
2324         }
2325         case TARGET_SO_SNDTIMEO:
2326                 optname = SO_SNDTIMEO;
2327                 goto set_timeout;
2328         case TARGET_SO_ATTACH_FILTER:
2329         {
2330                 struct target_sock_fprog *tfprog;
2331                 struct target_sock_filter *tfilter;
2332                 struct sock_fprog fprog;
2333                 struct sock_filter *filter;
2334                 int i;
2335 
2336                 if (optlen != sizeof(*tfprog)) {
2337                     return -TARGET_EINVAL;
2338                 }
2339                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2340                     return -TARGET_EFAULT;
2341                 }
2342                 if (!lock_user_struct(VERIFY_READ, tfilter,
2343                                       tswapal(tfprog->filter), 0)) {
2344                     unlock_user_struct(tfprog, optval_addr, 1);
2345                     return -TARGET_EFAULT;
2346                 }
2347 
2348                 fprog.len = tswap16(tfprog->len);
2349                 filter = g_try_new(struct sock_filter, fprog.len);
2350                 if (filter == NULL) {
2351                     unlock_user_struct(tfilter, tfprog->filter, 1);
2352                     unlock_user_struct(tfprog, optval_addr, 1);
2353                     return -TARGET_ENOMEM;
2354                 }
2355                 for (i = 0; i < fprog.len; i++) {
2356                     filter[i].code = tswap16(tfilter[i].code);
2357                     filter[i].jt = tfilter[i].jt;
2358                     filter[i].jf = tfilter[i].jf;
2359                     filter[i].k = tswap32(tfilter[i].k);
2360                 }
2361                 fprog.filter = filter;
2362 
2363                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2364                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2365                 g_free(filter);
2366 
2367                 unlock_user_struct(tfilter, tfprog->filter, 1);
2368                 unlock_user_struct(tfprog, optval_addr, 1);
2369                 return ret;
2370         }
2371         case TARGET_SO_BINDTODEVICE:
2372         {
2373                 char *dev_ifname, *addr_ifname;
2374 
2375                 if (optlen > IFNAMSIZ - 1) {
2376                     optlen = IFNAMSIZ - 1;
2377                 }
2378                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2379                 if (!dev_ifname) {
2380                     return -TARGET_EFAULT;
2381                 }
2382                 optname = SO_BINDTODEVICE;
2383                 addr_ifname = alloca(IFNAMSIZ);
2384                 memcpy(addr_ifname, dev_ifname, optlen);
2385                 addr_ifname[optlen] = 0;
2386                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2387                                            addr_ifname, optlen));
2388                 unlock_user(dev_ifname, optval_addr, 0);
2389                 return ret;
2390         }
2391         case TARGET_SO_LINGER:
2392         {
2393                 struct linger lg;
2394                 struct target_linger *tlg;
2395 
2396                 if (optlen != sizeof(struct target_linger)) {
2397                     return -TARGET_EINVAL;
2398                 }
2399                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2400                     return -TARGET_EFAULT;
2401                 }
2402                 __get_user(lg.l_onoff, &tlg->l_onoff);
2403                 __get_user(lg.l_linger, &tlg->l_linger);
2404                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2405                                 &lg, sizeof(lg)));
2406                 unlock_user_struct(tlg, optval_addr, 0);
2407                 return ret;
2408         }
2409         /* Options with 'int' argument.  */
2410         case TARGET_SO_DEBUG:
2411                 optname = SO_DEBUG;
2412                 break;
2413         case TARGET_SO_REUSEADDR:
2414                 optname = SO_REUSEADDR;
2415                 break;
2416 #ifdef SO_REUSEPORT
2417         case TARGET_SO_REUSEPORT:
2418                 optname = SO_REUSEPORT;
2419                 break;
2420 #endif
2421         case TARGET_SO_TYPE:
2422                 optname = SO_TYPE;
2423                 break;
2424         case TARGET_SO_ERROR:
2425                 optname = SO_ERROR;
2426                 break;
2427         case TARGET_SO_DONTROUTE:
2428                 optname = SO_DONTROUTE;
2429                 break;
2430         case TARGET_SO_BROADCAST:
2431                 optname = SO_BROADCAST;
2432                 break;
2433         case TARGET_SO_SNDBUF:
2434                 optname = SO_SNDBUF;
2435                 break;
2436         case TARGET_SO_SNDBUFFORCE:
2437                 optname = SO_SNDBUFFORCE;
2438                 break;
2439         case TARGET_SO_RCVBUF:
2440                 optname = SO_RCVBUF;
2441                 break;
2442         case TARGET_SO_RCVBUFFORCE:
2443                 optname = SO_RCVBUFFORCE;
2444                 break;
2445         case TARGET_SO_KEEPALIVE:
2446                 optname = SO_KEEPALIVE;
2447                 break;
2448         case TARGET_SO_OOBINLINE:
2449                 optname = SO_OOBINLINE;
2450                 break;
2451         case TARGET_SO_NO_CHECK:
2452                 optname = SO_NO_CHECK;
2453                 break;
2454         case TARGET_SO_PRIORITY:
2455                 optname = SO_PRIORITY;
2456                 break;
2457 #ifdef SO_BSDCOMPAT
2458         case TARGET_SO_BSDCOMPAT:
2459                 optname = SO_BSDCOMPAT;
2460                 break;
2461 #endif
2462         case TARGET_SO_PASSCRED:
2463                 optname = SO_PASSCRED;
2464                 break;
2465         case TARGET_SO_PASSSEC:
2466                 optname = SO_PASSSEC;
2467                 break;
2468         case TARGET_SO_TIMESTAMP:
2469                 optname = SO_TIMESTAMP;
2470                 break;
2471         case TARGET_SO_RCVLOWAT:
2472                 optname = SO_RCVLOWAT;
2473                 break;
2474         default:
2475             goto unimplemented;
2476         }
2477         if (optlen < sizeof(uint32_t))
2478             return -TARGET_EINVAL;
2479 
2480         if (get_user_u32(val, optval_addr))
2481             return -TARGET_EFAULT;
2482         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2483         break;
2484 #ifdef SOL_NETLINK
2485     case SOL_NETLINK:
2486         switch (optname) {
2487         case NETLINK_PKTINFO:
2488         case NETLINK_ADD_MEMBERSHIP:
2489         case NETLINK_DROP_MEMBERSHIP:
2490         case NETLINK_BROADCAST_ERROR:
2491         case NETLINK_NO_ENOBUFS:
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2493         case NETLINK_LISTEN_ALL_NSID:
2494         case NETLINK_CAP_ACK:
2495 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2496 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2497         case NETLINK_EXT_ACK:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2500         case NETLINK_GET_STRICT_CHK:
2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2502             break;
2503         default:
2504             goto unimplemented;
2505         }
2506         val = 0;
2507         if (optlen < sizeof(uint32_t)) {
2508             return -TARGET_EINVAL;
2509         }
2510         if (get_user_u32(val, optval_addr)) {
2511             return -TARGET_EFAULT;
2512         }
2513         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2514                                    sizeof(val)));
2515         break;
2516 #endif /* SOL_NETLINK */
2517     default:
2518     unimplemented:
2519         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2520                       level, optname);
2521         ret = -TARGET_ENOPROTOOPT;
2522     }
2523     return ret;
2524 }
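
/*
 * Guest-side sketch (illustrative only; "s" is a placeholder socket):
 *
 *     struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *     setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * takes the TARGET_SO_RCVTIMEO path above: the option value is a
 * target_timeval laid out with the guest's word size and byte order, so it
 * is converted with copy_from_user_timeval() before the host setsockopt()
 * is issued.  Plain integer options (SO_KEEPALIVE, IP_TTL, ...) only need
 * a byte-swapped u32, which is what the generic paths do.
 */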
2525 
2526 /* do_getsockopt() must return target values and target errnos. */
2527 static abi_long do_getsockopt(int sockfd, int level, int optname,
2528                               abi_ulong optval_addr, abi_ulong optlen)
2529 {
2530     abi_long ret;
2531     int len, val;
2532     socklen_t lv;
2533 
2534     switch(level) {
2535     case TARGET_SOL_SOCKET:
2536         level = SOL_SOCKET;
2537         switch (optname) {
2538         /* These don't just return a single integer */
2539         case TARGET_SO_PEERNAME:
2540             goto unimplemented;
2541         case TARGET_SO_RCVTIMEO: {
2542             struct timeval tv;
2543             socklen_t tvlen;
2544 
2545             optname = SO_RCVTIMEO;
2546 
2547 get_timeout:
2548             if (get_user_u32(len, optlen)) {
2549                 return -TARGET_EFAULT;
2550             }
2551             if (len < 0) {
2552                 return -TARGET_EINVAL;
2553             }
2554 
2555             tvlen = sizeof(tv);
2556             ret = get_errno(getsockopt(sockfd, level, optname,
2557                                        &tv, &tvlen));
2558             if (ret < 0) {
2559                 return ret;
2560             }
2561             if (len > sizeof(struct target_timeval)) {
2562                 len = sizeof(struct target_timeval);
2563             }
2564             if (copy_to_user_timeval(optval_addr, &tv)) {
2565                 return -TARGET_EFAULT;
2566             }
2567             if (put_user_u32(len, optlen)) {
2568                 return -TARGET_EFAULT;
2569             }
2570             break;
2571         }
2572         case TARGET_SO_SNDTIMEO:
2573             optname = SO_SNDTIMEO;
2574             goto get_timeout;
2575         case TARGET_SO_PEERCRED: {
2576             struct ucred cr;
2577             socklen_t crlen;
2578             struct target_ucred *tcr;
2579 
2580             if (get_user_u32(len, optlen)) {
2581                 return -TARGET_EFAULT;
2582             }
2583             if (len < 0) {
2584                 return -TARGET_EINVAL;
2585             }
2586 
2587             crlen = sizeof(cr);
2588             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2589                                        &cr, &crlen));
2590             if (ret < 0) {
2591                 return ret;
2592             }
2593             if (len > crlen) {
2594                 len = crlen;
2595             }
2596             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             __put_user(cr.pid, &tcr->pid);
2600             __put_user(cr.uid, &tcr->uid);
2601             __put_user(cr.gid, &tcr->gid);
2602             unlock_user_struct(tcr, optval_addr, 1);
2603             if (put_user_u32(len, optlen)) {
2604                 return -TARGET_EFAULT;
2605             }
2606             break;
2607         }
2608         case TARGET_SO_PEERSEC: {
2609             char *name;
2610 
2611             if (get_user_u32(len, optlen)) {
2612                 return -TARGET_EFAULT;
2613             }
2614             if (len < 0) {
2615                 return -TARGET_EINVAL;
2616             }
2617             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2618             if (!name) {
2619                 return -TARGET_EFAULT;
2620             }
2621             lv = len;
2622             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2623                                        name, &lv));
2624             if (put_user_u32(lv, optlen)) {
2625                 ret = -TARGET_EFAULT;
2626             }
2627             unlock_user(name, optval_addr, lv);
2628             break;
2629         }
2630         case TARGET_SO_LINGER:
2631         {
2632             struct linger lg;
2633             socklen_t lglen;
2634             struct target_linger *tlg;
2635 
2636             if (get_user_u32(len, optlen)) {
2637                 return -TARGET_EFAULT;
2638             }
2639             if (len < 0) {
2640                 return -TARGET_EINVAL;
2641             }
2642 
2643             lglen = sizeof(lg);
2644             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2645                                        &lg, &lglen));
2646             if (ret < 0) {
2647                 return ret;
2648             }
2649             if (len > lglen) {
2650                 len = lglen;
2651             }
2652             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2653                 return -TARGET_EFAULT;
2654             }
2655             __put_user(lg.l_onoff, &tlg->l_onoff);
2656             __put_user(lg.l_linger, &tlg->l_linger);
2657             unlock_user_struct(tlg, optval_addr, 1);
2658             if (put_user_u32(len, optlen)) {
2659                 return -TARGET_EFAULT;
2660             }
2661             break;
2662         }
2663         /* Options with 'int' argument.  */
2664         case TARGET_SO_DEBUG:
2665             optname = SO_DEBUG;
2666             goto int_case;
2667         case TARGET_SO_REUSEADDR:
2668             optname = SO_REUSEADDR;
2669             goto int_case;
2670 #ifdef SO_REUSEPORT
2671         case TARGET_SO_REUSEPORT:
2672             optname = SO_REUSEPORT;
2673             goto int_case;
2674 #endif
2675         case TARGET_SO_TYPE:
2676             optname = SO_TYPE;
2677             goto int_case;
2678         case TARGET_SO_ERROR:
2679             optname = SO_ERROR;
2680             goto int_case;
2681         case TARGET_SO_DONTROUTE:
2682             optname = SO_DONTROUTE;
2683             goto int_case;
2684         case TARGET_SO_BROADCAST:
2685             optname = SO_BROADCAST;
2686             goto int_case;
2687         case TARGET_SO_SNDBUF:
2688             optname = SO_SNDBUF;
2689             goto int_case;
2690         case TARGET_SO_RCVBUF:
2691             optname = SO_RCVBUF;
2692             goto int_case;
2693         case TARGET_SO_KEEPALIVE:
2694             optname = SO_KEEPALIVE;
2695             goto int_case;
2696         case TARGET_SO_OOBINLINE:
2697             optname = SO_OOBINLINE;
2698             goto int_case;
2699         case TARGET_SO_NO_CHECK:
2700             optname = SO_NO_CHECK;
2701             goto int_case;
2702         case TARGET_SO_PRIORITY:
2703             optname = SO_PRIORITY;
2704             goto int_case;
2705 #ifdef SO_BSDCOMPAT
2706         case TARGET_SO_BSDCOMPAT:
2707             optname = SO_BSDCOMPAT;
2708             goto int_case;
2709 #endif
2710         case TARGET_SO_PASSCRED:
2711             optname = SO_PASSCRED;
2712             goto int_case;
2713         case TARGET_SO_TIMESTAMP:
2714             optname = SO_TIMESTAMP;
2715             goto int_case;
2716         case TARGET_SO_RCVLOWAT:
2717             optname = SO_RCVLOWAT;
2718             goto int_case;
2719         case TARGET_SO_ACCEPTCONN:
2720             optname = SO_ACCEPTCONN;
2721             goto int_case;
2722         case TARGET_SO_PROTOCOL:
2723             optname = SO_PROTOCOL;
2724             goto int_case;
2725         case TARGET_SO_DOMAIN:
2726             optname = SO_DOMAIN;
2727             goto int_case;
2728         default:
2729             goto int_case;
2730         }
2731         break;
2732     case SOL_TCP:
2733     case SOL_UDP:
2734         /* TCP and UDP options all take an 'int' value.  */
2735     int_case:
2736         if (get_user_u32(len, optlen))
2737             return -TARGET_EFAULT;
2738         if (len < 0)
2739             return -TARGET_EINVAL;
2740         lv = sizeof(lv);
2741         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2742         if (ret < 0)
2743             return ret;
2744         if (optname == SO_TYPE) {
2745             val = host_to_target_sock_type(val);
2746         }
2747         if (len > lv)
2748             len = lv;
2749         if (len == 4) {
2750             if (put_user_u32(val, optval_addr))
2751                 return -TARGET_EFAULT;
2752         } else {
2753             if (put_user_u8(val, optval_addr))
2754                 return -TARGET_EFAULT;
2755         }
2756         if (put_user_u32(len, optlen))
2757             return -TARGET_EFAULT;
2758         break;
2759     case SOL_IP:
2760         switch(optname) {
2761         case IP_TOS:
2762         case IP_TTL:
2763         case IP_HDRINCL:
2764         case IP_ROUTER_ALERT:
2765         case IP_RECVOPTS:
2766         case IP_RETOPTS:
2767         case IP_PKTINFO:
2768         case IP_MTU_DISCOVER:
2769         case IP_RECVERR:
2770         case IP_RECVTOS:
2771 #ifdef IP_FREEBIND
2772         case IP_FREEBIND:
2773 #endif
2774         case IP_MULTICAST_TTL:
2775         case IP_MULTICAST_LOOP:
2776             if (get_user_u32(len, optlen))
2777                 return -TARGET_EFAULT;
2778             if (len < 0)
2779                 return -TARGET_EINVAL;
2780             lv = sizeof(lv);
2781             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2782             if (ret < 0)
2783                 return ret;
2784             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2785                 len = 1;
2786                 if (put_user_u32(len, optlen)
2787                     || put_user_u8(val, optval_addr))
2788                     return -TARGET_EFAULT;
2789             } else {
2790                 if (len > sizeof(int))
2791                     len = sizeof(int);
2792                 if (put_user_u32(len, optlen)
2793                     || put_user_u32(val, optval_addr))
2794                     return -TARGET_EFAULT;
2795             }
2796             break;
2797         default:
2798             ret = -TARGET_ENOPROTOOPT;
2799             break;
2800         }
2801         break;
2802     case SOL_IPV6:
2803         switch (optname) {
2804         case IPV6_MTU_DISCOVER:
2805         case IPV6_MTU:
2806         case IPV6_V6ONLY:
2807         case IPV6_RECVPKTINFO:
2808         case IPV6_UNICAST_HOPS:
2809         case IPV6_MULTICAST_HOPS:
2810         case IPV6_MULTICAST_LOOP:
2811         case IPV6_RECVERR:
2812         case IPV6_RECVHOPLIMIT:
2813         case IPV6_2292HOPLIMIT:
2814         case IPV6_CHECKSUM:
2815         case IPV6_ADDRFORM:
2816         case IPV6_2292PKTINFO:
2817         case IPV6_RECVTCLASS:
2818         case IPV6_RECVRTHDR:
2819         case IPV6_2292RTHDR:
2820         case IPV6_RECVHOPOPTS:
2821         case IPV6_2292HOPOPTS:
2822         case IPV6_RECVDSTOPTS:
2823         case IPV6_2292DSTOPTS:
2824         case IPV6_TCLASS:
2825         case IPV6_ADDR_PREFERENCES:
2826 #ifdef IPV6_RECVPATHMTU
2827         case IPV6_RECVPATHMTU:
2828 #endif
2829 #ifdef IPV6_TRANSPARENT
2830         case IPV6_TRANSPARENT:
2831 #endif
2832 #ifdef IPV6_FREEBIND
2833         case IPV6_FREEBIND:
2834 #endif
2835 #ifdef IPV6_RECVORIGDSTADDR
2836         case IPV6_RECVORIGDSTADDR:
2837 #endif
2838             if (get_user_u32(len, optlen))
2839                 return -TARGET_EFAULT;
2840             if (len < 0)
2841                 return -TARGET_EINVAL;
2842             lv = sizeof(lv);
2843             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2844             if (ret < 0)
2845                 return ret;
2846             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2847                 len = 1;
2848                 if (put_user_u32(len, optlen)
2849                     || put_user_u8(val, optval_addr))
2850                     return -TARGET_EFAULT;
2851             } else {
2852                 if (len > sizeof(int))
2853                     len = sizeof(int);
2854                 if (put_user_u32(len, optlen)
2855                     || put_user_u32(val, optval_addr))
2856                     return -TARGET_EFAULT;
2857             }
2858             break;
2859         default:
2860             ret = -TARGET_ENOPROTOOPT;
2861             break;
2862         }
2863         break;
2864 #ifdef SOL_NETLINK
2865     case SOL_NETLINK:
2866         switch (optname) {
2867         case NETLINK_PKTINFO:
2868         case NETLINK_BROADCAST_ERROR:
2869         case NETLINK_NO_ENOBUFS:
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2871         case NETLINK_LISTEN_ALL_NSID:
2872         case NETLINK_CAP_ACK:
2873 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2874 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2875         case NETLINK_EXT_ACK:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2878         case NETLINK_GET_STRICT_CHK:
2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2880             if (get_user_u32(len, optlen)) {
2881                 return -TARGET_EFAULT;
2882             }
2883             if (len != sizeof(val)) {
2884                 return -TARGET_EINVAL;
2885             }
2886             lv = len;
2887             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2888             if (ret < 0) {
2889                 return ret;
2890             }
2891             if (put_user_u32(lv, optlen)
2892                 || put_user_u32(val, optval_addr)) {
2893                 return -TARGET_EFAULT;
2894             }
2895             break;
2896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2897         case NETLINK_LIST_MEMBERSHIPS:
2898         {
2899             uint32_t *results;
2900             int i;
2901             if (get_user_u32(len, optlen)) {
2902                 return -TARGET_EFAULT;
2903             }
2904             if (len < 0) {
2905                 return -TARGET_EINVAL;
2906             }
2907             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2908             if (!results && len > 0) {
2909                 return -TARGET_EFAULT;
2910             }
2911             lv = len;
2912             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2913             if (ret < 0) {
2914                 unlock_user(results, optval_addr, 0);
2915                 return ret;
2916             }
2917             /* Swap host endianness to target endianness. */
2918             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2919                 results[i] = tswap32(results[i]);
2920             }
2921             if (put_user_u32(lv, optlen)) {
2922                 return -TARGET_EFAULT;
2923             }
2924             unlock_user(results, optval_addr, 0);
2925             break;
2926         }
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2928         default:
2929             goto unimplemented;
2930         }
2931         break;
2932 #endif /* SOL_NETLINK */
2933     default:
2934     unimplemented:
2935         qemu_log_mask(LOG_UNIMP,
2936                       "getsockopt level=%d optname=%d not yet supported\n",
2937                       level, optname);
2938         ret = -TARGET_EOPNOTSUPP;
2939         break;
2940     }
2941     return ret;
2942 }
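
/*
 * Illustrative note on the write-back above: for byte-sized IP options the
 * guest may legitimately ask for a single byte, e.g.
 *
 *     unsigned char ttl;
 *     socklen_t len = 1;
 *     getsockopt(s, IPPROTO_IP, IP_TTL, &ttl, &len);
 *
 * ("s" being a placeholder socket), in which case the host int result is
 * stored back with put_user_u8() and the reported length is set to 1;
 * otherwise a full byte-swapped u32 is written and the reported length is
 * clamped to sizeof(int).
 */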
2943 
2944 /* Convert a target low/high pair representing a file offset into the host
2945  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2946  * as the kernel doesn't handle them either.
2947  */
2948 static void target_to_host_low_high(abi_ulong tlow,
2949                                     abi_ulong thigh,
2950                                     unsigned long *hlow,
2951                                     unsigned long *hhigh)
2952 {
2953     uint64_t off = tlow |
2954         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2955         TARGET_LONG_BITS / 2;
2956 
2957     *hlow = off;
2958     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2959 }
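
/*
 * Worked example (illustrative only): for a 32-bit guest on a 64-bit host,
 * TARGET_LONG_BITS is 32 and HOST_LONG_BITS is 64.  A guest offset of
 * 0x123456789 arrives as tlow = 0x23456789, thigh = 0x1; the two half-width
 * shifts above rebuild off = 0x123456789, and the result comes back as
 * hlow = 0x123456789, hhigh = 0.  The shifts are split in two so that the
 * 64-bit-target and 64-bit-host cases never shift by a full word width,
 * which would be undefined behaviour.
 */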
2960 
2961 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2962                                 abi_ulong count, int copy)
2963 {
2964     struct target_iovec *target_vec;
2965     struct iovec *vec;
2966     abi_ulong total_len, max_len;
2967     int i;
2968     int err = 0;
2969     bool bad_address = false;
2970 
2971     if (count == 0) {
2972         errno = 0;
2973         return NULL;
2974     }
2975     if (count > IOV_MAX) {
2976         errno = EINVAL;
2977         return NULL;
2978     }
2979 
2980     vec = g_try_new0(struct iovec, count);
2981     if (vec == NULL) {
2982         errno = ENOMEM;
2983         return NULL;
2984     }
2985 
2986     target_vec = lock_user(VERIFY_READ, target_addr,
2987                            count * sizeof(struct target_iovec), 1);
2988     if (target_vec == NULL) {
2989         err = EFAULT;
2990         goto fail2;
2991     }
2992 
2993     /* ??? If host page size > target page size, this will result in a
2994        value larger than what we can actually support.  */
2995     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2996     total_len = 0;
2997 
2998     for (i = 0; i < count; i++) {
2999         abi_ulong base = tswapal(target_vec[i].iov_base);
3000         abi_long len = tswapal(target_vec[i].iov_len);
3001 
3002         if (len < 0) {
3003             err = EINVAL;
3004             goto fail;
3005         } else if (len == 0) {
3006             /* Zero length pointer is ignored.  */
3007             vec[i].iov_base = 0;
3008         } else {
3009             vec[i].iov_base = lock_user(type, base, len, copy);
3010             /* If the first buffer pointer is bad, this is a fault.  But
3011              * subsequent bad buffers will result in a partial write; this
3012              * is realized by filling the vector with null pointers and
3013              * zero lengths. */
3014             if (!vec[i].iov_base) {
3015                 if (i == 0) {
3016                     err = EFAULT;
3017                     goto fail;
3018                 } else {
3019                     bad_address = true;
3020                 }
3021             }
3022             if (bad_address) {
3023                 len = 0;
3024             }
3025             if (len > max_len - total_len) {
3026                 len = max_len - total_len;
3027             }
3028         }
3029         vec[i].iov_len = len;
3030         total_len += len;
3031     }
3032 
3033     unlock_user(target_vec, target_addr, 0);
3034     return vec;
3035 
3036  fail:
3037     while (--i >= 0) {
3038         if (tswapal(target_vec[i].iov_len) > 0) {
3039             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3040         }
3041     }
3042     unlock_user(target_vec, target_addr, 0);
3043  fail2:
3044     g_free(vec);
3045     errno = err;
3046     return NULL;
3047 }
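
/*
 * Illustrative note on the partial-write behaviour above: for a guest
 * writev() whose second iovec points at an unmapped address, e.g.
 *
 *     struct iovec iov[2] = {
 *         { .iov_base = good_buf, .iov_len = 64 },
 *         { .iov_base = bad_ptr,  .iov_len = 64 },
 *     };
 *     writev(fd, iov, 2);
 *
 * (good_buf, bad_ptr and fd being placeholders), only a bad first entry
 * faults the whole call; later bad entries are turned into zero-length
 * vectors here, so the host writev() performs a short write, matching
 * Linux semantics.
 */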
3048 
3049 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3050                          abi_ulong count, int copy)
3051 {
3052     struct target_iovec *target_vec;
3053     int i;
3054 
3055     target_vec = lock_user(VERIFY_READ, target_addr,
3056                            count * sizeof(struct target_iovec), 1);
3057     if (target_vec) {
3058         for (i = 0; i < count; i++) {
3059             abi_ulong base = tswapal(target_vec[i].iov_base);
3060             abi_long len = tswapal(target_vec[i].iov_len);
3061             if (len < 0) {
3062                 break;
3063             }
3064             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3065         }
3066         unlock_user(target_vec, target_addr, 0);
3067     }
3068 
3069     g_free(vec);
3070 }
3071 
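/*
 * Translate the guest socket type argument (a base SOCK_* value plus the
 * TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK flags) into host values.  A
 * missing host SOCK_CLOEXEC is an error, while a missing SOCK_NONBLOCK is
 * tolerated when O_NONBLOCK exists, because sock_flags_fixup() can apply
 * it with fcntl() after the socket has been created.
 */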
3072 static inline int target_to_host_sock_type(int *type)
3073 {
3074     int host_type = 0;
3075     int target_type = *type;
3076 
3077     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3078     case TARGET_SOCK_DGRAM:
3079         host_type = SOCK_DGRAM;
3080         break;
3081     case TARGET_SOCK_STREAM:
3082         host_type = SOCK_STREAM;
3083         break;
3084     default:
3085         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3086         break;
3087     }
3088     if (target_type & TARGET_SOCK_CLOEXEC) {
3089 #if defined(SOCK_CLOEXEC)
3090         host_type |= SOCK_CLOEXEC;
3091 #else
3092         return -TARGET_EINVAL;
3093 #endif
3094     }
3095     if (target_type & TARGET_SOCK_NONBLOCK) {
3096 #if defined(SOCK_NONBLOCK)
3097         host_type |= SOCK_NONBLOCK;
3098 #elif !defined(O_NONBLOCK)
3099         return -TARGET_EINVAL;
3100 #endif
3101     }
3102     *type = host_type;
3103     return 0;
3104 }
3105 
3106 /* Try to emulate socket type flags after socket creation.  */
3107 static int sock_flags_fixup(int fd, int target_type)
3108 {
3109 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3110     if (target_type & TARGET_SOCK_NONBLOCK) {
3111         int flags = fcntl(fd, F_GETFL);
3112         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3113             close(fd);
3114             return -TARGET_EINVAL;
3115         }
3116     }
3117 #endif
3118     return fd;
3119 }
3120 
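/*
 * Socket creation: convert the type flags, restrict PF_NETLINK to the
 * protocols we know how to translate (NETLINK_ROUTE with CONFIG_RTNETLINK,
 * NETLINK_KOBJECT_UEVENT and NETLINK_AUDIT), byte-swap the protocol for
 * packet sockets, and register per-fd translators so that later send/recv
 * calls can convert messages between target and host layouts.
 */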
3121 /* do_socket() Must return target values and target errnos. */
3122 static abi_long do_socket(int domain, int type, int protocol)
3123 {
3124     int target_type = type;
3125     int ret;
3126 
3127     ret = target_to_host_sock_type(&type);
3128     if (ret) {
3129         return ret;
3130     }
3131 
3132     if (domain == PF_NETLINK && !(
3133 #ifdef CONFIG_RTNETLINK
3134          protocol == NETLINK_ROUTE ||
3135 #endif
3136          protocol == NETLINK_KOBJECT_UEVENT ||
3137          protocol == NETLINK_AUDIT)) {
3138         return -TARGET_EPROTONOSUPPORT;
3139     }
3140 
3141     if (domain == AF_PACKET ||
3142         (domain == AF_INET && type == SOCK_PACKET)) {
3143         protocol = tswap16(protocol);
3144     }
3145 
3146     ret = get_errno(socket(domain, type, protocol));
3147     if (ret >= 0) {
3148         ret = sock_flags_fixup(ret, target_type);
3149         if (type == SOCK_PACKET) {
3150             /* Handle an obsolete case: if the socket type is
3151              * SOCK_PACKET, binding is done by name.
3152              */
3153             fd_trans_register(ret, &target_packet_trans);
3154         } else if (domain == PF_NETLINK) {
3155             switch (protocol) {
3156 #ifdef CONFIG_RTNETLINK
3157             case NETLINK_ROUTE:
3158                 fd_trans_register(ret, &target_netlink_route_trans);
3159                 break;
3160 #endif
3161             case NETLINK_KOBJECT_UEVENT:
3162                 /* nothing to do: messages are strings */
3163                 break;
3164             case NETLINK_AUDIT:
3165                 fd_trans_register(ret, &target_netlink_audit_trans);
3166                 break;
3167             default:
3168                 g_assert_not_reached();
3169             }
3170         }
3171     }
3172     return ret;
3173 }
3174 
3175 /* do_bind() Must return target values and target errnos. */
3176 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3177                         socklen_t addrlen)
3178 {
3179     void *addr;
3180     abi_long ret;
3181 
3182     if ((int)addrlen < 0) {
3183         return -TARGET_EINVAL;
3184     }
3185 
3186     addr = alloca(addrlen+1);
3187 
3188     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3189     if (ret)
3190         return ret;
3191 
3192     return get_errno(bind(sockfd, addr, addrlen));
3193 }
3194 
3195 /* do_connect() Must return target values and target errnos. */
3196 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3197                            socklen_t addrlen)
3198 {
3199     void *addr;
3200     abi_long ret;
3201 
3202     if ((int)addrlen < 0) {
3203         return -TARGET_EINVAL;
3204     }
3205 
3206     addr = alloca(addrlen+1);
3207 
3208     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3209     if (ret)
3210         return ret;
3211 
3212     return get_errno(safe_connect(sockfd, addr, addrlen));
3213 }
3214 
3215 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3216 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3217                                       int flags, int send)
3218 {
3219     abi_long ret, len;
3220     struct msghdr msg;
3221     abi_ulong count;
3222     struct iovec *vec;
3223     abi_ulong target_vec;
3224 
3225     if (msgp->msg_name) {
3226         msg.msg_namelen = tswap32(msgp->msg_namelen);
3227         msg.msg_name = alloca(msg.msg_namelen+1);
3228         ret = target_to_host_sockaddr(fd, msg.msg_name,
3229                                       tswapal(msgp->msg_name),
3230                                       msg.msg_namelen);
3231         if (ret == -TARGET_EFAULT) {
3232             /* For connected sockets msg_name and msg_namelen must
3233              * be ignored, so returning EFAULT immediately is wrong.
3234              * Instead, pass a bad msg_name to the host kernel, and
3235              * let it decide whether to return EFAULT or not.
3236              */
3237             msg.msg_name = (void *)-1;
3238         } else if (ret) {
3239             goto out2;
3240         }
3241     } else {
3242         msg.msg_name = NULL;
3243         msg.msg_namelen = 0;
3244     }
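    /*
     * Note: the host control buffer is allocated at twice the size the
     * guest supplied, presumably because host cmsg headers and alignment
     * can be larger than the target's (e.g. a 64-bit host running a
     * 32-bit guest), so the converted ancillary data may need more room.
     */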
3245     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3246     msg.msg_control = alloca(msg.msg_controllen);
3247     memset(msg.msg_control, 0, msg.msg_controllen);
3248 
3249     msg.msg_flags = tswap32(msgp->msg_flags);
3250 
3251     count = tswapal(msgp->msg_iovlen);
3252     target_vec = tswapal(msgp->msg_iov);
3253 
3254     if (count > IOV_MAX) {
3255         /* sendmsg/recvmsg return a different errno for this condition than
3256          * readv/writev do, so we must catch it here before lock_iovec() does.
3257          */
3258         ret = -TARGET_EMSGSIZE;
3259         goto out2;
3260     }
3261 
3262     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3263                      target_vec, count, send);
3264     if (vec == NULL) {
3265         ret = -host_to_target_errno(errno);
3266         goto out2;
3267     }
3268     msg.msg_iovlen = count;
3269     msg.msg_iov = vec;
3270 
3271     if (send) {
3272         if (fd_trans_target_to_host_data(fd)) {
3273             void *host_msg;
3274 
3275             host_msg = g_malloc(msg.msg_iov->iov_len);
3276             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3277             ret = fd_trans_target_to_host_data(fd)(host_msg,
3278                                                    msg.msg_iov->iov_len);
3279             if (ret >= 0) {
3280                 msg.msg_iov->iov_base = host_msg;
3281                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3282             }
3283             g_free(host_msg);
3284         } else {
3285             ret = target_to_host_cmsg(&msg, msgp);
3286             if (ret == 0) {
3287                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3288             }
3289         }
3290     } else {
3291         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3292         if (!is_error(ret)) {
3293             len = ret;
3294             if (fd_trans_host_to_target_data(fd)) {
3295                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3296                                                MIN(msg.msg_iov->iov_len, len));
3297             } else {
3298                 ret = host_to_target_cmsg(msgp, &msg);
3299             }
3300             if (!is_error(ret)) {
3301                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3302                 msgp->msg_flags = tswap32(msg.msg_flags);
3303                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3304                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3305                                     msg.msg_name, msg.msg_namelen);
3306                     if (ret) {
3307                         goto out;
3308                     }
3309                 }
3310 
3311                 ret = len;
3312             }
3313         }
3314     }
3315 
3316 out:
3317     unlock_iovec(vec, target_vec, count, !send);
3318 out2:
3319     return ret;
3320 }
3321 
3322 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3323                                int flags, int send)
3324 {
3325     abi_long ret;
3326     struct target_msghdr *msgp;
3327 
3328     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3329                           msgp,
3330                           target_msg,
3331                           send ? 1 : 0)) {
3332         return -TARGET_EFAULT;
3333     }
3334     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3335     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3336     return ret;
3337 }
3338 
3339 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3340  * so it might not have this *mmsg-specific flag either.
3341  */
3342 #ifndef MSG_WAITFORONE
3343 #define MSG_WAITFORONE 0x10000
3344 #endif
3345 
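/*
 * sendmmsg()/recvmmsg() are emulated by looping over
 * do_sendrecvmsg_locked() for up to UIO_MAXIOV message headers.  Each
 * msg_len is written back to the guest, and the call returns the number
 * of datagrams processed if any succeeded, otherwise the first error.
 */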
3346 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3347                                 unsigned int vlen, unsigned int flags,
3348                                 int send)
3349 {
3350     struct target_mmsghdr *mmsgp;
3351     abi_long ret = 0;
3352     int i;
3353 
3354     if (vlen > UIO_MAXIOV) {
3355         vlen = UIO_MAXIOV;
3356     }
3357 
3358     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3359     if (!mmsgp) {
3360         return -TARGET_EFAULT;
3361     }
3362 
3363     for (i = 0; i < vlen; i++) {
3364         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3365         if (is_error(ret)) {
3366             break;
3367         }
3368         mmsgp[i].msg_len = tswap32(ret);
3369         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3370         if (flags & MSG_WAITFORONE) {
3371             flags |= MSG_DONTWAIT;
3372         }
3373     }
3374 
3375     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3376 
3377     /* Return number of datagrams sent if we sent any at all;
3378      * otherwise return the error.
3379      */
3380     if (i) {
3381         return i;
3382     }
3383     return ret;
3384 }
3385 
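/*
 * do_accept4() backs both accept() (with flags == 0) and accept4():
 * target flag bits are converted with fcntl_flags_tbl, the guest addrlen
 * is validated up front, and on success the host sockaddr and resulting
 * length are copied back to guest memory.
 */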
3386 /* do_accept4() Must return target values and target errnos. */
3387 static abi_long do_accept4(int fd, abi_ulong target_addr,
3388                            abi_ulong target_addrlen_addr, int flags)
3389 {
3390     socklen_t addrlen, ret_addrlen;
3391     void *addr;
3392     abi_long ret;
3393     int host_flags;
3394 
3395     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3396 
3397     if (target_addr == 0) {
3398         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3399     }
3400 
3401     /* Linux returns EFAULT if the addrlen pointer is invalid */
3402     if (get_user_u32(addrlen, target_addrlen_addr))
3403         return -TARGET_EFAULT;
3404 
3405     if ((int)addrlen < 0) {
3406         return -TARGET_EINVAL;
3407     }
3408 
3409     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3410         return -TARGET_EFAULT;
3411     }
3412 
3413     addr = alloca(addrlen);
3414 
3415     ret_addrlen = addrlen;
3416     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3417     if (!is_error(ret)) {
3418         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3419         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3420             ret = -TARGET_EFAULT;
3421         }
3422     }
3423     return ret;
3424 }
3425 
3426 /* do_getpeername() Must return target values and target errnos. */
3427 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3428                                abi_ulong target_addrlen_addr)
3429 {
3430     socklen_t addrlen, ret_addrlen;
3431     void *addr;
3432     abi_long ret;
3433 
3434     if (get_user_u32(addrlen, target_addrlen_addr))
3435         return -TARGET_EFAULT;
3436 
3437     if ((int)addrlen < 0) {
3438         return -TARGET_EINVAL;
3439     }
3440 
3441     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3442         return -TARGET_EFAULT;
3443     }
3444 
3445     addr = alloca(addrlen);
3446 
3447     ret_addrlen = addrlen;
3448     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3449     if (!is_error(ret)) {
3450         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3451         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3452             ret = -TARGET_EFAULT;
3453         }
3454     }
3455     return ret;
3456 }
3457 
3458 /* do_getsockname() Must return target values and target errnos. */
3459 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3460                                abi_ulong target_addrlen_addr)
3461 {
3462     socklen_t addrlen, ret_addrlen;
3463     void *addr;
3464     abi_long ret;
3465 
3466     if (get_user_u32(addrlen, target_addrlen_addr))
3467         return -TARGET_EFAULT;
3468 
3469     if ((int)addrlen < 0) {
3470         return -TARGET_EINVAL;
3471     }
3472 
3473     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3474         return -TARGET_EFAULT;
3475     }
3476 
3477     addr = alloca(addrlen);
3478 
3479     ret_addrlen = addrlen;
3480     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3481     if (!is_error(ret)) {
3482         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3483         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3484             ret = -TARGET_EFAULT;
3485         }
3486     }
3487     return ret;
3488 }
3489 
3490 /* do_socketpair() Must return target values and target errnos. */
3491 static abi_long do_socketpair(int domain, int type, int protocol,
3492                               abi_ulong target_tab_addr)
3493 {
3494     int tab[2];
3495     abi_long ret;
3496 
3497     target_to_host_sock_type(&type);
3498 
3499     ret = get_errno(socketpair(domain, type, protocol, tab));
3500     if (!is_error(ret)) {
3501         if (put_user_s32(tab[0], target_tab_addr)
3502             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3503             ret = -TARGET_EFAULT;
3504     }
3505     return ret;
3506 }
3507 
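/*
 * For send/recv style calls, fd_trans_target_to_host_data() and
 * fd_trans_host_to_target_data() return per-fd translator callbacks that
 * were registered with fd_trans_register() (for example for certain
 * netlink sockets in do_socket()).  When present, the payload is copied
 * into a temporary buffer and rewritten before being handed to the host,
 * or after being received from it.
 */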
3508 /* do_sendto() Must return target values and target errnos. */
3509 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3510                           abi_ulong target_addr, socklen_t addrlen)
3511 {
3512     void *addr;
3513     void *host_msg;
3514     void *copy_msg = NULL;
3515     abi_long ret;
3516 
3517     if ((int)addrlen < 0) {
3518         return -TARGET_EINVAL;
3519     }
3520 
3521     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3522     if (!host_msg)
3523         return -TARGET_EFAULT;
3524     if (fd_trans_target_to_host_data(fd)) {
3525         copy_msg = host_msg;
3526         host_msg = g_malloc(len);
3527         memcpy(host_msg, copy_msg, len);
3528         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3529         if (ret < 0) {
3530             goto fail;
3531         }
3532     }
3533     if (target_addr) {
3534         addr = alloca(addrlen+1);
3535         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3536         if (ret) {
3537             goto fail;
3538         }
3539         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3540     } else {
3541         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3542     }
3543 fail:
3544     if (copy_msg) {
3545         g_free(host_msg);
3546         host_msg = copy_msg;
3547     }
3548     unlock_user(host_msg, msg, 0);
3549     return ret;
3550 }
3551 
3552 /* do_recvfrom() Must return target values and target errnos. */
3553 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3554                             abi_ulong target_addr,
3555                             abi_ulong target_addrlen)
3556 {
3557     socklen_t addrlen, ret_addrlen;
3558     void *addr;
3559     void *host_msg;
3560     abi_long ret;
3561 
3562     if (!msg) {
3563         host_msg = NULL;
3564     } else {
3565         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3566         if (!host_msg) {
3567             return -TARGET_EFAULT;
3568         }
3569     }
3570     if (target_addr) {
3571         if (get_user_u32(addrlen, target_addrlen)) {
3572             ret = -TARGET_EFAULT;
3573             goto fail;
3574         }
3575         if ((int)addrlen < 0) {
3576             ret = -TARGET_EINVAL;
3577             goto fail;
3578         }
3579         addr = alloca(addrlen);
3580         ret_addrlen = addrlen;
3581         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3582                                       addr, &ret_addrlen));
3583     } else {
3584         addr = NULL; /* To keep compiler quiet.  */
3585         addrlen = 0; /* To keep compiler quiet.  */
3586         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3587     }
3588     if (!is_error(ret)) {
3589         if (fd_trans_host_to_target_data(fd)) {
3590             abi_long trans;
3591             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3592             if (is_error(trans)) {
3593                 ret = trans;
3594                 goto fail;
3595             }
3596         }
3597         if (target_addr) {
3598             host_to_target_sockaddr(target_addr, addr,
3599                                     MIN(addrlen, ret_addrlen));
3600             if (put_user_u32(ret_addrlen, target_addrlen)) {
3601                 ret = -TARGET_EFAULT;
3602                 goto fail;
3603             }
3604         }
3605         unlock_user(host_msg, msg, len);
3606     } else {
3607 fail:
3608         unlock_user(host_msg, msg, 0);
3609     }
3610     return ret;
3611 }
3612 
3613 #ifdef TARGET_NR_socketcall
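/*
 * On targets such as i386 the individual socket syscalls are multiplexed
 * through a single socketcall(num, argptr) syscall: 'num' selects the
 * operation and 'argptr' points at an array of abi_long arguments in
 * guest memory, with nargs[] giving the per-operation argument count.
 *
 * Guest-side sketch (illustration only, not part of this file):
 *
 *     long args[3] = { AF_INET, SOCK_STREAM, 0 };
 *     syscall(__NR_socketcall, 1, args);    (1 == TARGET_SYS_SOCKET)
 *
 * do_socketcall() then reads three abi_longs from 'args' and dispatches
 * to do_socket().
 */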
3614 /* do_socketcall() must return target values and target errnos. */
3615 static abi_long do_socketcall(int num, abi_ulong vptr)
3616 {
3617     static const unsigned nargs[] = { /* number of arguments per operation */
3618         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3619         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3620         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3621         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3622         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3623         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3624         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3625         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3626         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3627         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3628         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3629         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3630         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3631         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3632         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3633         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3634         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3635         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3636         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3637         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3638     };
3639     abi_long a[6]; /* max 6 args */
3640     unsigned i;
3641 
3642     /* check the range of the first argument num */
3643     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3644     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3645         return -TARGET_EINVAL;
3646     }
3647     /* ensure we have space for args */
3648     if (nargs[num] > ARRAY_SIZE(a)) {
3649         return -TARGET_EINVAL;
3650     }
3651     /* collect the arguments in a[] according to nargs[] */
3652     for (i = 0; i < nargs[num]; ++i) {
3653         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3654             return -TARGET_EFAULT;
3655         }
3656     }
3657     /* now that we have the args, invoke the appropriate underlying function */
3658     switch (num) {
3659     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3660         return do_socket(a[0], a[1], a[2]);
3661     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3662         return do_bind(a[0], a[1], a[2]);
3663     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3664         return do_connect(a[0], a[1], a[2]);
3665     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3666         return get_errno(listen(a[0], a[1]));
3667     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3668         return do_accept4(a[0], a[1], a[2], 0);
3669     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3670         return do_getsockname(a[0], a[1], a[2]);
3671     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3672         return do_getpeername(a[0], a[1], a[2]);
3673     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3674         return do_socketpair(a[0], a[1], a[2], a[3]);
3675     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3676         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3677     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3678         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3679     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3680         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3681     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3682         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3683     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3684         return get_errno(shutdown(a[0], a[1]));
3685     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3686         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3687     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3688         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3689     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3690         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3691     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3692         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3693     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3694         return do_accept4(a[0], a[1], a[2], a[3]);
3695     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3696         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3697     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3698         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3699     default:
3700         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3701         return -TARGET_EINVAL;
3702     }
3703 }
3704 #endif
3705 
3706 #define N_SHM_REGIONS	32
3707 
3708 static struct shm_region {
3709     abi_ulong start;
3710     abi_ulong size;
3711     bool in_use;
3712 } shm_regions[N_SHM_REGIONS];
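/*
 * Book-keeping for guest shmat() mappings: do_shmat() records the guest
 * start address and segment size of each attach here so that do_shmdt()
 * can later clear the page flags for the correct range.  At most
 * N_SHM_REGIONS segments can be attached at the same time.
 */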
3713 
3714 #ifndef TARGET_SEMID64_DS
3715 /* asm-generic version of this struct */
3716 struct target_semid64_ds
3717 {
3718   struct target_ipc_perm sem_perm;
3719   abi_ulong sem_otime;
3720 #if TARGET_ABI_BITS == 32
3721   abi_ulong __unused1;
3722 #endif
3723   abi_ulong sem_ctime;
3724 #if TARGET_ABI_BITS == 32
3725   abi_ulong __unused2;
3726 #endif
3727   abi_ulong sem_nsems;
3728   abi_ulong __unused3;
3729   abi_ulong __unused4;
3730 };
3731 #endif
3732 
3733 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3734                                                abi_ulong target_addr)
3735 {
3736     struct target_ipc_perm *target_ip;
3737     struct target_semid64_ds *target_sd;
3738 
3739     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3740         return -TARGET_EFAULT;
3741     target_ip = &(target_sd->sem_perm);
3742     host_ip->__key = tswap32(target_ip->__key);
3743     host_ip->uid = tswap32(target_ip->uid);
3744     host_ip->gid = tswap32(target_ip->gid);
3745     host_ip->cuid = tswap32(target_ip->cuid);
3746     host_ip->cgid = tswap32(target_ip->cgid);
3747 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3748     host_ip->mode = tswap32(target_ip->mode);
3749 #else
3750     host_ip->mode = tswap16(target_ip->mode);
3751 #endif
3752 #if defined(TARGET_PPC)
3753     host_ip->__seq = tswap32(target_ip->__seq);
3754 #else
3755     host_ip->__seq = tswap16(target_ip->__seq);
3756 #endif
3757     unlock_user_struct(target_sd, target_addr, 0);
3758     return 0;
3759 }
3760 
3761 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3762                                                struct ipc_perm *host_ip)
3763 {
3764     struct target_ipc_perm *target_ip;
3765     struct target_semid64_ds *target_sd;
3766 
3767     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3768         return -TARGET_EFAULT;
3769     target_ip = &(target_sd->sem_perm);
3770     target_ip->__key = tswap32(host_ip->__key);
3771     target_ip->uid = tswap32(host_ip->uid);
3772     target_ip->gid = tswap32(host_ip->gid);
3773     target_ip->cuid = tswap32(host_ip->cuid);
3774     target_ip->cgid = tswap32(host_ip->cgid);
3775 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3776     target_ip->mode = tswap32(host_ip->mode);
3777 #else
3778     target_ip->mode = tswap16(host_ip->mode);
3779 #endif
3780 #if defined(TARGET_PPC)
3781     target_ip->__seq = tswap32(host_ip->__seq);
3782 #else
3783     target_ip->__seq = tswap16(host_ip->__seq);
3784 #endif
3785     unlock_user_struct(target_sd, target_addr, 1);
3786     return 0;
3787 }
3788 
3789 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3790                                                abi_ulong target_addr)
3791 {
3792     struct target_semid64_ds *target_sd;
3793 
3794     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3795         return -TARGET_EFAULT;
3796     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3797         return -TARGET_EFAULT;
3798     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3799     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3800     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3801     unlock_user_struct(target_sd, target_addr, 0);
3802     return 0;
3803 }
3804 
3805 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3806                                                struct semid_ds *host_sd)
3807 {
3808     struct target_semid64_ds *target_sd;
3809 
3810     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3811         return -TARGET_EFAULT;
3812     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3813         return -TARGET_EFAULT;
3814     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3815     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3816     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3817     unlock_user_struct(target_sd, target_addr, 1);
3818     return 0;
3819 }
3820 
3821 struct target_seminfo {
3822     int semmap;
3823     int semmni;
3824     int semmns;
3825     int semmnu;
3826     int semmsl;
3827     int semopm;
3828     int semume;
3829     int semusz;
3830     int semvmx;
3831     int semaem;
3832 };
3833 
3834 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3835                                               struct seminfo *host_seminfo)
3836 {
3837     struct target_seminfo *target_seminfo;
3838     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3839         return -TARGET_EFAULT;
3840     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3841     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3842     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3843     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3844     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3845     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3846     __put_user(host_seminfo->semume, &target_seminfo->semume);
3847     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3848     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3849     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3850     unlock_user_struct(target_seminfo, target_addr, 1);
3851     return 0;
3852 }
3853 
3854 union semun {
3855     int val;
3856     struct semid_ds *buf;
3857     unsigned short *array;
3858     struct seminfo *__buf;
3859 };
3860 
3861 union target_semun {
3862     int val;
3863     abi_ulong buf;
3864     abi_ulong array;
3865     abi_ulong __buf;
3866 };
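/*
 * semctl()'s final argument is a union passed by value.  The guest
 * supplies it as an abi_ulong that is either an immediate value (for
 * GETVAL/SETVAL) or a guest pointer, so target_semun mirrors union semun
 * with guest-sized members and do_semctl() selects the appropriate one
 * based on the command.
 */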
3867 
3868 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3869                                                abi_ulong target_addr)
3870 {
3871     int nsems;
3872     unsigned short *array;
3873     union semun semun;
3874     struct semid_ds semid_ds;
3875     int i, ret;
3876 
3877     semun.buf = &semid_ds;
3878 
3879     ret = semctl(semid, 0, IPC_STAT, semun);
3880     if (ret == -1)
3881         return get_errno(ret);
3882 
3883     nsems = semid_ds.sem_nsems;
3884 
3885     *host_array = g_try_new(unsigned short, nsems);
3886     if (!*host_array) {
3887         return -TARGET_ENOMEM;
3888     }
3889     array = lock_user(VERIFY_READ, target_addr,
3890                       nsems*sizeof(unsigned short), 1);
3891     if (!array) {
3892         g_free(*host_array);
3893         return -TARGET_EFAULT;
3894     }
3895 
3896     for (i = 0; i < nsems; i++) {
3897         __get_user((*host_array)[i], &array[i]);
3898     }
3899     unlock_user(array, target_addr, 0);
3900 
3901     return 0;
3902 }
3903 
3904 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3905                                                unsigned short **host_array)
3906 {
3907     int nsems;
3908     unsigned short *array;
3909     union semun semun;
3910     struct semid_ds semid_ds;
3911     int i, ret;
3912 
3913     semun.buf = &semid_ds;
3914 
3915     ret = semctl(semid, 0, IPC_STAT, semun);
3916     if (ret == -1)
3917         return get_errno(ret);
3918 
3919     nsems = semid_ds.sem_nsems;
3920 
3921     array = lock_user(VERIFY_WRITE, target_addr,
3922                       nsems*sizeof(unsigned short), 0);
3923     if (!array)
3924         return -TARGET_EFAULT;
3925 
3926     for (i = 0; i < nsems; i++) {
3927         __put_user((*host_array)[i], &array[i]);
3928     }
3929     g_free(*host_array);
3930     unlock_user(array, target_addr, 1);
3931 
3932     return 0;
3933 }
3934 
3935 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3936                                  abi_ulong target_arg)
3937 {
3938     union target_semun target_su = { .buf = target_arg };
3939     union semun arg;
3940     struct semid_ds dsarg;
3941     unsigned short *array = NULL;
3942     struct seminfo seminfo;
3943     abi_long ret = -TARGET_EINVAL;
3944     abi_long err;
3945     cmd &= 0xff;
3946 
3947     switch (cmd) {
3948     case GETVAL:
3949     case SETVAL:
3950         /* In 64 bit cross-endian situations, we will erroneously pick up
3951          * the wrong half of the union for the "val" element.  To rectify
3952          * this, the entire 8-byte structure is byteswapped, followed by
3953          * a swap of the 4 byte val field. In other cases, the data is
3954          * already in proper host byte order. */
3955         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3956             target_su.buf = tswapal(target_su.buf);
3957             arg.val = tswap32(target_su.val);
3958         } else {
3959             arg.val = target_su.val;
3960         }
3961         ret = get_errno(semctl(semid, semnum, cmd, arg));
3962         break;
3963     case GETALL:
3964     case SETALL:
3965         err = target_to_host_semarray(semid, &array, target_su.array);
3966         if (err)
3967             return err;
3968         arg.array = array;
3969         ret = get_errno(semctl(semid, semnum, cmd, arg));
3970         err = host_to_target_semarray(semid, target_su.array, &array);
3971         if (err)
3972             return err;
3973         break;
3974     case IPC_STAT:
3975     case IPC_SET:
3976     case SEM_STAT:
3977         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3978         if (err)
3979             return err;
3980         arg.buf = &dsarg;
3981         ret = get_errno(semctl(semid, semnum, cmd, arg));
3982         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3983         if (err)
3984             return err;
3985         break;
3986     case IPC_INFO:
3987     case SEM_INFO:
3988         arg.__buf = &seminfo;
3989         ret = get_errno(semctl(semid, semnum, cmd, arg));
3990         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3991         if (err)
3992             return err;
3993         break;
3994     case IPC_RMID:
3995     case GETPID:
3996     case GETNCNT:
3997     case GETZCNT:
3998         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3999         break;
4000     }
4001 
4002     return ret;
4003 }
4004 
4005 struct target_sembuf {
4006     unsigned short sem_num;
4007     short sem_op;
4008     short sem_flg;
4009 };
4010 
4011 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4012                                              abi_ulong target_addr,
4013                                              unsigned nsops)
4014 {
4015     struct target_sembuf *target_sembuf;
4016     int i;
4017 
4018     target_sembuf = lock_user(VERIFY_READ, target_addr,
4019                               nsops*sizeof(struct target_sembuf), 1);
4020     if (!target_sembuf)
4021         return -TARGET_EFAULT;
4022 
4023     for (i = 0; i < nsops; i++) {
4024         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4025         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4026         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4027     }
4028 
4029     unlock_user(target_sembuf, target_addr, 0);
4030 
4031     return 0;
4032 }
4033 
4034 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4035     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4036 
4037 /*
4038  * This macro is required to handle the s390 variant of sys_ipc, which
4039  * passes the arguments in a different order than the default.
4040  */
4041 #ifdef __s390x__
4042 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4043   (__nsops), (__timeout), (__sops)
4044 #else
4045 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4046   (__nsops), 0, (__sops), (__timeout)
4047 #endif
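/*
 * For example, with the default (non-s390x) definition,
 *     safe_ipc(IPCOP_semtimedop, semid,
 *              SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))
 * expands to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 * which matches the kernel's sys_ipc(call, first, second, third, ptr,
 * fifth) argument layout.
 */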
4048 
4049 static inline abi_long do_semtimedop(int semid,
4050                                      abi_long ptr,
4051                                      unsigned nsops,
4052                                      abi_long timeout, bool time64)
4053 {
4054     struct sembuf *sops;
4055     struct timespec ts, *pts = NULL;
4056     abi_long ret;
4057 
4058     if (timeout) {
4059         pts = &ts;
4060         if (time64) {
4061             if (target_to_host_timespec64(pts, timeout)) {
4062                 return -TARGET_EFAULT;
4063             }
4064         } else {
4065             if (target_to_host_timespec(pts, timeout)) {
4066                 return -TARGET_EFAULT;
4067             }
4068         }
4069     }
4070 
4071     if (nsops > TARGET_SEMOPM) {
4072         return -TARGET_E2BIG;
4073     }
4074 
4075     sops = g_new(struct sembuf, nsops);
4076 
4077     if (target_to_host_sembuf(sops, ptr, nsops)) {
4078         g_free(sops);
4079         return -TARGET_EFAULT;
4080     }
4081 
4082     ret = -TARGET_ENOSYS;
4083 #ifdef __NR_semtimedop
4084     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4085 #endif
4086 #ifdef __NR_ipc
4087     if (ret == -TARGET_ENOSYS) {
4088         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4089                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4090     }
4091 #endif
4092     g_free(sops);
4093     return ret;
4094 }
4095 #endif
4096 
4097 struct target_msqid_ds
4098 {
4099     struct target_ipc_perm msg_perm;
4100     abi_ulong msg_stime;
4101 #if TARGET_ABI_BITS == 32
4102     abi_ulong __unused1;
4103 #endif
4104     abi_ulong msg_rtime;
4105 #if TARGET_ABI_BITS == 32
4106     abi_ulong __unused2;
4107 #endif
4108     abi_ulong msg_ctime;
4109 #if TARGET_ABI_BITS == 32
4110     abi_ulong __unused3;
4111 #endif
4112     abi_ulong __msg_cbytes;
4113     abi_ulong msg_qnum;
4114     abi_ulong msg_qbytes;
4115     abi_ulong msg_lspid;
4116     abi_ulong msg_lrpid;
4117     abi_ulong __unused4;
4118     abi_ulong __unused5;
4119 };
4120 
4121 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4122                                                abi_ulong target_addr)
4123 {
4124     struct target_msqid_ds *target_md;
4125 
4126     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4127         return -TARGET_EFAULT;
4128     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4129         return -TARGET_EFAULT;
4130     host_md->msg_stime = tswapal(target_md->msg_stime);
4131     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4132     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4133     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4134     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4135     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4136     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4137     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4138     unlock_user_struct(target_md, target_addr, 0);
4139     return 0;
4140 }
4141 
4142 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4143                                                struct msqid_ds *host_md)
4144 {
4145     struct target_msqid_ds *target_md;
4146 
4147     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4148         return -TARGET_EFAULT;
4149     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4150         return -TARGET_EFAULT;
4151     target_md->msg_stime = tswapal(host_md->msg_stime);
4152     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4153     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4154     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4155     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4156     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4157     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4158     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4159     unlock_user_struct(target_md, target_addr, 1);
4160     return 0;
4161 }
4162 
4163 struct target_msginfo {
4164     int msgpool;
4165     int msgmap;
4166     int msgmax;
4167     int msgmnb;
4168     int msgmni;
4169     int msgssz;
4170     int msgtql;
4171     unsigned short int msgseg;
4172 };
4173 
4174 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4175                                               struct msginfo *host_msginfo)
4176 {
4177     struct target_msginfo *target_msginfo;
4178     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4179         return -TARGET_EFAULT;
4180     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4181     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4182     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4183     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4184     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4185     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4186     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4187     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4188     unlock_user_struct(target_msginfo, target_addr, 1);
4189     return 0;
4190 }
4191 
4192 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4193 {
4194     struct msqid_ds dsarg;
4195     struct msginfo msginfo;
4196     abi_long ret = -TARGET_EINVAL;
4197 
4198     cmd &= 0xff;
4199 
4200     switch (cmd) {
4201     case IPC_STAT:
4202     case IPC_SET:
4203     case MSG_STAT:
4204         if (target_to_host_msqid_ds(&dsarg,ptr))
4205             return -TARGET_EFAULT;
4206         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4207         if (host_to_target_msqid_ds(ptr,&dsarg))
4208             return -TARGET_EFAULT;
4209         break;
4210     case IPC_RMID:
4211         ret = get_errno(msgctl(msgid, cmd, NULL));
4212         break;
4213     case IPC_INFO:
4214     case MSG_INFO:
4215         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4216         if (host_to_target_msginfo(ptr, &msginfo))
4217             return -TARGET_EFAULT;
4218         break;
4219     }
4220 
4221     return ret;
4222 }
4223 
4224 struct target_msgbuf {
4225     abi_long mtype;
4226     char mtext[1];
4227 };
4228 
4229 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4230                                  ssize_t msgsz, int msgflg)
4231 {
4232     struct target_msgbuf *target_mb;
4233     struct msgbuf *host_mb;
4234     abi_long ret = 0;
4235 
4236     if (msgsz < 0) {
4237         return -TARGET_EINVAL;
4238     }
4239 
4240     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4241         return -TARGET_EFAULT;
4242     host_mb = g_try_malloc(msgsz + sizeof(long));
4243     if (!host_mb) {
4244         unlock_user_struct(target_mb, msgp, 0);
4245         return -TARGET_ENOMEM;
4246     }
4247     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4248     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4249     ret = -TARGET_ENOSYS;
4250 #ifdef __NR_msgsnd
4251     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4252 #endif
4253 #ifdef __NR_ipc
4254     if (ret == -TARGET_ENOSYS) {
4255 #ifdef __s390x__
4256         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4257                                  host_mb));
4258 #else
4259         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4260                                  host_mb, 0));
4261 #endif
4262     }
4263 #endif
4264     g_free(host_mb);
4265     unlock_user_struct(target_mb, msgp, 0);
4266 
4267     return ret;
4268 }
4269 
4270 #ifdef __NR_ipc
4271 #if defined(__sparc__)
4272 /* For msgrcv, SPARC does not use the kludge on the final two arguments.  */
4273 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4274 #elif defined(__s390x__)
4275 /* The s390 sys_ipc variant has only five parameters.  */
4276 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4277     ((long int[]){(long int)__msgp, __msgtyp})
4278 #else
4279 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4280     ((long int[]){(long int)__msgp, __msgtyp}), 0
4281 #endif
4282 #endif
4283 
4284 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4285                                  ssize_t msgsz, abi_long msgtyp,
4286                                  int msgflg)
4287 {
4288     struct target_msgbuf *target_mb;
4289     char *target_mtext;
4290     struct msgbuf *host_mb;
4291     abi_long ret = 0;
4292 
4293     if (msgsz < 0) {
4294         return -TARGET_EINVAL;
4295     }
4296 
4297     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4298         return -TARGET_EFAULT;
4299 
4300     host_mb = g_try_malloc(msgsz + sizeof(long));
4301     if (!host_mb) {
4302         ret = -TARGET_ENOMEM;
4303         goto end;
4304     }
4305     ret = -TARGET_ENOSYS;
4306 #ifdef __NR_msgrcv
4307     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4308 #endif
4309 #ifdef __NR_ipc
4310     if (ret == -TARGET_ENOSYS) {
4311         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4312                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4313     }
4314 #endif
4315 
4316     if (ret > 0) {
4317         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4318         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4319         if (!target_mtext) {
4320             ret = -TARGET_EFAULT;
4321             goto end;
4322         }
4323         memcpy(target_mb->mtext, host_mb->mtext, ret);
4324         unlock_user(target_mtext, target_mtext_addr, ret);
4325     }
4326 
4327     target_mb->mtype = tswapal(host_mb->mtype);
4328 
4329 end:
4330     if (target_mb)
4331         unlock_user_struct(target_mb, msgp, 1);
4332     g_free(host_mb);
4333     return ret;
4334 }
4335 
4336 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4337                                                abi_ulong target_addr)
4338 {
4339     struct target_shmid_ds *target_sd;
4340 
4341     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4342         return -TARGET_EFAULT;
4343     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4344         return -TARGET_EFAULT;
4345     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4346     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4347     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4348     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4349     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4350     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4351     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4352     unlock_user_struct(target_sd, target_addr, 0);
4353     return 0;
4354 }
4355 
4356 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4357                                                struct shmid_ds *host_sd)
4358 {
4359     struct target_shmid_ds *target_sd;
4360 
4361     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4362         return -TARGET_EFAULT;
4363     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4364         return -TARGET_EFAULT;
4365     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4366     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4367     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4368     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4369     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4370     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4371     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4372     unlock_user_struct(target_sd, target_addr, 1);
4373     return 0;
4374 }
4375 
4376 struct  target_shminfo {
4377     abi_ulong shmmax;
4378     abi_ulong shmmin;
4379     abi_ulong shmmni;
4380     abi_ulong shmseg;
4381     abi_ulong shmall;
4382 };
4383 
4384 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4385                                               struct shminfo *host_shminfo)
4386 {
4387     struct target_shminfo *target_shminfo;
4388     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4389         return -TARGET_EFAULT;
4390     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4391     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4392     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4393     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4394     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4395     unlock_user_struct(target_shminfo, target_addr, 1);
4396     return 0;
4397 }
4398 
4399 struct target_shm_info {
4400     int used_ids;
4401     abi_ulong shm_tot;
4402     abi_ulong shm_rss;
4403     abi_ulong shm_swp;
4404     abi_ulong swap_attempts;
4405     abi_ulong swap_successes;
4406 };
4407 
4408 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4409                                                struct shm_info *host_shm_info)
4410 {
4411     struct target_shm_info *target_shm_info;
4412     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4413         return -TARGET_EFAULT;
4414     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4415     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4416     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4417     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4418     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4419     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4420     unlock_user_struct(target_shm_info, target_addr, 1);
4421     return 0;
4422 }
4423 
4424 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4425 {
4426     struct shmid_ds dsarg;
4427     struct shminfo shminfo;
4428     struct shm_info shm_info;
4429     abi_long ret = -TARGET_EINVAL;
4430 
4431     cmd &= 0xff;
4432 
4433     switch(cmd) {
4434     case IPC_STAT:
4435     case IPC_SET:
4436     case SHM_STAT:
4437         if (target_to_host_shmid_ds(&dsarg, buf))
4438             return -TARGET_EFAULT;
4439         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4440         if (host_to_target_shmid_ds(buf, &dsarg))
4441             return -TARGET_EFAULT;
4442         break;
4443     case IPC_INFO:
4444         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4445         if (host_to_target_shminfo(buf, &shminfo))
4446             return -TARGET_EFAULT;
4447         break;
4448     case SHM_INFO:
4449         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4450         if (host_to_target_shm_info(buf, &shm_info))
4451             return -TARGET_EFAULT;
4452         break;
4453     case IPC_RMID:
4454     case SHM_LOCK:
4455     case SHM_UNLOCK:
4456         ret = get_errno(shmctl(shmid, cmd, NULL));
4457         break;
4458     }
4459 
4460     return ret;
4461 }
4462 
4463 #ifndef TARGET_FORCE_SHMLBA
4464 /* For most architectures, SHMLBA is the same as the page size;
4465  * some architectures have larger values, in which case they should
4466  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4467  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4468  * and defining its own value for SHMLBA.
4469  *
4470  * The kernel also permits SHMLBA to be set by the architecture to a
4471  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4472  * this means that addresses are rounded to the large size if
4473  * SHM_RND is set but addresses not aligned to that size are not rejected
4474  * as long as they are at least page-aligned. Since the only architecture
4475  * which uses this is ia64 this code doesn't provide for that oddity.
4476  */
4477 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4478 {
4479     return TARGET_PAGE_SIZE;
4480 }
4481 #endif
4482 
4483 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4484                                  int shmid, abi_ulong shmaddr, int shmflg)
4485 {
4486     CPUState *cpu = env_cpu(cpu_env);
4487     abi_long raddr;
4488     void *host_raddr;
4489     struct shmid_ds shm_info;
4490     int i, ret;
4491     abi_ulong shmlba;
4492 
4493     /* shmat pointers are always untagged */
4494 
4495     /* find out the length of the shared memory segment */
4496     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4497     if (is_error(ret)) {
4498         /* can't get length, bail out */
4499         return ret;
4500     }
4501 
4502     shmlba = target_shmlba(cpu_env);
4503 
4504     if (shmaddr & (shmlba - 1)) {
4505         if (shmflg & SHM_RND) {
4506             shmaddr &= ~(shmlba - 1);
4507         } else {
4508             return -TARGET_EINVAL;
4509         }
4510     }
4511     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4512         return -TARGET_EINVAL;
4513     }
4514 
4515     mmap_lock();
4516 
4517     /*
4518      * We're mapping shared memory, so ensure we generate code for parallel
4519      * execution and flush old translations.  This will work up to the level
4520      * supported by the host -- anything that requires EXCP_ATOMIC will not
4521      * be atomic with respect to an external process.
4522      */
4523     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4524         cpu->tcg_cflags |= CF_PARALLEL;
4525         tb_flush(cpu);
4526     }
4527 
4528     if (shmaddr) {
4529         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4530     } else {
4531         abi_ulong mmap_start;
4532 
4533         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4534         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4535 
4536         if (mmap_start == -1) {
4537             errno = ENOMEM;
4538             host_raddr = (void *)-1;
4539         } else
4540             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4541                                shmflg | SHM_REMAP);
4542     }
4543 
4544     if (host_raddr == (void *)-1) {
4545         mmap_unlock();
4546         return get_errno((long)host_raddr);
4547     }
4548     raddr = h2g((unsigned long)host_raddr);
4549 
4550     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4551                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4552                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4553 
4554     for (i = 0; i < N_SHM_REGIONS; i++) {
4555         if (!shm_regions[i].in_use) {
4556             shm_regions[i].in_use = true;
4557             shm_regions[i].start = raddr;
4558             shm_regions[i].size = shm_info.shm_segsz;
4559             break;
4560         }
4561     }
4562 
4563     mmap_unlock();
4564     return raddr;
4565 
4566 }
4567 
4568 static inline abi_long do_shmdt(abi_ulong shmaddr)
4569 {
4570     int i;
4571     abi_long rv;
4572 
4573     /* shmdt pointers are always untagged */
4574 
4575     mmap_lock();
4576 
4577     for (i = 0; i < N_SHM_REGIONS; ++i) {
4578         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4579             shm_regions[i].in_use = false;
4580             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4581             break;
4582         }
4583     }
4584     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4585 
4586     mmap_unlock();
4587 
4588     return rv;
4589 }
4590 
4591 #ifdef TARGET_NR_ipc
4592 /* ??? This only works with linear mappings.  */
4593 /* do_ipc() must return target values and target errnos. */
4594 static abi_long do_ipc(CPUArchState *cpu_env,
4595                        unsigned int call, abi_long first,
4596                        abi_long second, abi_long third,
4597                        abi_long ptr, abi_long fifth)
4598 {
4599     int version;
4600     abi_long ret = 0;
4601 
4602     version = call >> 16;
4603     call &= 0xffff;
4604 
4605     switch (call) {
4606     case IPCOP_semop:
4607         ret = do_semtimedop(first, ptr, second, 0, false);
4608         break;
4609     case IPCOP_semtimedop:
4610     /*
4611      * The s390 sys_ipc variant has only five parameters instead of six
4612      * (as in the default variant); the only difference is the handling of
4613      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4614      * to a struct timespec while the generic variant uses the fifth parameter.
4615      */
4616 #if defined(TARGET_S390X)
4617         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4618 #else
4619         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4620 #endif
4621         break;
4622 
4623     case IPCOP_semget:
4624         ret = get_errno(semget(first, second, third));
4625         break;
4626 
4627     case IPCOP_semctl: {
4628         /* The semun argument to semctl is passed by value, so dereference the
4629          * ptr argument. */
4630         abi_ulong atptr;
4631         get_user_ual(atptr, ptr);
4632         ret = do_semctl(first, second, third, atptr);
4633         break;
4634     }
4635 
4636     case IPCOP_msgget:
4637         ret = get_errno(msgget(first, second));
4638         break;
4639 
4640     case IPCOP_msgsnd:
4641         ret = do_msgsnd(first, ptr, second, third);
4642         break;
4643 
4644     case IPCOP_msgctl:
4645         ret = do_msgctl(first, second, ptr);
4646         break;
4647 
4648     case IPCOP_msgrcv:
4649         switch (version) {
4650         case 0:
4651             {
4652                 struct target_ipc_kludge {
4653                     abi_long msgp;
4654                     abi_long msgtyp;
4655                 } *tmp;
4656 
4657                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4658                     ret = -TARGET_EFAULT;
4659                     break;
4660                 }
4661 
4662                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4663 
4664                 unlock_user_struct(tmp, ptr, 0);
4665                 break;
4666             }
4667         default:
4668             ret = do_msgrcv(first, ptr, second, fifth, third);
4669         }
4670         break;
4671 
4672     case IPCOP_shmat:
4673         switch (version) {
4674         default:
4675         {
4676             abi_ulong raddr;
4677             raddr = do_shmat(cpu_env, first, ptr, second);
4678             if (is_error(raddr))
4679                 return get_errno(raddr);
4680             if (put_user_ual(raddr, third))
4681                 return -TARGET_EFAULT;
4682             break;
4683         }
4684         case 1:
4685             ret = -TARGET_EINVAL;
4686             break;
4687         }
4688         break;
4689     case IPCOP_shmdt:
4690         ret = do_shmdt(ptr);
4691         break;
4692 
4693     case IPCOP_shmget:
4694         /* IPC_* flag values are the same on all linux platforms */
4695         ret = get_errno(shmget(first, second, third));
4696         break;
4697 
4698     /* IPC_* and SHM_* command values are the same on all linux platforms */
4699     case IPCOP_shmctl:
4700         ret = do_shmctl(first, second, ptr);
4701         break;
4702     default:
4703         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4704                       call, version);
4705         ret = -TARGET_ENOSYS;
4706         break;
4707     }
4708     return ret;
4709 }
4710 #endif
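
/*
 * Illustrative sketch (not compiled): on targets that only provide the
 * multiplexed ipc() syscall, a guest libc typically funnels semop() through
 * it roughly as below.  SEMOP is call number 1 in the kernel's ipc(2)
 * multiplexer, and the argument order matches do_ipc() above: call, first,
 * second, third, ptr, fifth.
 */
#if 0
static int example_semop_via_ipc(int semid, struct sembuf *sops, size_t nsops)
{
    return syscall(__NR_ipc, 1 /* SEMOP */, semid, nsops, 0, sops);
}
#endif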
4711 
4712 /* kernel structure types definitions */
4713 
4714 #define STRUCT(name, ...) STRUCT_ ## name,
4715 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4716 enum {
4717 #include "syscall_types.h"
4718 STRUCT_MAX
4719 };
4720 #undef STRUCT
4721 #undef STRUCT_SPECIAL
4722 
4723 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4724 #define STRUCT_SPECIAL(name)
4725 #include "syscall_types.h"
4726 #undef STRUCT
4727 #undef STRUCT_SPECIAL
4728 
4729 #define MAX_STRUCT_SIZE 4096
4730 
4731 #ifdef CONFIG_FIEMAP
4732 /* So fiemap access checks don't overflow on 32 bit systems.
4733  * This is very slightly smaller than the limit imposed by
4734  * the underlying kernel.
4735  */
4736 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4737                             / sizeof(struct fiemap_extent))
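
/*
 * For scale: assuming the usual 32 byte struct fiemap and 56 byte
 * struct fiemap_extent, this bound works out to (UINT_MAX - 32) / 56,
 * i.e. roughly 76.7 million extents on hosts with a 32-bit unsigned int.
 */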
4738 
4739 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4740                                        int fd, int cmd, abi_long arg)
4741 {
4742     /* The parameter for this ioctl is a struct fiemap followed
4743      * by an array of struct fiemap_extent whose size is set
4744      * in fiemap->fm_extent_count. The array is filled in by the
4745      * ioctl.
4746      */
4747     int target_size_in, target_size_out;
4748     struct fiemap *fm;
4749     const argtype *arg_type = ie->arg_type;
4750     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4751     void *argptr, *p;
4752     abi_long ret;
4753     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4754     uint32_t outbufsz;
4755     int free_fm = 0;
4756 
4757     assert(arg_type[0] == TYPE_PTR);
4758     assert(ie->access == IOC_RW);
4759     arg_type++;
4760     target_size_in = thunk_type_size(arg_type, 0);
4761     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4762     if (!argptr) {
4763         return -TARGET_EFAULT;
4764     }
4765     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4766     unlock_user(argptr, arg, 0);
4767     fm = (struct fiemap *)buf_temp;
4768     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4769         return -TARGET_EINVAL;
4770     }
4771 
4772     outbufsz = sizeof (*fm) +
4773         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4774 
4775     if (outbufsz > MAX_STRUCT_SIZE) {
4776         /* We can't fit all the extents into the fixed size buffer.
4777          * Allocate one that is large enough and use it instead.
4778          */
4779         fm = g_try_malloc(outbufsz);
4780         if (!fm) {
4781             return -TARGET_ENOMEM;
4782         }
4783         memcpy(fm, buf_temp, sizeof(struct fiemap));
4784         free_fm = 1;
4785     }
4786     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4787     if (!is_error(ret)) {
4788         target_size_out = target_size_in;
4789         /* An extent_count of 0 means we were only counting the extents
4790          * so there are no structs to copy
4791          */
4792         if (fm->fm_extent_count != 0) {
4793             target_size_out += fm->fm_mapped_extents * extent_size;
4794         }
4795         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4796         if (!argptr) {
4797             ret = -TARGET_EFAULT;
4798         } else {
4799             /* Convert the struct fiemap */
4800             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4801             if (fm->fm_extent_count != 0) {
4802                 p = argptr + target_size_in;
4803                 /* ...and then all the struct fiemap_extents */
4804                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4805                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4806                                   THUNK_TARGET);
4807                     p += extent_size;
4808                 }
4809             }
4810             unlock_user(argptr, arg, target_size_out);
4811         }
4812     }
4813     if (free_fm) {
4814         g_free(fm);
4815     }
4816     return ret;
4817 }
4818 #endif
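
/*
 * Illustrative sketch (not compiled): the guest-side layout that
 * do_ioctl_fs_ioc_fiemap() above unpicks, i.e. a struct fiemap immediately
 * followed by fm_extent_count extent slots for the kernel to fill in.
 * FS_IOC_FIEMAP comes from <linux/fs.h>; the sizes used are only examples.
 */
#if 0
static int example_fiemap(int fd)
{
    size_t sz = sizeof(struct fiemap) + 16 * sizeof(struct fiemap_extent);
    struct fiemap *fm = calloc(1, sz);

    if (!fm) {
        return -1;
    }
    fm->fm_start = 0;
    fm->fm_length = 1024 * 1024;    /* map the first 1 MiB of the file */
    fm->fm_extent_count = 16;       /* number of extent slots after the header */
    if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
        /* fm->fm_mapped_extents entries of fm->fm_extents[] are now valid */
    }
    free(fm);
    return 0;
}
#endif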
4819 
4820 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4821                                 int fd, int cmd, abi_long arg)
4822 {
4823     const argtype *arg_type = ie->arg_type;
4824     int target_size;
4825     void *argptr;
4826     int ret;
4827     struct ifconf *host_ifconf;
4828     uint32_t outbufsz;
4829     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4830     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4831     int target_ifreq_size;
4832     int nb_ifreq;
4833     int free_buf = 0;
4834     int i;
4835     int target_ifc_len;
4836     abi_long target_ifc_buf;
4837     int host_ifc_len;
4838     char *host_ifc_buf;
4839 
4840     assert(arg_type[0] == TYPE_PTR);
4841     assert(ie->access == IOC_RW);
4842 
4843     arg_type++;
4844     target_size = thunk_type_size(arg_type, 0);
4845 
4846     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4847     if (!argptr)
4848         return -TARGET_EFAULT;
4849     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4850     unlock_user(argptr, arg, 0);
4851 
4852     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4853     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4854     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4855 
4856     if (target_ifc_buf != 0) {
4857         target_ifc_len = host_ifconf->ifc_len;
4858         nb_ifreq = target_ifc_len / target_ifreq_size;
4859         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4860 
4861         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4862         if (outbufsz > MAX_STRUCT_SIZE) {
4863             /*
4864              * We can't fit all the ifreq entries into the fixed size buffer.
4865              * Allocate one that is large enough and use it instead.
4866              */
4867             host_ifconf = malloc(outbufsz);
4868             if (!host_ifconf) {
4869                 return -TARGET_ENOMEM;
4870             }
4871             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4872             free_buf = 1;
4873         }
4874         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4875 
4876         host_ifconf->ifc_len = host_ifc_len;
4877     } else {
4878         host_ifc_buf = NULL;
4879     }
4880     host_ifconf->ifc_buf = host_ifc_buf;
4881 
4882     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4883     if (!is_error(ret)) {
4884         /* convert host ifc_len to target ifc_len */
4885 
4886         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4887         target_ifc_len = nb_ifreq * target_ifreq_size;
4888         host_ifconf->ifc_len = target_ifc_len;
4889 
4890         /* restore target ifc_buf */
4891 
4892         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4893 
4894         /* copy struct ifconf to target user */
4895 
4896         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4897         if (!argptr)
4898             return -TARGET_EFAULT;
4899         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4900         unlock_user(argptr, arg, target_size);
4901 
4902         if (target_ifc_buf != 0) {
4903             /* copy ifreq[] to target user */
4904             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
                 if (!argptr) {
                     if (free_buf) {
                         free(host_ifconf);
                     }
                     return -TARGET_EFAULT;
                 }
4905             for (i = 0; i < nb_ifreq; i++) {
4906                 thunk_convert(argptr + i * target_ifreq_size,
4907                               host_ifc_buf + i * sizeof(struct ifreq),
4908                               ifreq_arg_type, THUNK_TARGET);
4909             }
4910             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4911         }
4912     }
4913 
4914     if (free_buf) {
4915         free(host_ifconf);
4916     }
4917 
4918     return ret;
4919 }
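
/*
 * Illustrative sketch (not compiled): the usual two-pass guest use of
 * SIOCGIFCONF that the marshalling above caters for.  The first call with a
 * NULL buffer only reports the required length (the target_ifc_buf == 0
 * case handled above); the second call fills an array of struct ifreq.
 */
#if 0
static int example_siocgifconf(int sock)
{
    struct ifconf ifc;
    struct ifreq ifr[32];

    ifc.ifc_len = 0;
    ifc.ifc_buf = NULL;
    if (ioctl(sock, SIOCGIFCONF, &ifc) < 0) {   /* query the size only */
        return -1;
    }

    ifc.ifc_len = sizeof(ifr);
    ifc.ifc_req = ifr;
    if (ioctl(sock, SIOCGIFCONF, &ifc) < 0) {   /* fetch the interface list */
        return -1;
    }
    return ifc.ifc_len / sizeof(struct ifreq);  /* entries actually returned */
}
#endif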
4920 
4921 #if defined(CONFIG_USBFS)
4922 #if HOST_LONG_BITS > 64
4923 #error USBDEVFS thunks do not support >64 bit hosts yet.
4924 #endif
4925 struct live_urb {
4926     uint64_t target_urb_adr;
4927     uint64_t target_buf_adr;
4928     char *target_buf_ptr;
4929     struct usbdevfs_urb host_urb;
4930 };
4931 
4932 static GHashTable *usbdevfs_urb_hashtable(void)
4933 {
4934     static GHashTable *urb_hashtable;
4935 
4936     if (!urb_hashtable) {
4937         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4938     }
4939     return urb_hashtable;
4940 }
4941 
4942 static void urb_hashtable_insert(struct live_urb *urb)
4943 {
4944     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4945     g_hash_table_insert(urb_hashtable, urb, urb);
4946 }
4947 
4948 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4949 {
4950     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4951     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4952 }
4953 
4954 static void urb_hashtable_remove(struct live_urb *urb)
4955 {
4956     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4957     g_hash_table_remove(urb_hashtable, urb);
4958 }
4959 
4960 static abi_long
4961 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4962                           int fd, int cmd, abi_long arg)
4963 {
4964     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4965     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4966     struct live_urb *lurb;
4967     void *argptr;
4968     uint64_t hurb;
4969     int target_size;
4970     uintptr_t target_urb_adr;
4971     abi_long ret;
4972 
4973     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4974 
4975     memset(buf_temp, 0, sizeof(uint64_t));
4976     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4977     if (is_error(ret)) {
4978         return ret;
4979     }
4980 
4981     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4982     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4983     if (!lurb->target_urb_adr) {
4984         return -TARGET_EFAULT;
4985     }
4986     urb_hashtable_remove(lurb);
4987     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4988         lurb->host_urb.buffer_length);
4989     lurb->target_buf_ptr = NULL;
4990 
4991     /* restore the guest buffer pointer */
4992     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4993 
4994     /* update the guest urb struct */
4995     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4996     if (!argptr) {
4997         g_free(lurb);
4998         return -TARGET_EFAULT;
4999     }
5000     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5001     unlock_user(argptr, lurb->target_urb_adr, target_size);
5002 
5003     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5004     /* write back the urb handle */
5005     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5006     if (!argptr) {
5007         g_free(lurb);
5008         return -TARGET_EFAULT;
5009     }
5010 
5011     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5012     target_urb_adr = lurb->target_urb_adr;
5013     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5014     unlock_user(argptr, arg, target_size);
5015 
5016     g_free(lurb);
5017     return ret;
5018 }
5019 
5020 static abi_long
5021 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5022                              uint8_t *buf_temp __attribute__((unused)),
5023                              int fd, int cmd, abi_long arg)
5024 {
5025     struct live_urb *lurb;
5026 
5027     /* map target address back to host URB with metadata. */
5028     lurb = urb_hashtable_lookup(arg);
5029     if (!lurb) {
5030         return -TARGET_EFAULT;
5031     }
5032     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5033 }
5034 
5035 static abi_long
5036 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5037                             int fd, int cmd, abi_long arg)
5038 {
5039     const argtype *arg_type = ie->arg_type;
5040     int target_size;
5041     abi_long ret;
5042     void *argptr;
5043     int rw_dir;
5044     struct live_urb *lurb;
5045 
5046     /*
5047      * each submitted URB needs to map to a unique ID for the
5048      * kernel, and that unique ID needs to be a pointer to
5049      * host memory.  hence, we need to malloc for each URB.
5050      * isochronous transfers have a variable length struct.
5051      */
5052     arg_type++;
5053     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5054 
5055     /* construct host copy of urb and metadata */
5056     lurb = g_try_malloc0(sizeof(struct live_urb));
5057     if (!lurb) {
5058         return -TARGET_ENOMEM;
5059     }
5060 
5061     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5062     if (!argptr) {
5063         g_free(lurb);
5064         return -TARGET_EFAULT;
5065     }
5066     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5067     unlock_user(argptr, arg, 0);
5068 
5069     lurb->target_urb_adr = arg;
5070     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5071 
5072     /* buffer space used depends on endpoint type so lock the entire buffer */
5073     /* control type urbs should check the buffer contents for true direction */
5074     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5075     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5076         lurb->host_urb.buffer_length, 1);
5077     if (lurb->target_buf_ptr == NULL) {
5078         g_free(lurb);
5079         return -TARGET_EFAULT;
5080     }
5081 
5082     /* update buffer pointer in host copy */
5083     lurb->host_urb.buffer = lurb->target_buf_ptr;
5084 
5085     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5086     if (is_error(ret)) {
5087         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5088         g_free(lurb);
5089     } else {
5090         urb_hashtable_insert(lurb);
5091     }
5092 
5093     return ret;
5094 }
5095 #endif /* CONFIG_USBFS */
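
/*
 * Illustrative sketch (not compiled): the guest-side USBDEVFS_SUBMITURB /
 * USBDEVFS_REAPURB pair that the handlers above remap.  The bulk transfer
 * type, endpoint number and buffer size are only examples; fd is assumed to
 * be an open /dev/bus/usb device node.
 */
#if 0
static int example_bulk_in(int fd)
{
    static unsigned char buf[512];
    struct usbdevfs_urb urb, *done = NULL;

    memset(&urb, 0, sizeof(urb));
    urb.type = USBDEVFS_URB_TYPE_BULK;
    urb.endpoint = 0x81;                        /* IN endpoint 1 */
    urb.buffer = buf;
    urb.buffer_length = sizeof(buf);
    if (ioctl(fd, USBDEVFS_SUBMITURB, &urb) < 0) {
        return -1;
    }
    /* blocks until some submitted urb completes and hands back its address */
    if (ioctl(fd, USBDEVFS_REAPURB, &done) < 0) {
        return -1;
    }
    return done->actual_length;
}
#endif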
5096 
5097 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5098                             int cmd, abi_long arg)
5099 {
5100     void *argptr;
5101     struct dm_ioctl *host_dm;
5102     abi_long guest_data;
5103     uint32_t guest_data_size;
5104     int target_size;
5105     const argtype *arg_type = ie->arg_type;
5106     abi_long ret;
5107     void *big_buf = NULL;
5108     char *host_data;
5109 
5110     arg_type++;
5111     target_size = thunk_type_size(arg_type, 0);
5112     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5113     if (!argptr) {
5114         ret = -TARGET_EFAULT;
5115         goto out;
5116     }
5117     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5118     unlock_user(argptr, arg, 0);
5119 
5120     /* buf_temp is too small, so fetch things into a bigger buffer */
5121     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5122     memcpy(big_buf, buf_temp, target_size);
5123     buf_temp = big_buf;
5124     host_dm = big_buf;
5125 
5126     guest_data = arg + host_dm->data_start;
5127     if ((guest_data - arg) < 0) {
5128         ret = -TARGET_EINVAL;
5129         goto out;
5130     }
5131     guest_data_size = host_dm->data_size - host_dm->data_start;
5132     host_data = (char*)host_dm + host_dm->data_start;
5133 
5134     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5135     if (!argptr) {
5136         ret = -TARGET_EFAULT;
5137         goto out;
5138     }
5139 
5140     switch (ie->host_cmd) {
5141     case DM_REMOVE_ALL:
5142     case DM_LIST_DEVICES:
5143     case DM_DEV_CREATE:
5144     case DM_DEV_REMOVE:
5145     case DM_DEV_SUSPEND:
5146     case DM_DEV_STATUS:
5147     case DM_DEV_WAIT:
5148     case DM_TABLE_STATUS:
5149     case DM_TABLE_CLEAR:
5150     case DM_TABLE_DEPS:
5151     case DM_LIST_VERSIONS:
5152         /* no input data */
5153         break;
5154     case DM_DEV_RENAME:
5155     case DM_DEV_SET_GEOMETRY:
5156         /* data contains only strings */
5157         memcpy(host_data, argptr, guest_data_size);
5158         break;
5159     case DM_TARGET_MSG:
5160         memcpy(host_data, argptr, guest_data_size);
5161         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5162         break;
5163     case DM_TABLE_LOAD:
5164     {
5165         void *gspec = argptr;
5166         void *cur_data = host_data;
5167         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5168         int spec_size = thunk_type_size(arg_type, 0);
5169         int i;
5170 
5171         for (i = 0; i < host_dm->target_count; i++) {
5172             struct dm_target_spec *spec = cur_data;
5173             uint32_t next;
5174             int slen;
5175 
5176             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5177             slen = strlen((char*)gspec + spec_size) + 1;
5178             next = spec->next;
5179             spec->next = sizeof(*spec) + slen;
5180             strcpy((char*)&spec[1], gspec + spec_size);
5181             gspec += next;
5182             cur_data += spec->next;
5183         }
5184         break;
5185     }
5186     default:
5187         ret = -TARGET_EINVAL;
5188         unlock_user(argptr, guest_data, 0);
5189         goto out;
5190     }
5191     unlock_user(argptr, guest_data, 0);
5192 
5193     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5194     if (!is_error(ret)) {
5195         guest_data = arg + host_dm->data_start;
5196         guest_data_size = host_dm->data_size - host_dm->data_start;
5197         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5198         switch (ie->host_cmd) {
5199         case DM_REMOVE_ALL:
5200         case DM_DEV_CREATE:
5201         case DM_DEV_REMOVE:
5202         case DM_DEV_RENAME:
5203         case DM_DEV_SUSPEND:
5204         case DM_DEV_STATUS:
5205         case DM_TABLE_LOAD:
5206         case DM_TABLE_CLEAR:
5207         case DM_TARGET_MSG:
5208         case DM_DEV_SET_GEOMETRY:
5209             /* no return data */
5210             break;
5211         case DM_LIST_DEVICES:
5212         {
5213             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5214             uint32_t remaining_data = guest_data_size;
5215             void *cur_data = argptr;
5216             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5217             int nl_size = 12; /* 8 byte dev + 4 byte next; thunk_type_size would over-align */
5218 
5219             while (1) {
5220                 uint32_t next = nl->next;
5221                 if (next) {
5222                     nl->next = nl_size + (strlen(nl->name) + 1);
5223                 }
5224                 if (remaining_data < nl->next) {
5225                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5226                     break;
5227                 }
5228                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5229                 strcpy(cur_data + nl_size, nl->name);
5230                 cur_data += nl->next;
5231                 remaining_data -= nl->next;
5232                 if (!next) {
5233                     break;
5234                 }
5235                 nl = (void*)nl + next;
5236             }
5237             break;
5238         }
5239         case DM_DEV_WAIT:
5240         case DM_TABLE_STATUS:
5241         {
5242             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5243             void *cur_data = argptr;
5244             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5245             int spec_size = thunk_type_size(arg_type, 0);
5246             int i;
5247 
5248             for (i = 0; i < host_dm->target_count; i++) {
5249                 uint32_t next = spec->next;
5250                 int slen = strlen((char*)&spec[1]) + 1;
5251                 spec->next = (cur_data - argptr) + spec_size + slen;
5252                 if (guest_data_size < spec->next) {
5253                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5254                     break;
5255                 }
5256                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5257                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5258                 cur_data = argptr + spec->next;
5259                 spec = (void*)host_dm + host_dm->data_start + next;
5260             }
5261             break;
5262         }
5263         case DM_TABLE_DEPS:
5264         {
5265             void *hdata = (void*)host_dm + host_dm->data_start;
5266             int count = *(uint32_t*)hdata;
5267             uint64_t *hdev = hdata + 8;
5268             uint64_t *gdev = argptr + 8;
5269             int i;
5270 
5271             *(uint32_t*)argptr = tswap32(count);
5272             for (i = 0; i < count; i++) {
5273                 *gdev = tswap64(*hdev);
5274                 gdev++;
5275                 hdev++;
5276             }
5277             break;
5278         }
5279         case DM_LIST_VERSIONS:
5280         {
5281             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5282             uint32_t remaining_data = guest_data_size;
5283             void *cur_data = argptr;
5284             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5285             int vers_size = thunk_type_size(arg_type, 0);
5286 
5287             while (1) {
5288                 uint32_t next = vers->next;
5289                 if (next) {
5290                     vers->next = vers_size + (strlen(vers->name) + 1);
5291                 }
5292                 if (remaining_data < vers->next) {
5293                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5294                     break;
5295                 }
5296                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5297                 strcpy(cur_data + vers_size, vers->name);
5298                 cur_data += vers->next;
5299                 remaining_data -= vers->next;
5300                 if (!next) {
5301                     break;
5302                 }
5303                 vers = (void*)vers + next;
5304             }
5305             break;
5306         }
5307         default:
5308             unlock_user(argptr, guest_data, 0);
5309             ret = -TARGET_EINVAL;
5310             goto out;
5311         }
5312         unlock_user(argptr, guest_data, guest_data_size);
5313 
5314         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5315         if (!argptr) {
5316             ret = -TARGET_EFAULT;
5317             goto out;
5318         }
5319         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5320         unlock_user(argptr, arg, target_size);
5321     }
5322 out:
5323     g_free(big_buf);
5324     return ret;
5325 }
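
/*
 * Illustrative sketch (not compiled): the variable-length dm_ioctl layout
 * that do_ioctl_dm() above converts.  The guest supplies a single buffer:
 * the fixed struct dm_ioctl header sits at the start and the per-command
 * payload lives at data_start within the same buffer.  The buffer size and
 * version numbers are only examples; DM_VERSION_MAJOR and DM_LIST_DEVICES
 * come from <linux/dm-ioctl.h>.
 */
#if 0
static void example_dm_list_devices(int control_fd)
{
    char buf[16384];
    struct dm_ioctl *dmi = (struct dm_ioctl *)buf;

    memset(buf, 0, sizeof(buf));
    dmi->version[0] = DM_VERSION_MAJOR;
    dmi->data_size = sizeof(buf);
    dmi->data_start = sizeof(struct dm_ioctl);
    if (ioctl(control_fd, DM_LIST_DEVICES, dmi) == 0) {
        /* struct dm_name_list records start at buf + dmi->data_start */
    }
}
#endif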
5326 
5327 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5328                                int cmd, abi_long arg)
5329 {
5330     void *argptr;
5331     int target_size;
5332     const argtype *arg_type = ie->arg_type;
5333     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5334     abi_long ret;
5335 
5336     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5337     struct blkpg_partition host_part;
5338 
5339     /* Read and convert blkpg */
5340     arg_type++;
5341     target_size = thunk_type_size(arg_type, 0);
5342     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5343     if (!argptr) {
5344         ret = -TARGET_EFAULT;
5345         goto out;
5346     }
5347     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5348     unlock_user(argptr, arg, 0);
5349 
5350     switch (host_blkpg->op) {
5351     case BLKPG_ADD_PARTITION:
5352     case BLKPG_DEL_PARTITION:
5353         /* payload is struct blkpg_partition */
5354         break;
5355     default:
5356         /* Unknown opcode */
5357         ret = -TARGET_EINVAL;
5358         goto out;
5359     }
5360 
5361     /* Read and convert blkpg->data */
5362     arg = (abi_long)(uintptr_t)host_blkpg->data;
5363     target_size = thunk_type_size(part_arg_type, 0);
5364     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5365     if (!argptr) {
5366         ret = -TARGET_EFAULT;
5367         goto out;
5368     }
5369     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5370     unlock_user(argptr, arg, 0);
5371 
5372     /* Swizzle the data pointer to our local copy and call! */
5373     host_blkpg->data = &host_part;
5374     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5375 
5376 out:
5377     return ret;
5378 }
5379 
5380 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5381                                 int fd, int cmd, abi_long arg)
5382 {
5383     const argtype *arg_type = ie->arg_type;
5384     const StructEntry *se;
5385     const argtype *field_types;
5386     const int *dst_offsets, *src_offsets;
5387     int target_size;
5388     void *argptr;
5389     abi_ulong *target_rt_dev_ptr = NULL;
5390     unsigned long *host_rt_dev_ptr = NULL;
5391     abi_long ret;
5392     int i;
5393 
5394     assert(ie->access == IOC_W);
5395     assert(*arg_type == TYPE_PTR);
5396     arg_type++;
5397     assert(*arg_type == TYPE_STRUCT);
5398     target_size = thunk_type_size(arg_type, 0);
5399     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5400     if (!argptr) {
5401         return -TARGET_EFAULT;
5402     }
5403     arg_type++;
5404     assert(*arg_type == (int)STRUCT_rtentry);
5405     se = struct_entries + *arg_type++;
5406     assert(se->convert[0] == NULL);
5407     /* convert struct here to be able to catch rt_dev string */
5408     field_types = se->field_types;
5409     dst_offsets = se->field_offsets[THUNK_HOST];
5410     src_offsets = se->field_offsets[THUNK_TARGET];
5411     for (i = 0; i < se->nb_fields; i++) {
5412         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5413             assert(*field_types == TYPE_PTRVOID);
5414             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5415             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5416             if (*target_rt_dev_ptr != 0) {
5417                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5418                                                   tswapal(*target_rt_dev_ptr));
5419                 if (!*host_rt_dev_ptr) {
5420                     unlock_user(argptr, arg, 0);
5421                     return -TARGET_EFAULT;
5422                 }
5423             } else {
5424                 *host_rt_dev_ptr = 0;
5425             }
5426             field_types++;
5427             continue;
5428         }
5429         field_types = thunk_convert(buf_temp + dst_offsets[i],
5430                                     argptr + src_offsets[i],
5431                                     field_types, THUNK_HOST);
5432     }
5433     unlock_user(argptr, arg, 0);
5434 
5435     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5436 
5437     assert(host_rt_dev_ptr != NULL);
5438     assert(target_rt_dev_ptr != NULL);
5439     if (*host_rt_dev_ptr != 0) {
5440         unlock_user((void *)*host_rt_dev_ptr,
5441                     *target_rt_dev_ptr, 0);
5442     }
5443     return ret;
5444 }
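
/*
 * Illustrative sketch (not compiled): an SIOCADDRT request as a guest would
 * issue it.  The rt_dev string pointer embedded in struct rtentry is exactly
 * what do_ioctl_rt() above has to lock and translate separately from the
 * rest of the structure; addresses and the interface name are only examples.
 */
#if 0
static int example_add_default_route(int sock)
{
    struct rtentry rt;
    struct sockaddr_in *sin = (struct sockaddr_in *)&rt.rt_gateway;

    memset(&rt, 0, sizeof(rt));
    sin->sin_family = AF_INET;
    sin->sin_addr.s_addr = htonl(0xc0a80101);       /* 192.168.1.1 */
    ((struct sockaddr_in *)&rt.rt_dst)->sin_family = AF_INET;
    ((struct sockaddr_in *)&rt.rt_genmask)->sin_family = AF_INET;
    rt.rt_flags = RTF_UP | RTF_GATEWAY;
    rt.rt_dev = (char *)"eth0";                     /* pointer to a guest string */
    return ioctl(sock, SIOCADDRT, &rt);
}
#endif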
5445 
5446 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5447                                      int fd, int cmd, abi_long arg)
5448 {
5449     int sig = target_to_host_signal(arg);
5450     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5451 }
5452 
5453 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5454                                     int fd, int cmd, abi_long arg)
5455 {
5456     struct timeval tv;
5457     abi_long ret;
5458 
5459     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5460     if (is_error(ret)) {
5461         return ret;
5462     }
5463 
5464     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5465         if (copy_to_user_timeval(arg, &tv)) {
5466             return -TARGET_EFAULT;
5467         }
5468     } else {
5469         if (copy_to_user_timeval64(arg, &tv)) {
5470             return -TARGET_EFAULT;
5471         }
5472     }
5473 
5474     return ret;
5475 }
5476 
5477 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5478                                       int fd, int cmd, abi_long arg)
5479 {
5480     struct timespec ts;
5481     abi_long ret;
5482 
5483     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5484     if (is_error(ret)) {
5485         return ret;
5486     }
5487 
5488     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5489         if (host_to_target_timespec(arg, &ts)) {
5490             return -TARGET_EFAULT;
5491         }
5492     } else {
5493         if (host_to_target_timespec64(arg, &ts)) {
5494             return -TARGET_EFAULT;
5495         }
5496     }
5497 
5498     return ret;
5499 }
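
/*
 * Illustrative sketch (not compiled): typical guest use of SIOCGSTAMP, which
 * reports the receive timestamp of the last packet delivered on a socket.
 * The _OLD/_NEW split handled above only changes how wide the time structure
 * written back to the guest is.
 */
#if 0
static void example_siocgstamp(int sock)
{
    char buf[1500];
    struct timeval tv;

    if (recv(sock, buf, sizeof(buf), 0) >= 0 &&
        ioctl(sock, SIOCGSTAMP, &tv) == 0) {
        /* tv now holds the kernel's timestamp for that packet */
    }
}
#endif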
5500 
5501 #ifdef TIOCGPTPEER
5502 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5503                                      int fd, int cmd, abi_long arg)
5504 {
5505     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5506     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5507 }
5508 #endif
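
/*
 * Illustrative sketch (not compiled): TIOCGPTPEER opens the peer (slave) end
 * of a pseudoterminal and returns it as a new file descriptor, interpreting
 * its argument as open(2) flags, which is why the handler above runs the
 * value through fcntl_flags_tbl.
 */
#if 0
static int example_open_pty_peer(void)
{
    int mfd = open("/dev/ptmx", O_RDWR | O_NOCTTY);

    if (mfd < 0) {
        return -1;
    }
    if (grantpt(mfd) < 0 || unlockpt(mfd) < 0) {
        close(mfd);
        return -1;
    }
    return ioctl(mfd, TIOCGPTPEER, O_RDWR | O_NOCTTY);  /* fd of the peer */
}
#endif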
5509 
5510 #ifdef HAVE_DRM_H
5511 
5512 static void unlock_drm_version(struct drm_version *host_ver,
5513                                struct target_drm_version *target_ver,
5514                                bool copy)
5515 {
5516     unlock_user(host_ver->name, target_ver->name,
5517                                 copy ? host_ver->name_len : 0);
5518     unlock_user(host_ver->date, target_ver->date,
5519                                 copy ? host_ver->date_len : 0);
5520     unlock_user(host_ver->desc, target_ver->desc,
5521                                 copy ? host_ver->desc_len : 0);
5522 }
5523 
5524 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5525                                           struct target_drm_version *target_ver)
5526 {
5527     memset(host_ver, 0, sizeof(*host_ver));
5528 
5529     __get_user(host_ver->name_len, &target_ver->name_len);
5530     if (host_ver->name_len) {
5531         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5532                                    target_ver->name_len, 0);
5533         if (!host_ver->name) {
5534             return -EFAULT;
5535         }
5536     }
5537 
5538     __get_user(host_ver->date_len, &target_ver->date_len);
5539     if (host_ver->date_len) {
5540         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5541                                    target_ver->date_len, 0);
5542         if (!host_ver->date) {
5543             goto err;
5544         }
5545     }
5546 
5547     __get_user(host_ver->desc_len, &target_ver->desc_len);
5548     if (host_ver->desc_len) {
5549         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5550                                    target_ver->desc_len, 0);
5551         if (!host_ver->desc) {
5552             goto err;
5553         }
5554     }
5555 
5556     return 0;
5557 err:
5558     unlock_drm_version(host_ver, target_ver, false);
5559     return -EFAULT;
5560 }
5561 
5562 static inline void host_to_target_drmversion(
5563                                           struct target_drm_version *target_ver,
5564                                           struct drm_version *host_ver)
5565 {
5566     __put_user(host_ver->version_major, &target_ver->version_major);
5567     __put_user(host_ver->version_minor, &target_ver->version_minor);
5568     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5569     __put_user(host_ver->name_len, &target_ver->name_len);
5570     __put_user(host_ver->date_len, &target_ver->date_len);
5571     __put_user(host_ver->desc_len, &target_ver->desc_len);
5572     unlock_drm_version(host_ver, target_ver, true);
5573 }
5574 
5575 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5576                              int fd, int cmd, abi_long arg)
5577 {
5578     struct drm_version *ver;
5579     struct target_drm_version *target_ver;
5580     abi_long ret;
5581 
5582     switch (ie->host_cmd) {
5583     case DRM_IOCTL_VERSION:
5584         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5585             return -TARGET_EFAULT;
5586         }
5587         ver = (struct drm_version *)buf_temp;
5588         ret = target_to_host_drmversion(ver, target_ver);
5589         if (!is_error(ret)) {
5590             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5591             if (is_error(ret)) {
5592                 unlock_drm_version(ver, target_ver, false);
5593             } else {
5594                 host_to_target_drmversion(target_ver, ver);
5595             }
5596         }
5597         unlock_user_struct(target_ver, arg, 0);
5598         return ret;
5599     }
5600     return -TARGET_ENOSYS;
5601 }
5602 
5603 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5604                                            struct drm_i915_getparam *gparam,
5605                                            int fd, abi_long arg)
5606 {
5607     abi_long ret;
5608     int value;
5609     struct target_drm_i915_getparam *target_gparam;
5610 
5611     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5612         return -TARGET_EFAULT;
5613     }
5614 
5615     __get_user(gparam->param, &target_gparam->param);
5616     gparam->value = &value;
5617     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5618     put_user_s32(value, target_gparam->value);
5619 
5620     unlock_user_struct(target_gparam, arg, 0);
5621     return ret;
5622 }
5623 
5624 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5625                                   int fd, int cmd, abi_long arg)
5626 {
5627     switch (ie->host_cmd) {
5628     case DRM_IOCTL_I915_GETPARAM:
5629         return do_ioctl_drm_i915_getparam(ie,
5630                                           (struct drm_i915_getparam *)buf_temp,
5631                                           fd, arg);
5632     default:
5633         return -TARGET_ENOSYS;
5634     }
5635 }
5636 
5637 #endif
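
/*
 * Illustrative sketch (not compiled): the usual two-pass DRM_IOCTL_VERSION
 * sequence (as libdrm performs it) that the marshalling above supports.  The
 * first call only reports the string lengths, the caller then provides
 * buffers for the second call.
 */
#if 0
static int example_drm_version(int fd)
{
    struct drm_version ver;
    int ret;

    memset(&ver, 0, sizeof(ver));
    if (ioctl(fd, DRM_IOCTL_VERSION, &ver) < 0) {   /* lengths only */
        return -1;
    }
    ver.name = calloc(1, ver.name_len + 1);
    ver.date = calloc(1, ver.date_len + 1);
    ver.desc = calloc(1, ver.desc_len + 1);
    ret = -1;
    if (ver.name && ver.date && ver.desc) {
        ret = ioctl(fd, DRM_IOCTL_VERSION, &ver);   /* strings filled in */
        /* on success ver.name holds the driver name, e.g. "i915" */
    }
    free(ver.name);
    free(ver.date);
    free(ver.desc);
    return ret;
}
#endif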
5638 
5639 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5640                                         int fd, int cmd, abi_long arg)
5641 {
5642     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5643     struct tun_filter *target_filter;
5644     char *target_addr;
5645 
5646     assert(ie->access == IOC_W);
5647 
5648     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5649     if (!target_filter) {
5650         return -TARGET_EFAULT;
5651     }
5652     filter->flags = tswap16(target_filter->flags);
5653     filter->count = tswap16(target_filter->count);
5654     unlock_user(target_filter, arg, 0);
5655 
5656     if (filter->count) {
5657         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5658             MAX_STRUCT_SIZE) {
5659             return -TARGET_EFAULT;
5660         }
5661 
5662         target_addr = lock_user(VERIFY_READ,
5663                                 arg + offsetof(struct tun_filter, addr),
5664                                 filter->count * ETH_ALEN, 1);
5665         if (!target_addr) {
5666             return -TARGET_EFAULT;
5667         }
5668         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5669         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5670     }
5671 
5672     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5673 }
5674 
5675 IOCTLEntry ioctl_entries[] = {
5676 #define IOCTL(cmd, access, ...) \
5677     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5678 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5679     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5680 #define IOCTL_IGNORE(cmd) \
5681     { TARGET_ ## cmd, 0, #cmd },
5682 #include "ioctls.h"
5683     { 0, 0, },
5684 };
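
/*
 * For reference, each IOCTL() line in ioctls.h expands to one IOCTLEntry.
 * A declaration along the lines of
 *     IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 * becomes
 *     { TARGET_BLKROSET, BLKROSET, "BLKROSET", IOC_W, 0, { MK_PTR(TYPE_INT) } },
 * IOCTL_SPECIAL() additionally wires in one of the do_ioctl_*() helpers
 * defined above, and IOCTL_IGNORE() leaves host_cmd at 0 so that do_ioctl()
 * fails the request with -TARGET_ENOSYS instead of logging it as unsupported.
 */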
5685 
5686 /* ??? Implement proper locking for ioctls.  */
5687 /* do_ioctl() must return target values and target errnos. */
5688 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5689 {
5690     const IOCTLEntry *ie;
5691     const argtype *arg_type;
5692     abi_long ret;
5693     uint8_t buf_temp[MAX_STRUCT_SIZE];
5694     int target_size;
5695     void *argptr;
5696 
5697     ie = ioctl_entries;
5698     for (;;) {
5699         if (ie->target_cmd == 0) {
5700             qemu_log_mask(
5701                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5702             return -TARGET_ENOSYS;
5703         }
5704         if (ie->target_cmd == cmd)
5705             break;
5706         ie++;
5707     }
5708     arg_type = ie->arg_type;
5709     if (ie->do_ioctl) {
5710         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5711     } else if (!ie->host_cmd) {
5712         /* Some architectures define BSD ioctls in their headers
5713            that are not implemented in Linux.  */
5714         return -TARGET_ENOSYS;
5715     }
5716 
5717     switch (arg_type[0]) {
5718     case TYPE_NULL:
5719         /* no argument */
5720         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5721         break;
5722     case TYPE_PTRVOID:
5723     case TYPE_INT:
5724     case TYPE_LONG:
5725     case TYPE_ULONG:
5726         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5727         break;
5728     case TYPE_PTR:
5729         arg_type++;
5730         target_size = thunk_type_size(arg_type, 0);
5731         switch (ie->access) {
5732         case IOC_R:
5733             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5734             if (!is_error(ret)) {
5735                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5736                 if (!argptr)
5737                     return -TARGET_EFAULT;
5738                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5739                 unlock_user(argptr, arg, target_size);
5740             }
5741             break;
5742         case IOC_W:
5743             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5744             if (!argptr)
5745                 return -TARGET_EFAULT;
5746             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5747             unlock_user(argptr, arg, 0);
5748             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5749             break;
5750         default:
5751         case IOC_RW:
5752             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5753             if (!argptr)
5754                 return -TARGET_EFAULT;
5755             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5756             unlock_user(argptr, arg, 0);
5757             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5758             if (!is_error(ret)) {
5759                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5760                 if (!argptr)
5761                     return -TARGET_EFAULT;
5762                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5763                 unlock_user(argptr, arg, target_size);
5764             }
5765             break;
5766         }
5767         break;
5768     default:
5769         qemu_log_mask(LOG_UNIMP,
5770                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5771                       (long)cmd, arg_type[0]);
5772         ret = -TARGET_ENOSYS;
5773         break;
5774     }
5775     return ret;
5776 }
5777 
5778 static const bitmask_transtbl iflag_tbl[] = {
5779         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5780         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5781         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5782         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5783         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5784         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5785         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5786         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5787         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5788         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5789         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5790         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5791         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5792         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5793         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5794         { 0, 0, 0, 0 }
5795 };
5796 
5797 static const bitmask_transtbl oflag_tbl[] = {
5798 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5799 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5800 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5801 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5802 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5803 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5804 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5805 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5806 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5807 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5808 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5809 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5810 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5811 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5812 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5813 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5814 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5815 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5816 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5817 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5818 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5819 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5820 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5821 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5822 	{ 0, 0, 0, 0 }
5823 };
5824 
5825 static const bitmask_transtbl cflag_tbl[] = {
5826 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5827 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5828 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5829 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5830 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5831 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5832 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5833 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5834 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5835 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5836 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5837 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5838 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5839 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5840 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5841 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5842 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5843 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5844 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5845 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5846 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5847 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5848 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5849 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5850 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5851 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5852 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5853 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5854 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5855 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5856 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5857 	{ 0, 0, 0, 0 }
5858 };
5859 
5860 static const bitmask_transtbl lflag_tbl[] = {
5861   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5862   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5863   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5864   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5865   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5866   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5867   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5868   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5869   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5870   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5871   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5872   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5873   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5874   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5875   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5876   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5877   { 0, 0, 0, 0 }
5878 };
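
/*
 * Each bitmask_transtbl row above is (target_mask, target_bits, host_mask,
 * host_bits): whenever the masked target value matches target_bits, the
 * corresponding host_bits are set in the result, and the reverse mapping is
 * used on the way back.  For example, a guest c_cflag of B9600 | CS8 | CREAD
 * is rebuilt group by group into the equivalent host value.
 */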
5879 
5880 static void target_to_host_termios (void *dst, const void *src)
5881 {
5882     struct host_termios *host = dst;
5883     const struct target_termios *target = src;
5884 
5885     host->c_iflag =
5886         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5887     host->c_oflag =
5888         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5889     host->c_cflag =
5890         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5891     host->c_lflag =
5892         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5893     host->c_line = target->c_line;
5894 
5895     memset(host->c_cc, 0, sizeof(host->c_cc));
5896     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5897     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5898     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5899     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5900     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5901     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5902     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5903     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5904     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5905     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5906     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5907     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5908     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5909     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5910     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5911     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5912     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5913 }
5914 
5915 static void host_to_target_termios (void *dst, const void *src)
5916 {
5917     struct target_termios *target = dst;
5918     const struct host_termios *host = src;
5919 
5920     target->c_iflag =
5921         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5922     target->c_oflag =
5923         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5924     target->c_cflag =
5925         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5926     target->c_lflag =
5927         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5928     target->c_line = host->c_line;
5929 
5930     memset(target->c_cc, 0, sizeof(target->c_cc));
5931     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5932     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5933     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5934     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5935     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5936     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5937     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5938     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5939     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5940     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5941     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5942     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5943     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5944     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5945     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5946     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5947     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5948 }
5949 
5950 static const StructEntry struct_termios_def = {
5951     .convert = { host_to_target_termios, target_to_host_termios },
5952     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5953     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5954     .print = print_termios,
5955 };
5956 
5957 static const bitmask_transtbl mmap_flags_tbl[] = {
5958     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5959     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5960     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5961     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5962       MAP_ANONYMOUS, MAP_ANONYMOUS },
5963     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5964       MAP_GROWSDOWN, MAP_GROWSDOWN },
5965     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5966       MAP_DENYWRITE, MAP_DENYWRITE },
5967     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5968       MAP_EXECUTABLE, MAP_EXECUTABLE },
5969     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5970     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5971       MAP_NORESERVE, MAP_NORESERVE },
5972     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5973     /* MAP_STACK had been ignored by the kernel for quite some time.
5974        Recognize it for the target insofar as we do not want to pass
5975        it through to the host.  */
5976     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5977     { 0, 0, 0, 0 }
5978 };
5979 
5980 /*
5981  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5982  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5983  */
5984 #if defined(TARGET_I386)
5985 
5986 /* NOTE: there is really only one LDT shared by all the threads */
5987 static uint8_t *ldt_table;
5988 
5989 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5990 {
5991     int size;
5992     void *p;
5993 
5994     if (!ldt_table)
5995         return 0;
5996     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5997     if (size > bytecount)
5998         size = bytecount;
5999     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6000     if (!p)
6001         return -TARGET_EFAULT;
6002     /* ??? Should this be byteswapped?  */
6003     memcpy(p, ldt_table, size);
6004     unlock_user(p, ptr, size);
6005     return size;
6006 }
6007 
6008 /* XXX: add locking support */
6009 static abi_long write_ldt(CPUX86State *env,
6010                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6011 {
6012     struct target_modify_ldt_ldt_s ldt_info;
6013     struct target_modify_ldt_ldt_s *target_ldt_info;
6014     int seg_32bit, contents, read_exec_only, limit_in_pages;
6015     int seg_not_present, useable, lm;
6016     uint32_t *lp, entry_1, entry_2;
6017 
6018     if (bytecount != sizeof(ldt_info))
6019         return -TARGET_EINVAL;
6020     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6021         return -TARGET_EFAULT;
6022     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6023     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6024     ldt_info.limit = tswap32(target_ldt_info->limit);
6025     ldt_info.flags = tswap32(target_ldt_info->flags);
6026     unlock_user_struct(target_ldt_info, ptr, 0);
6027 
6028     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6029         return -TARGET_EINVAL;
6030     seg_32bit = ldt_info.flags & 1;
6031     contents = (ldt_info.flags >> 1) & 3;
6032     read_exec_only = (ldt_info.flags >> 3) & 1;
6033     limit_in_pages = (ldt_info.flags >> 4) & 1;
6034     seg_not_present = (ldt_info.flags >> 5) & 1;
6035     useable = (ldt_info.flags >> 6) & 1;
6036 #ifdef TARGET_ABI32
6037     lm = 0;
6038 #else
6039     lm = (ldt_info.flags >> 7) & 1;
6040 #endif
6041     if (contents == 3) {
6042         if (oldmode)
6043             return -TARGET_EINVAL;
6044         if (seg_not_present == 0)
6045             return -TARGET_EINVAL;
6046     }
6047     /* allocate the LDT */
6048     if (!ldt_table) {
6049         env->ldt.base = target_mmap(0,
6050                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6051                                     PROT_READ|PROT_WRITE,
6052                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6053         if (env->ldt.base == -1)
6054             return -TARGET_ENOMEM;
6055         memset(g2h_untagged(env->ldt.base), 0,
6056                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6057         env->ldt.limit = 0xffff;
6058         ldt_table = g2h_untagged(env->ldt.base);
6059     }
6060 
6061     /* NOTE: same code as Linux kernel */
6062     /* Allow LDTs to be cleared by the user. */
6063     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6064         if (oldmode ||
6065             (contents == 0             &&
6066              read_exec_only == 1       &&
6067              seg_32bit == 0            &&
6068              limit_in_pages == 0       &&
6069              seg_not_present == 1      &&
6070              useable == 0 )) {
6071             entry_1 = 0;
6072             entry_2 = 0;
6073             goto install;
6074         }
6075     }
6076 
6077     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6078         (ldt_info.limit & 0x0ffff);
6079     entry_2 = (ldt_info.base_addr & 0xff000000) |
6080         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6081         (ldt_info.limit & 0xf0000) |
6082         ((read_exec_only ^ 1) << 9) |
6083         (contents << 10) |
6084         ((seg_not_present ^ 1) << 15) |
6085         (seg_32bit << 22) |
6086         (limit_in_pages << 23) |
6087         (lm << 21) |
6088         0x7000;
6089     if (!oldmode)
6090         entry_2 |= (useable << 20);
6091 
6092     /* Install the new entry ...  */
6093 install:
6094     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6095     lp[0] = tswap32(entry_1);
6096     lp[1] = tswap32(entry_2);
6097     return 0;
6098 }
6099 
6100 /* specific and weird i386 syscalls */
6101 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6102                               unsigned long bytecount)
6103 {
6104     abi_long ret;
6105 
6106     switch (func) {
6107     case 0:
6108         ret = read_ldt(ptr, bytecount);
6109         break;
6110     case 1:
6111         ret = write_ldt(env, ptr, bytecount, 1);
6112         break;
6113     case 0x11:
6114         ret = write_ldt(env, ptr, bytecount, 0);
6115         break;
6116     default:
6117         ret = -TARGET_ENOSYS;
6118         break;
6119     }
6120     return ret;
6121 }
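
/*
 * Illustrative sketch (not compiled): how a 32-bit x86 guest installs an LDT
 * entry via modify_ldt(), which do_modify_ldt() above emulates (func 0 reads
 * the table, 1 writes an entry in the old format, 0x11 in the new format).
 * struct user_desc comes from the guest's <asm/ldt.h>; the slot number and
 * base address are only examples.
 */
#if 0
static int example_install_ldt_entry(void *base)
{
    struct user_desc ud = {
        .entry_number = 0,
        .base_addr = (unsigned long)base,
        .limit = 0xfffff,               /* 4 GiB with limit_in_pages set */
        .seg_32bit = 1,
        .limit_in_pages = 1,
        .useable = 1,
    };

    return syscall(SYS_modify_ldt, 0x11, &ud, sizeof(ud));
}
#endif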
6122 
6123 #if defined(TARGET_ABI32)
6124 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6125 {
6126     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6127     struct target_modify_ldt_ldt_s ldt_info;
6128     struct target_modify_ldt_ldt_s *target_ldt_info;
6129     int seg_32bit, contents, read_exec_only, limit_in_pages;
6130     int seg_not_present, useable, lm;
6131     uint32_t *lp, entry_1, entry_2;
6132     int i;
6133 
6134     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6135     if (!target_ldt_info)
6136         return -TARGET_EFAULT;
6137     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6138     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6139     ldt_info.limit = tswap32(target_ldt_info->limit);
6140     ldt_info.flags = tswap32(target_ldt_info->flags);
6141     if (ldt_info.entry_number == -1) {
6142         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6143             if (gdt_table[i] == 0) {
6144                 ldt_info.entry_number = i;
6145                 target_ldt_info->entry_number = tswap32(i);
6146                 break;
6147             }
6148         }
6149     }
6150     unlock_user_struct(target_ldt_info, ptr, 1);
6151 
6152     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6153         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6154         return -TARGET_EINVAL;
6155     seg_32bit = ldt_info.flags & 1;
6156     contents = (ldt_info.flags >> 1) & 3;
6157     read_exec_only = (ldt_info.flags >> 3) & 1;
6158     limit_in_pages = (ldt_info.flags >> 4) & 1;
6159     seg_not_present = (ldt_info.flags >> 5) & 1;
6160     useable = (ldt_info.flags >> 6) & 1;
6161 #ifdef TARGET_ABI32
6162     lm = 0;
6163 #else
6164     lm = (ldt_info.flags >> 7) & 1;
6165 #endif
6166 
6167     if (contents == 3) {
6168         if (seg_not_present == 0)
6169             return -TARGET_EINVAL;
6170     }
6171 
6172     /* NOTE: same code as Linux kernel */
6173     /* Allow LDTs to be cleared by the user. */
6174     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6175         if ((contents == 0             &&
6176              read_exec_only == 1       &&
6177              seg_32bit == 0            &&
6178              limit_in_pages == 0       &&
6179              seg_not_present == 1      &&
6180              useable == 0 )) {
6181             entry_1 = 0;
6182             entry_2 = 0;
6183             goto install;
6184         }
6185     }
6186 
6187     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6188         (ldt_info.limit & 0x0ffff);
6189     entry_2 = (ldt_info.base_addr & 0xff000000) |
6190         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6191         (ldt_info.limit & 0xf0000) |
6192         ((read_exec_only ^ 1) << 9) |
6193         (contents << 10) |
6194         ((seg_not_present ^ 1) << 15) |
6195         (seg_32bit << 22) |
6196         (limit_in_pages << 23) |
6197         (useable << 20) |
6198         (lm << 21) |
6199         0x7000;
6200 
6201     /* Install the new entry ...  */
6202 install:
6203     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6204     lp[0] = tswap32(entry_1);
6205     lp[1] = tswap32(entry_2);
6206     return 0;
6207 }
6208 
6209 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6210 {
6211     struct target_modify_ldt_ldt_s *target_ldt_info;
6212     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6213     uint32_t base_addr, limit, flags;
6214     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6215     int seg_not_present, useable, lm;
6216     uint32_t *lp, entry_1, entry_2;
6217 
6218     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6219     if (!target_ldt_info)
6220         return -TARGET_EFAULT;
6221     idx = tswap32(target_ldt_info->entry_number);
6222     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6223         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6224         unlock_user_struct(target_ldt_info, ptr, 1);
6225         return -TARGET_EINVAL;
6226     }
6227     lp = (uint32_t *)(gdt_table + idx);
6228     entry_1 = tswap32(lp[0]);
6229     entry_2 = tswap32(lp[1]);
6230 
6231     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6232     contents = (entry_2 >> 10) & 3;
6233     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6234     seg_32bit = (entry_2 >> 22) & 1;
6235     limit_in_pages = (entry_2 >> 23) & 1;
6236     useable = (entry_2 >> 20) & 1;
6237 #ifdef TARGET_ABI32
6238     lm = 0;
6239 #else
6240     lm = (entry_2 >> 21) & 1;
6241 #endif
6242     flags = (seg_32bit << 0) | (contents << 1) |
6243         (read_exec_only << 3) | (limit_in_pages << 4) |
6244         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6245     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6246     base_addr = (entry_1 >> 16) |
6247         (entry_2 & 0xff000000) |
6248         ((entry_2 & 0xff) << 16);
6249     target_ldt_info->base_addr = tswapal(base_addr);
6250     target_ldt_info->limit = tswap32(limit);
6251     target_ldt_info->flags = tswap32(flags);
6252     unlock_user_struct(target_ldt_info, ptr, 1);
6253     return 0;
6254 }
6255 
6256 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6257 {
6258     return -TARGET_ENOSYS;
6259 }
6260 #else
6261 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6262 {
6263     abi_long ret = 0;
6264     abi_ulong val;
6265     int idx;
6266 
6267     switch(code) {
6268     case TARGET_ARCH_SET_GS:
6269     case TARGET_ARCH_SET_FS:
6270         if (code == TARGET_ARCH_SET_GS)
6271             idx = R_GS;
6272         else
6273             idx = R_FS;
6274         cpu_x86_load_seg(env, idx, 0);
6275         env->segs[idx].base = addr;
6276         break;
6277     case TARGET_ARCH_GET_GS:
6278     case TARGET_ARCH_GET_FS:
6279         if (code == TARGET_ARCH_GET_GS)
6280             idx = R_GS;
6281         else
6282             idx = R_FS;
6283         val = env->segs[idx].base;
6284         if (put_user(val, addr, abi_ulong))
6285             ret = -TARGET_EFAULT;
6286         break;
6287     default:
6288         ret = -TARGET_EINVAL;
6289         break;
6290     }
6291     return ret;
6292 }
6293 #endif /* defined(TARGET_ABI32) */
6294 
6295 #endif /* defined(TARGET_I386) */
6296 
6297 #define NEW_STACK_SIZE 0x40000
6298 
6299 
6300 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6301 typedef struct {
6302     CPUArchState *env;
6303     pthread_mutex_t mutex;
6304     pthread_cond_t cond;
6305     pthread_t thread;
6306     uint32_t tid;
6307     abi_ulong child_tidptr;
6308     abi_ulong parent_tidptr;
6309     sigset_t sigmask;
6310 } new_thread_info;
6311 
6312 static void *clone_func(void *arg)
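/*
 * Start routine for guest threads created by do_fork() with CLONE_VM: the
 * parent holds clone_lock while it finishes setting up the new CPU state;
 * the child publishes its TID, restores its signal mask, signals info->cond
 * to report readiness, and then briefly takes clone_lock so it does not
 * enter cpu_loop() before the parent is done.
 */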
6313 {
6314     new_thread_info *info = arg;
6315     CPUArchState *env;
6316     CPUState *cpu;
6317     TaskState *ts;
6318 
6319     rcu_register_thread();
6320     tcg_register_thread();
6321     env = info->env;
6322     cpu = env_cpu(env);
6323     thread_cpu = cpu;
6324     ts = (TaskState *)cpu->opaque;
6325     info->tid = sys_gettid();
6326     task_settid(ts);
6327     if (info->child_tidptr)
6328         put_user_u32(info->tid, info->child_tidptr);
6329     if (info->parent_tidptr)
6330         put_user_u32(info->tid, info->parent_tidptr);
6331     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6332     /* Enable signals.  */
6333     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6334     /* Signal to the parent that we're ready.  */
6335     pthread_mutex_lock(&info->mutex);
6336     pthread_cond_broadcast(&info->cond);
6337     pthread_mutex_unlock(&info->mutex);
6338     /* Wait until the parent has finished initializing the tls state.  */
6339     pthread_mutex_lock(&clone_lock);
6340     pthread_mutex_unlock(&clone_lock);
6341     cpu_loop(env);
6342     /* never exits */
6343     return NULL;
6344 }
6345 
6346 /* do_fork() must return host values and target errnos (unlike most
6347    do_*() functions). */
6348 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6349                    abi_ulong parent_tidptr, target_ulong newtls,
6350                    abi_ulong child_tidptr)
6351 {
6352     CPUState *cpu = env_cpu(env);
6353     int ret;
6354     TaskState *ts;
6355     CPUState *new_cpu;
6356     CPUArchState *new_env;
6357     sigset_t sigmask;
6358 
6359     flags &= ~CLONE_IGNORED_FLAGS;
6360 
6361     /* Emulate vfork() with fork() */
6362     if (flags & CLONE_VFORK)
6363         flags &= ~(CLONE_VFORK | CLONE_VM);
6364 
6365     if (flags & CLONE_VM) {
6366         TaskState *parent_ts = (TaskState *)cpu->opaque;
6367         new_thread_info info;
6368         pthread_attr_t attr;
6369 
6370         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6371             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6372             return -TARGET_EINVAL;
6373         }
6374 
6375         ts = g_new0(TaskState, 1);
6376         init_task_state(ts);
6377 
6378         /* Grab a mutex so that thread setup appears atomic.  */
6379         pthread_mutex_lock(&clone_lock);
6380 
6381         /*
6382          * If this is our first additional thread, we need to ensure we
6383          * generate code for parallel execution and flush old translations.
6384          * Do this now so that the copy gets CF_PARALLEL too.
6385          */
6386         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6387             cpu->tcg_cflags |= CF_PARALLEL;
6388             tb_flush(cpu);
6389         }
6390 
6391         /* we create a new CPU instance. */
6392         new_env = cpu_copy(env);
6393         /* Init regs that differ from the parent.  */
6394         cpu_clone_regs_child(new_env, newsp, flags);
6395         cpu_clone_regs_parent(env, flags);
6396         new_cpu = env_cpu(new_env);
6397         new_cpu->opaque = ts;
6398         ts->bprm = parent_ts->bprm;
6399         ts->info = parent_ts->info;
6400         ts->signal_mask = parent_ts->signal_mask;
6401 
6402         if (flags & CLONE_CHILD_CLEARTID) {
6403             ts->child_tidptr = child_tidptr;
6404         }
6405 
6406         if (flags & CLONE_SETTLS) {
6407             cpu_set_tls (new_env, newtls);
6408         }
6409 
6410         memset(&info, 0, sizeof(info));
6411         pthread_mutex_init(&info.mutex, NULL);
6412         pthread_mutex_lock(&info.mutex);
6413         pthread_cond_init(&info.cond, NULL);
6414         info.env = new_env;
6415         if (flags & CLONE_CHILD_SETTID) {
6416             info.child_tidptr = child_tidptr;
6417         }
6418         if (flags & CLONE_PARENT_SETTID) {
6419             info.parent_tidptr = parent_tidptr;
6420         }
6421 
6422         ret = pthread_attr_init(&attr);
6423         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6424         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6425         /* It is not safe to deliver signals until the child has finished
6426            initializing, so temporarily block all signals.  */
6427         sigfillset(&sigmask);
6428         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6429         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6430 
6431         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6432         /* TODO: Free new CPU state if thread creation failed.  */
6433 
6434         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6435         pthread_attr_destroy(&attr);
6436         if (ret == 0) {
6437             /* Wait for the child to initialize.  */
6438             pthread_cond_wait(&info.cond, &info.mutex);
6439             ret = info.tid;
6440         } else {
6441             ret = -1;
6442         }
6443         pthread_mutex_unlock(&info.mutex);
6444         pthread_cond_destroy(&info.cond);
6445         pthread_mutex_destroy(&info.mutex);
6446         pthread_mutex_unlock(&clone_lock);
6447     } else {
6448         /* if CLONE_VM is not set, we treat it as a fork */
6449         if (flags & CLONE_INVALID_FORK_FLAGS) {
6450             return -TARGET_EINVAL;
6451         }
6452 
6453         /* We can't support custom termination signals */
6454         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6455             return -TARGET_EINVAL;
6456         }
6457 
6458         if (block_signals()) {
6459             return -TARGET_ERESTARTSYS;
6460         }
6461 
6462         fork_start();
6463         ret = fork();
6464         if (ret == 0) {
6465             /* Child Process.  */
6466             cpu_clone_regs_child(env, newsp, flags);
6467             fork_end(1);
6468             /* There is a race condition here.  The parent process could
6469                theoretically read the TID in the child process before the child
6470                tid is set.  This would require using either ptrace
6471                (not implemented) or having *_tidptr point at a shared memory
6472                mapping.  We can't repeat the spinlock hack used above because
6473                the child process gets its own copy of the lock.  */
6474             if (flags & CLONE_CHILD_SETTID)
6475                 put_user_u32(sys_gettid(), child_tidptr);
6476             if (flags & CLONE_PARENT_SETTID)
6477                 put_user_u32(sys_gettid(), parent_tidptr);
6478             ts = (TaskState *)cpu->opaque;
6479             if (flags & CLONE_SETTLS)
6480                 cpu_set_tls (env, newtls);
6481             if (flags & CLONE_CHILD_CLEARTID)
6482                 ts->child_tidptr = child_tidptr;
6483         } else {
6484             cpu_clone_regs_parent(env, flags);
6485             fork_end(0);
6486         }
6487     }
6488     return ret;
6489 }
6490 
6491 /* warning : doesn't handle linux specific flags... */
6492 /* warning: does not handle Linux-specific flags... */
6493 {
6494     int ret;
6495 
6496     switch(cmd) {
6497     case TARGET_F_DUPFD:
6498     case TARGET_F_GETFD:
6499     case TARGET_F_SETFD:
6500     case TARGET_F_GETFL:
6501     case TARGET_F_SETFL:
6502     case TARGET_F_OFD_GETLK:
6503     case TARGET_F_OFD_SETLK:
6504     case TARGET_F_OFD_SETLKW:
6505         ret = cmd;
6506         break;
6507     case TARGET_F_GETLK:
6508         ret = F_GETLK64;
6509         break;
6510     case TARGET_F_SETLK:
6511         ret = F_SETLK64;
6512         break;
6513     case TARGET_F_SETLKW:
6514         ret = F_SETLKW64;
6515         break;
6516     case TARGET_F_GETOWN:
6517         ret = F_GETOWN;
6518         break;
6519     case TARGET_F_SETOWN:
6520         ret = F_SETOWN;
6521         break;
6522     case TARGET_F_GETSIG:
6523         ret = F_GETSIG;
6524         break;
6525     case TARGET_F_SETSIG:
6526         ret = F_SETSIG;
6527         break;
6528 #if TARGET_ABI_BITS == 32
6529     case TARGET_F_GETLK64:
6530         ret = F_GETLK64;
6531         break;
6532     case TARGET_F_SETLK64:
6533         ret = F_SETLK64;
6534         break;
6535     case TARGET_F_SETLKW64:
6536         ret = F_SETLKW64;
6537         break;
6538 #endif
6539     case TARGET_F_SETLEASE:
6540         ret = F_SETLEASE;
6541         break;
6542     case TARGET_F_GETLEASE:
6543         ret = F_GETLEASE;
6544         break;
6545 #ifdef F_DUPFD_CLOEXEC
6546     case TARGET_F_DUPFD_CLOEXEC:
6547         ret = F_DUPFD_CLOEXEC;
6548         break;
6549 #endif
6550     case TARGET_F_NOTIFY:
6551         ret = F_NOTIFY;
6552         break;
6553 #ifdef F_GETOWN_EX
6554     case TARGET_F_GETOWN_EX:
6555         ret = F_GETOWN_EX;
6556         break;
6557 #endif
6558 #ifdef F_SETOWN_EX
6559     case TARGET_F_SETOWN_EX:
6560         ret = F_SETOWN_EX;
6561         break;
6562 #endif
6563 #ifdef F_SETPIPE_SZ
6564     case TARGET_F_SETPIPE_SZ:
6565         ret = F_SETPIPE_SZ;
6566         break;
6567     case TARGET_F_GETPIPE_SZ:
6568         ret = F_GETPIPE_SZ;
6569         break;
6570 #endif
6571 #ifdef F_ADD_SEALS
6572     case TARGET_F_ADD_SEALS:
6573         ret = F_ADD_SEALS;
6574         break;
6575     case TARGET_F_GET_SEALS:
6576         ret = F_GET_SEALS;
6577         break;
6578 #endif
6579     default:
6580         ret = -TARGET_EINVAL;
6581         break;
6582     }
6583 
6584 #if defined(__powerpc64__)
6585     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6586      * 14, which are not supported by the kernel. The glibc fcntl wrapper
6587      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6588      * the syscall directly, adjust to what the kernel supports.
6589      */
6590     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6591         ret -= F_GETLK64 - 5;
6592     }
6593 #endif
6594 
6595     return ret;
6596 }
6597 
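/*
 * FLOCK_TRANSTBL is an X-macro style table: TRANSTBL_CONVERT is redefined
 * before each expansion below so the same list of lock types generates both
 * the target-to-host and the host-to-target switch cases.
 */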
6598 #define FLOCK_TRANSTBL \
6599     switch (type) { \
6600     TRANSTBL_CONVERT(F_RDLCK); \
6601     TRANSTBL_CONVERT(F_WRLCK); \
6602     TRANSTBL_CONVERT(F_UNLCK); \
6603     }
6604 
6605 static int target_to_host_flock(int type)
6606 {
6607 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6608     FLOCK_TRANSTBL
6609 #undef  TRANSTBL_CONVERT
6610     return -TARGET_EINVAL;
6611 }
6612 
6613 static int host_to_target_flock(int type)
6614 {
6615 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6616     FLOCK_TRANSTBL
6617 #undef  TRANSTBL_CONVERT
6618     /* If we don't know how to convert the value coming
6619      * from the host, we copy it to the target field as-is.
6620      */
6621     return type;
6622 }
6623 
6624 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6625                                             abi_ulong target_flock_addr)
6626 {
6627     struct target_flock *target_fl;
6628     int l_type;
6629 
6630     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6631         return -TARGET_EFAULT;
6632     }
6633 
6634     __get_user(l_type, &target_fl->l_type);
6635     l_type = target_to_host_flock(l_type);
6636     if (l_type < 0) {
6637         return l_type;
6638     }
6639     fl->l_type = l_type;
6640     __get_user(fl->l_whence, &target_fl->l_whence);
6641     __get_user(fl->l_start, &target_fl->l_start);
6642     __get_user(fl->l_len, &target_fl->l_len);
6643     __get_user(fl->l_pid, &target_fl->l_pid);
6644     unlock_user_struct(target_fl, target_flock_addr, 0);
6645     return 0;
6646 }
6647 
6648 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6649                                           const struct flock64 *fl)
6650 {
6651     struct target_flock *target_fl;
6652     short l_type;
6653 
6654     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6655         return -TARGET_EFAULT;
6656     }
6657 
6658     l_type = host_to_target_flock(fl->l_type);
6659     __put_user(l_type, &target_fl->l_type);
6660     __put_user(fl->l_whence, &target_fl->l_whence);
6661     __put_user(fl->l_start, &target_fl->l_start);
6662     __put_user(fl->l_len, &target_fl->l_len);
6663     __put_user(fl->l_pid, &target_fl->l_pid);
6664     unlock_user_struct(target_fl, target_flock_addr, 1);
6665     return 0;
6666 }
6667 
6668 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6669 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6670 
6671 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6672 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6673                                                    abi_ulong target_flock_addr)
6674 {
6675     struct target_oabi_flock64 *target_fl;
6676     int l_type;
6677 
6678     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6679         return -TARGET_EFAULT;
6680     }
6681 
6682     __get_user(l_type, &target_fl->l_type);
6683     l_type = target_to_host_flock(l_type);
6684     if (l_type < 0) {
6685         return l_type;
6686     }
6687     fl->l_type = l_type;
6688     __get_user(fl->l_whence, &target_fl->l_whence);
6689     __get_user(fl->l_start, &target_fl->l_start);
6690     __get_user(fl->l_len, &target_fl->l_len);
6691     __get_user(fl->l_pid, &target_fl->l_pid);
6692     unlock_user_struct(target_fl, target_flock_addr, 0);
6693     return 0;
6694 }
6695 
6696 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6697                                                  const struct flock64 *fl)
6698 {
6699     struct target_oabi_flock64 *target_fl;
6700     short l_type;
6701 
6702     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6703         return -TARGET_EFAULT;
6704     }
6705 
6706     l_type = host_to_target_flock(fl->l_type);
6707     __put_user(l_type, &target_fl->l_type);
6708     __put_user(fl->l_whence, &target_fl->l_whence);
6709     __put_user(fl->l_start, &target_fl->l_start);
6710     __put_user(fl->l_len, &target_fl->l_len);
6711     __put_user(fl->l_pid, &target_fl->l_pid);
6712     unlock_user_struct(target_fl, target_flock_addr, 1);
6713     return 0;
6714 }
6715 #endif
6716 
6717 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6718                                               abi_ulong target_flock_addr)
6719 {
6720     struct target_flock64 *target_fl;
6721     int l_type;
6722 
6723     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6724         return -TARGET_EFAULT;
6725     }
6726 
6727     __get_user(l_type, &target_fl->l_type);
6728     l_type = target_to_host_flock(l_type);
6729     if (l_type < 0) {
6730         return l_type;
6731     }
6732     fl->l_type = l_type;
6733     __get_user(fl->l_whence, &target_fl->l_whence);
6734     __get_user(fl->l_start, &target_fl->l_start);
6735     __get_user(fl->l_len, &target_fl->l_len);
6736     __get_user(fl->l_pid, &target_fl->l_pid);
6737     unlock_user_struct(target_fl, target_flock_addr, 0);
6738     return 0;
6739 }
6740 
6741 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6742                                             const struct flock64 *fl)
6743 {
6744     struct target_flock64 *target_fl;
6745     short l_type;
6746 
6747     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6748         return -TARGET_EFAULT;
6749     }
6750 
6751     l_type = host_to_target_flock(fl->l_type);
6752     __put_user(l_type, &target_fl->l_type);
6753     __put_user(fl->l_whence, &target_fl->l_whence);
6754     __put_user(fl->l_start, &target_fl->l_start);
6755     __put_user(fl->l_len, &target_fl->l_len);
6756     __put_user(fl->l_pid, &target_fl->l_pid);
6757     unlock_user_struct(target_fl, target_flock_addr, 1);
6758     return 0;
6759 }
6760 
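/*
 * Note that lock arguments are always converted through the host's 64-bit
 * struct flock64: target_to_host_fcntl_cmd() maps F_GETLK and friends to the
 * *64 commands, so the host side always operates on 64-bit file offsets.
 */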
6761 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6762 {
6763     struct flock64 fl64;
6764 #ifdef F_GETOWN_EX
6765     struct f_owner_ex fox;
6766     struct target_f_owner_ex *target_fox;
6767 #endif
6768     abi_long ret;
6769     int host_cmd = target_to_host_fcntl_cmd(cmd);
6770 
6771     if (host_cmd == -TARGET_EINVAL)
6772         return host_cmd;
6773 
6774     switch(cmd) {
6775     case TARGET_F_GETLK:
6776         ret = copy_from_user_flock(&fl64, arg);
6777         if (ret) {
6778             return ret;
6779         }
6780         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6781         if (ret == 0) {
6782             ret = copy_to_user_flock(arg, &fl64);
6783         }
6784         break;
6785 
6786     case TARGET_F_SETLK:
6787     case TARGET_F_SETLKW:
6788         ret = copy_from_user_flock(&fl64, arg);
6789         if (ret) {
6790             return ret;
6791         }
6792         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6793         break;
6794 
6795     case TARGET_F_GETLK64:
6796     case TARGET_F_OFD_GETLK:
6797         ret = copy_from_user_flock64(&fl64, arg);
6798         if (ret) {
6799             return ret;
6800         }
6801         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6802         if (ret == 0) {
6803             ret = copy_to_user_flock64(arg, &fl64);
6804         }
6805         break;
6806     case TARGET_F_SETLK64:
6807     case TARGET_F_SETLKW64:
6808     case TARGET_F_OFD_SETLK:
6809     case TARGET_F_OFD_SETLKW:
6810         ret = copy_from_user_flock64(&fl64, arg);
6811         if (ret) {
6812             return ret;
6813         }
6814         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6815         break;
6816 
6817     case TARGET_F_GETFL:
6818         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6819         if (ret >= 0) {
6820             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6821         }
6822         break;
6823 
6824     case TARGET_F_SETFL:
6825         ret = get_errno(safe_fcntl(fd, host_cmd,
6826                                    target_to_host_bitmask(arg,
6827                                                           fcntl_flags_tbl)));
6828         break;
6829 
6830 #ifdef F_GETOWN_EX
6831     case TARGET_F_GETOWN_EX:
6832         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6833         if (ret >= 0) {
6834             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6835                 return -TARGET_EFAULT;
6836             target_fox->type = tswap32(fox.type);
6837             target_fox->pid = tswap32(fox.pid);
6838             unlock_user_struct(target_fox, arg, 1);
6839         }
6840         break;
6841 #endif
6842 
6843 #ifdef F_SETOWN_EX
6844     case TARGET_F_SETOWN_EX:
6845         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6846             return -TARGET_EFAULT;
6847         fox.type = tswap32(target_fox->type);
6848         fox.pid = tswap32(target_fox->pid);
6849         unlock_user_struct(target_fox, arg, 0);
6850         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6851         break;
6852 #endif
6853 
6854     case TARGET_F_SETSIG:
6855         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6856         break;
6857 
6858     case TARGET_F_GETSIG:
6859         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6860         break;
6861 
6862     case TARGET_F_SETOWN:
6863     case TARGET_F_GETOWN:
6864     case TARGET_F_SETLEASE:
6865     case TARGET_F_GETLEASE:
6866     case TARGET_F_SETPIPE_SZ:
6867     case TARGET_F_GETPIPE_SZ:
6868     case TARGET_F_ADD_SEALS:
6869     case TARGET_F_GET_SEALS:
6870         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6871         break;
6872 
6873     default:
6874         ret = get_errno(safe_fcntl(fd, cmd, arg));
6875         break;
6876     }
6877     return ret;
6878 }
6879 
6880 #ifdef USE_UID16
6881 
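/*
 * Helpers for the legacy 16-bit uid/gid syscalls: high2low{uid,gid} clamp
 * 32-bit IDs that do not fit into 16 bits to 65534 (matching the kernel's
 * default overflowuid/overflowgid), while low2high{uid,gid} preserve the
 * special value -1 ("leave unchanged") when it arrives as 16-bit 0xffff.
 */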
6882 static inline int high2lowuid(int uid)
6883 {
6884     if (uid > 65535)
6885         return 65534;
6886     else
6887         return uid;
6888 }
6889 
6890 static inline int high2lowgid(int gid)
6891 {
6892     if (gid > 65535)
6893         return 65534;
6894     else
6895         return gid;
6896 }
6897 
6898 static inline int low2highuid(int uid)
6899 {
6900     if ((int16_t)uid == -1)
6901         return -1;
6902     else
6903         return uid;
6904 }
6905 
6906 static inline int low2highgid(int gid)
6907 {
6908     if ((int16_t)gid == -1)
6909         return -1;
6910     else
6911         return gid;
6912 }
6913 static inline int tswapid(int id)
6914 {
6915     return tswap16(id);
6916 }
6917 
6918 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6919 
6920 #else /* !USE_UID16 */
6921 static inline int high2lowuid(int uid)
6922 {
6923     return uid;
6924 }
6925 static inline int high2lowgid(int gid)
6926 {
6927     return gid;
6928 }
6929 static inline int low2highuid(int uid)
6930 {
6931     return uid;
6932 }
6933 static inline int low2highgid(int gid)
6934 {
6935     return gid;
6936 }
6937 static inline int tswapid(int id)
6938 {
6939     return tswap32(id);
6940 }
6941 
6942 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6943 
6944 #endif /* USE_UID16 */
6945 
6946 /* We must do direct syscalls for setting UID/GID, because we want to
6947  * implement the Linux system call semantics of "change only for this thread",
6948  * not the libc/POSIX semantics of "change for all threads in process".
6949  * (See http://ewontfix.com/17/ for more details.)
6950  * We use the 32-bit version of the syscalls if present; if it is not
6951  * then either the host architecture supports 32-bit UIDs natively with
6952  * the standard syscall, or the 16-bit UID is the best we can do.
6953  */
6954 #ifdef __NR_setuid32
6955 #define __NR_sys_setuid __NR_setuid32
6956 #else
6957 #define __NR_sys_setuid __NR_setuid
6958 #endif
6959 #ifdef __NR_setgid32
6960 #define __NR_sys_setgid __NR_setgid32
6961 #else
6962 #define __NR_sys_setgid __NR_setgid
6963 #endif
6964 #ifdef __NR_setresuid32
6965 #define __NR_sys_setresuid __NR_setresuid32
6966 #else
6967 #define __NR_sys_setresuid __NR_setresuid
6968 #endif
6969 #ifdef __NR_setresgid32
6970 #define __NR_sys_setresgid __NR_setresgid32
6971 #else
6972 #define __NR_sys_setresgid __NR_setresgid
6973 #endif
6974 
6975 _syscall1(int, sys_setuid, uid_t, uid)
6976 _syscall1(int, sys_setgid, gid_t, gid)
6977 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6978 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6979 
6980 void syscall_init(void)
6981 {
6982     IOCTLEntry *ie;
6983     const argtype *arg_type;
6984     int size;
6985 
6986     thunk_init(STRUCT_MAX);
6987 
6988 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6989 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6990 #include "syscall_types.h"
6991 #undef STRUCT
6992 #undef STRUCT_SPECIAL
6993 
6994     /* we patch the ioctl size if necessary. We rely on the fact that
6995        no ioctl has all bits set to '1' in the size field */
6996     ie = ioctl_entries;
6997     while (ie->target_cmd != 0) {
6998         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6999             TARGET_IOC_SIZEMASK) {
7000             arg_type = ie->arg_type;
7001             if (arg_type[0] != TYPE_PTR) {
7002                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7003                         ie->target_cmd);
7004                 exit(1);
7005             }
7006             arg_type++;
7007             size = thunk_type_size(arg_type, 0);
7008             ie->target_cmd = (ie->target_cmd &
7009                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7010                 (size << TARGET_IOC_SIZESHIFT);
7011         }
7012 
7013         /* automatic consistency check if same arch */
7014 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7015     (defined(__x86_64__) && defined(TARGET_X86_64))
7016         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7017             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7018                     ie->name, ie->target_cmd, ie->host_cmd);
7019         }
7020 #endif
7021         ie++;
7022     }
7023 }
7024 
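/*
 * On some 32-bit ABIs a 64-bit syscall argument must start in an even
 * register pair; regpairs_aligned() reports whether a padding argument was
 * inserted, in which case the 64-bit offset actually lives in the following
 * two argument slots (see the truncate64/ftruncate64 helpers below).
 */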
7025 #ifdef TARGET_NR_truncate64
7026 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7027                                          abi_long arg2,
7028                                          abi_long arg3,
7029                                          abi_long arg4)
7030 {
7031     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7032         arg2 = arg3;
7033         arg3 = arg4;
7034     }
7035     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7036 }
7037 #endif
7038 
7039 #ifdef TARGET_NR_ftruncate64
7040 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7041                                           abi_long arg2,
7042                                           abi_long arg3,
7043                                           abi_long arg4)
7044 {
7045     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7046         arg2 = arg3;
7047         arg3 = arg4;
7048     }
7049     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7050 }
7051 #endif
7052 
7053 #if defined(TARGET_NR_timer_settime) || \
7054     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7055 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7056                                                  abi_ulong target_addr)
7057 {
7058     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7059                                 offsetof(struct target_itimerspec,
7060                                          it_interval)) ||
7061         target_to_host_timespec(&host_its->it_value, target_addr +
7062                                 offsetof(struct target_itimerspec,
7063                                          it_value))) {
7064         return -TARGET_EFAULT;
7065     }
7066 
7067     return 0;
7068 }
7069 #endif
7070 
7071 #if defined(TARGET_NR_timer_settime64) || \
7072     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7073 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7074                                                    abi_ulong target_addr)
7075 {
7076     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7077                                   offsetof(struct target__kernel_itimerspec,
7078                                            it_interval)) ||
7079         target_to_host_timespec64(&host_its->it_value, target_addr +
7080                                   offsetof(struct target__kernel_itimerspec,
7081                                            it_value))) {
7082         return -TARGET_EFAULT;
7083     }
7084 
7085     return 0;
7086 }
7087 #endif
7088 
7089 #if ((defined(TARGET_NR_timerfd_gettime) || \
7090       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7091       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7092 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7093                                                  struct itimerspec *host_its)
7094 {
7095     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7096                                                        it_interval),
7097                                 &host_its->it_interval) ||
7098         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7099                                                        it_value),
7100                                 &host_its->it_value)) {
7101         return -TARGET_EFAULT;
7102     }
7103     return 0;
7104 }
7105 #endif
7106 
7107 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7108       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7109       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7110 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7111                                                    struct itimerspec *host_its)
7112 {
7113     if (host_to_target_timespec64(target_addr +
7114                                   offsetof(struct target__kernel_itimerspec,
7115                                            it_interval),
7116                                   &host_its->it_interval) ||
7117         host_to_target_timespec64(target_addr +
7118                                   offsetof(struct target__kernel_itimerspec,
7119                                            it_value),
7120                                   &host_its->it_value)) {
7121         return -TARGET_EFAULT;
7122     }
7123     return 0;
7124 }
7125 #endif
7126 
7127 #if defined(TARGET_NR_adjtimex) || \
7128     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7129 static inline abi_long target_to_host_timex(struct timex *host_tx,
7130                                             abi_long target_addr)
7131 {
7132     struct target_timex *target_tx;
7133 
7134     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7135         return -TARGET_EFAULT;
7136     }
7137 
7138     __get_user(host_tx->modes, &target_tx->modes);
7139     __get_user(host_tx->offset, &target_tx->offset);
7140     __get_user(host_tx->freq, &target_tx->freq);
7141     __get_user(host_tx->maxerror, &target_tx->maxerror);
7142     __get_user(host_tx->esterror, &target_tx->esterror);
7143     __get_user(host_tx->status, &target_tx->status);
7144     __get_user(host_tx->constant, &target_tx->constant);
7145     __get_user(host_tx->precision, &target_tx->precision);
7146     __get_user(host_tx->tolerance, &target_tx->tolerance);
7147     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7148     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7149     __get_user(host_tx->tick, &target_tx->tick);
7150     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7151     __get_user(host_tx->jitter, &target_tx->jitter);
7152     __get_user(host_tx->shift, &target_tx->shift);
7153     __get_user(host_tx->stabil, &target_tx->stabil);
7154     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7155     __get_user(host_tx->calcnt, &target_tx->calcnt);
7156     __get_user(host_tx->errcnt, &target_tx->errcnt);
7157     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7158     __get_user(host_tx->tai, &target_tx->tai);
7159 
7160     unlock_user_struct(target_tx, target_addr, 0);
7161     return 0;
7162 }
7163 
7164 static inline abi_long host_to_target_timex(abi_long target_addr,
7165                                             struct timex *host_tx)
7166 {
7167     struct target_timex *target_tx;
7168 
7169     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7170         return -TARGET_EFAULT;
7171     }
7172 
7173     __put_user(host_tx->modes, &target_tx->modes);
7174     __put_user(host_tx->offset, &target_tx->offset);
7175     __put_user(host_tx->freq, &target_tx->freq);
7176     __put_user(host_tx->maxerror, &target_tx->maxerror);
7177     __put_user(host_tx->esterror, &target_tx->esterror);
7178     __put_user(host_tx->status, &target_tx->status);
7179     __put_user(host_tx->constant, &target_tx->constant);
7180     __put_user(host_tx->precision, &target_tx->precision);
7181     __put_user(host_tx->tolerance, &target_tx->tolerance);
7182     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7183     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7184     __put_user(host_tx->tick, &target_tx->tick);
7185     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7186     __put_user(host_tx->jitter, &target_tx->jitter);
7187     __put_user(host_tx->shift, &target_tx->shift);
7188     __put_user(host_tx->stabil, &target_tx->stabil);
7189     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7190     __put_user(host_tx->calcnt, &target_tx->calcnt);
7191     __put_user(host_tx->errcnt, &target_tx->errcnt);
7192     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7193     __put_user(host_tx->tai, &target_tx->tai);
7194 
7195     unlock_user_struct(target_tx, target_addr, 1);
7196     return 0;
7197 }
7198 #endif
7199 
7200 
7201 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7202 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7203                                               abi_long target_addr)
7204 {
7205     struct target__kernel_timex *target_tx;
7206 
7207     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7208                                  offsetof(struct target__kernel_timex,
7209                                           time))) {
7210         return -TARGET_EFAULT;
7211     }
7212 
7213     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7214         return -TARGET_EFAULT;
7215     }
7216 
7217     __get_user(host_tx->modes, &target_tx->modes);
7218     __get_user(host_tx->offset, &target_tx->offset);
7219     __get_user(host_tx->freq, &target_tx->freq);
7220     __get_user(host_tx->maxerror, &target_tx->maxerror);
7221     __get_user(host_tx->esterror, &target_tx->esterror);
7222     __get_user(host_tx->status, &target_tx->status);
7223     __get_user(host_tx->constant, &target_tx->constant);
7224     __get_user(host_tx->precision, &target_tx->precision);
7225     __get_user(host_tx->tolerance, &target_tx->tolerance);
7226     __get_user(host_tx->tick, &target_tx->tick);
7227     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7228     __get_user(host_tx->jitter, &target_tx->jitter);
7229     __get_user(host_tx->shift, &target_tx->shift);
7230     __get_user(host_tx->stabil, &target_tx->stabil);
7231     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7232     __get_user(host_tx->calcnt, &target_tx->calcnt);
7233     __get_user(host_tx->errcnt, &target_tx->errcnt);
7234     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7235     __get_user(host_tx->tai, &target_tx->tai);
7236 
7237     unlock_user_struct(target_tx, target_addr, 0);
7238     return 0;
7239 }
7240 
7241 static inline abi_long host_to_target_timex64(abi_long target_addr,
7242                                               struct timex *host_tx)
7243 {
7244     struct target__kernel_timex *target_tx;
7245 
7246     if (copy_to_user_timeval64(target_addr +
7247                                offsetof(struct target__kernel_timex, time),
7248                                &host_tx->time)) {
7249         return -TARGET_EFAULT;
7250     }
7251 
7252     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7253         return -TARGET_EFAULT;
7254     }
7255 
7256     __put_user(host_tx->modes, &target_tx->modes);
7257     __put_user(host_tx->offset, &target_tx->offset);
7258     __put_user(host_tx->freq, &target_tx->freq);
7259     __put_user(host_tx->maxerror, &target_tx->maxerror);
7260     __put_user(host_tx->esterror, &target_tx->esterror);
7261     __put_user(host_tx->status, &target_tx->status);
7262     __put_user(host_tx->constant, &target_tx->constant);
7263     __put_user(host_tx->precision, &target_tx->precision);
7264     __put_user(host_tx->tolerance, &target_tx->tolerance);
7265     __put_user(host_tx->tick, &target_tx->tick);
7266     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7267     __put_user(host_tx->jitter, &target_tx->jitter);
7268     __put_user(host_tx->shift, &target_tx->shift);
7269     __put_user(host_tx->stabil, &target_tx->stabil);
7270     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7271     __put_user(host_tx->calcnt, &target_tx->calcnt);
7272     __put_user(host_tx->errcnt, &target_tx->errcnt);
7273     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7274     __put_user(host_tx->tai, &target_tx->tai);
7275 
7276     unlock_user_struct(target_tx, target_addr, 1);
7277     return 0;
7278 }
7279 #endif
7280 
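/*
 * Not every libc exposes sigev_notify_thread_id; when it is missing, fall
 * back to the union member glibc uses internally so SIGEV_THREAD_ID can
 * still be filled in.
 */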
7281 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7282 #define sigev_notify_thread_id _sigev_un._tid
7283 #endif
7284 
7285 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7286                                                abi_ulong target_addr)
7287 {
7288     struct target_sigevent *target_sevp;
7289 
7290     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7291         return -TARGET_EFAULT;
7292     }
7293 
7294     /* This union is awkward on 64 bit systems because it has a 32 bit
7295      * integer and a pointer in it; we follow the conversion approach
7296      * used for handling sigval types in signal.c so the guest should get
7297      * the correct value back even if we did a 64 bit byteswap and it's
7298      * using the 32 bit integer.
7299      */
7300     host_sevp->sigev_value.sival_ptr =
7301         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7302     host_sevp->sigev_signo =
7303         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7304     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7305     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7306 
7307     unlock_user_struct(target_sevp, target_addr, 1);
7308     return 0;
7309 }
7310 
7311 #if defined(TARGET_NR_mlockall)
7312 static inline int target_to_host_mlockall_arg(int arg)
7313 {
7314     int result = 0;
7315 
7316     if (arg & TARGET_MCL_CURRENT) {
7317         result |= MCL_CURRENT;
7318     }
7319     if (arg & TARGET_MCL_FUTURE) {
7320         result |= MCL_FUTURE;
7321     }
7322 #ifdef MCL_ONFAULT
7323     if (arg & TARGET_MCL_ONFAULT) {
7324         result |= MCL_ONFAULT;
7325     }
7326 #endif
7327 
7328     return result;
7329 }
7330 #endif
7331 
7332 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7333      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7334      defined(TARGET_NR_newfstatat))
7335 static inline abi_long host_to_target_stat64(void *cpu_env,
7336                                              abi_ulong target_addr,
7337                                              struct stat *host_st)
7338 {
7339 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7340     if (((CPUARMState *)cpu_env)->eabi) {
7341         struct target_eabi_stat64 *target_st;
7342 
7343         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7344             return -TARGET_EFAULT;
7345         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7346         __put_user(host_st->st_dev, &target_st->st_dev);
7347         __put_user(host_st->st_ino, &target_st->st_ino);
7348 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7349         __put_user(host_st->st_ino, &target_st->__st_ino);
7350 #endif
7351         __put_user(host_st->st_mode, &target_st->st_mode);
7352         __put_user(host_st->st_nlink, &target_st->st_nlink);
7353         __put_user(host_st->st_uid, &target_st->st_uid);
7354         __put_user(host_st->st_gid, &target_st->st_gid);
7355         __put_user(host_st->st_rdev, &target_st->st_rdev);
7356         __put_user(host_st->st_size, &target_st->st_size);
7357         __put_user(host_st->st_blksize, &target_st->st_blksize);
7358         __put_user(host_st->st_blocks, &target_st->st_blocks);
7359         __put_user(host_st->st_atime, &target_st->target_st_atime);
7360         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7361         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7362 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7363         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7364         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7365         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7366 #endif
7367         unlock_user_struct(target_st, target_addr, 1);
7368     } else
7369 #endif
7370     {
7371 #if defined(TARGET_HAS_STRUCT_STAT64)
7372         struct target_stat64 *target_st;
7373 #else
7374         struct target_stat *target_st;
7375 #endif
7376 
7377         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7378             return -TARGET_EFAULT;
7379         memset(target_st, 0, sizeof(*target_st));
7380         __put_user(host_st->st_dev, &target_st->st_dev);
7381         __put_user(host_st->st_ino, &target_st->st_ino);
7382 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7383         __put_user(host_st->st_ino, &target_st->__st_ino);
7384 #endif
7385         __put_user(host_st->st_mode, &target_st->st_mode);
7386         __put_user(host_st->st_nlink, &target_st->st_nlink);
7387         __put_user(host_st->st_uid, &target_st->st_uid);
7388         __put_user(host_st->st_gid, &target_st->st_gid);
7389         __put_user(host_st->st_rdev, &target_st->st_rdev);
7390         /* XXX: better use of kernel struct */
7391         __put_user(host_st->st_size, &target_st->st_size);
7392         __put_user(host_st->st_blksize, &target_st->st_blksize);
7393         __put_user(host_st->st_blocks, &target_st->st_blocks);
7394         __put_user(host_st->st_atime, &target_st->target_st_atime);
7395         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7396         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7397 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7398         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7399         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7400         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7401 #endif
7402         unlock_user_struct(target_st, target_addr, 1);
7403     }
7404 
7405     return 0;
7406 }
7407 #endif
7408 
7409 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7410 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7411                                             abi_ulong target_addr)
7412 {
7413     struct target_statx *target_stx;
7414 
7415     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7416         return -TARGET_EFAULT;
7417     }
7418     memset(target_stx, 0, sizeof(*target_stx));
7419 
7420     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7421     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7422     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7423     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7424     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7425     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7426     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7427     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7428     __put_user(host_stx->stx_size, &target_stx->stx_size);
7429     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7430     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7431     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7432     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7433     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7434     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7435     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7436     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7437     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7438     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7439     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7440     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7441     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7442     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7443 
7444     unlock_user_struct(target_stx, target_addr, 1);
7445 
7446     return 0;
7447 }
7448 #endif
7449 
7450 static int do_sys_futex(int *uaddr, int op, int val,
7451                          const struct timespec *timeout, int *uaddr2,
7452                          int val3)
7453 {
7454 #if HOST_LONG_BITS == 64
7455 #if defined(__NR_futex)
7456     /* the host always has a 64-bit time_t; no _time64 variant is defined */
7457     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7458 
7459 #endif
7460 #else /* HOST_LONG_BITS == 64 */
7461 #if defined(__NR_futex_time64)
7462     if (sizeof(timeout->tv_sec) == 8) {
7463         /* _time64 function on 32bit arch */
7464         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7465     }
7466 #endif
7467 #if defined(__NR_futex)
7468     /* old function on 32bit arch */
7469     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7470 #endif
7471 #endif /* HOST_LONG_BITS == 64 */
7472     g_assert_not_reached();
7473 }
7474 
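/*
 * do_safe_futex() mirrors do_sys_futex() but goes through the safe_syscall
 * wrappers, so the wait can be interrupted by a guest signal, and its result
 * is passed through get_errno() rather than returned as a raw host value.
 */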
7475 static int do_safe_futex(int *uaddr, int op, int val,
7476                          const struct timespec *timeout, int *uaddr2,
7477                          int val3)
7478 {
7479 #if HOST_LONG_BITS == 64
7480 #if defined(__NR_futex)
7481     /* the host always has a 64-bit time_t; no _time64 variant is defined */
7482     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7483 #endif
7484 #else /* HOST_LONG_BITS == 64 */
7485 #if defined(__NR_futex_time64)
7486     if (sizeof(timeout->tv_sec) == 8) {
7487         /* _time64 function on 32bit arch */
7488         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7489                                            val3));
7490     }
7491 #endif
7492 #if defined(__NR_futex)
7493     /* old function on 32bit arch */
7494     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7495 #endif
7496 #endif /* HOST_LONG_BITS == 64 */
7497     return -TARGET_ENOSYS;
7498 }
7499 
7500 /* ??? Using host futex calls even when target atomic operations
7501    are not really atomic probably breaks things.  However, implementing
7502    futexes locally would make futexes shared between multiple processes
7503    tricky.  They are probably useless anyway, because guest atomic
7504    operations won't work across processes either.  */
7505 #if defined(TARGET_NR_futex)
7506 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7507                     target_ulong timeout, target_ulong uaddr2, int val3)
7508 {
7509     struct timespec ts, *pts;
7510     int base_op;
7511 
7512     /* ??? We assume FUTEX_* constants are the same on both host
7513        and target.  */
7514 #ifdef FUTEX_CMD_MASK
7515     base_op = op & FUTEX_CMD_MASK;
7516 #else
7517     base_op = op;
7518 #endif
7519     switch (base_op) {
7520     case FUTEX_WAIT:
7521     case FUTEX_WAIT_BITSET:
7522         if (timeout) {
7523             pts = &ts;
7524             target_to_host_timespec(pts, timeout);
7525         } else {
7526             pts = NULL;
7527         }
7528         return do_safe_futex(g2h(cpu, uaddr),
7529                              op, tswap32(val), pts, NULL, val3);
7530     case FUTEX_WAKE:
7531         return do_safe_futex(g2h(cpu, uaddr),
7532                              op, val, NULL, NULL, 0);
7533     case FUTEX_FD:
7534         return do_safe_futex(g2h(cpu, uaddr),
7535                              op, val, NULL, NULL, 0);
7536     case FUTEX_REQUEUE:
7537     case FUTEX_CMP_REQUEUE:
7538     case FUTEX_WAKE_OP:
7539         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7540            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7541            But the prototype takes a `struct timespec *'; insert casts
7542            to satisfy the compiler.  We do not need to tswap TIMEOUT
7543            since it's not compared to guest memory.  */
7544         pts = (struct timespec *)(uintptr_t) timeout;
7545         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7546                              (base_op == FUTEX_CMP_REQUEUE
7547                               ? tswap32(val3) : val3));
7548     default:
7549         return -TARGET_ENOSYS;
7550     }
7551 }
7552 #endif
7553 
7554 #if defined(TARGET_NR_futex_time64)
7555 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7556                            int val, target_ulong timeout,
7557                            target_ulong uaddr2, int val3)
7558 {
7559     struct timespec ts, *pts;
7560     int base_op;
7561 
7562     /* ??? We assume FUTEX_* constants are the same on both host
7563        and target.  */
7564 #ifdef FUTEX_CMD_MASK
7565     base_op = op & FUTEX_CMD_MASK;
7566 #else
7567     base_op = op;
7568 #endif
7569     switch (base_op) {
7570     case FUTEX_WAIT:
7571     case FUTEX_WAIT_BITSET:
7572         if (timeout) {
7573             pts = &ts;
7574             if (target_to_host_timespec64(pts, timeout)) {
7575                 return -TARGET_EFAULT;
7576             }
7577         } else {
7578             pts = NULL;
7579         }
7580         return do_safe_futex(g2h(cpu, uaddr), op,
7581                              tswap32(val), pts, NULL, val3);
7582     case FUTEX_WAKE:
7583         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7584     case FUTEX_FD:
7585         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7586     case FUTEX_REQUEUE:
7587     case FUTEX_CMP_REQUEUE:
7588     case FUTEX_WAKE_OP:
7589         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7590            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7591            But the prototype takes a `struct timespec *'; insert casts
7592            to satisfy the compiler.  We do not need to tswap TIMEOUT
7593            since it's not compared to guest memory.  */
7594         pts = (struct timespec *)(uintptr_t) timeout;
7595         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7596                              (base_op == FUTEX_CMP_REQUEUE
7597                               ? tswap32(val3) : val3));
7598     default:
7599         return -TARGET_ENOSYS;
7600     }
7601 }
7602 #endif
7603 
7604 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7605 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7606                                      abi_long handle, abi_long mount_id,
7607                                      abi_long flags)
7608 {
7609     struct file_handle *target_fh;
7610     struct file_handle *fh;
7611     int mid = 0;
7612     abi_long ret;
7613     char *name;
7614     unsigned int size, total_size;
7615 
7616     if (get_user_s32(size, handle)) {
7617         return -TARGET_EFAULT;
7618     }
7619 
7620     name = lock_user_string(pathname);
7621     if (!name) {
7622         return -TARGET_EFAULT;
7623     }
7624 
7625     total_size = sizeof(struct file_handle) + size;
7626     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7627     if (!target_fh) {
7628         unlock_user(name, pathname, 0);
7629         return -TARGET_EFAULT;
7630     }
7631 
7632     fh = g_malloc0(total_size);
7633     fh->handle_bytes = size;
7634 
7635     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7636     unlock_user(name, pathname, 0);
7637 
7638     /* man name_to_handle_at(2):
7639      * Other than the use of the handle_bytes field, the caller should treat
7640      * the file_handle structure as an opaque data type
7641      */
7642 
7643     memcpy(target_fh, fh, total_size);
7644     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7645     target_fh->handle_type = tswap32(fh->handle_type);
7646     g_free(fh);
7647     unlock_user(target_fh, handle, total_size);
7648 
7649     if (put_user_s32(mid, mount_id)) {
7650         return -TARGET_EFAULT;
7651     }
7652 
7653     return ret;
7654 
7655 }
7656 #endif
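
/*
 * Illustrative sketch (not from the original file): the guest buffer at
 * 'handle' mirrors the host struct file_handle layout, i.e. roughly
 *
 *     struct file_handle {
 *         unsigned int  handle_bytes;   // size of f_handle[], byte-swapped
 *         int           handle_type;    // filesystem specific, byte-swapped
 *         unsigned char f_handle[0];    // opaque payload, copied verbatim
 *     };
 *
 * so only the two integer header fields need tswap32() when copying
 * between host and guest; the opaque f_handle bytes are left untouched,
 * as the memcpy() above does.
 */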
7657 
7658 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7659 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7660                                      abi_long flags)
7661 {
7662     struct file_handle *target_fh;
7663     struct file_handle *fh;
7664     unsigned int size, total_size;
7665     abi_long ret;
7666 
7667     if (get_user_s32(size, handle)) {
7668         return -TARGET_EFAULT;
7669     }
7670 
7671     total_size = sizeof(struct file_handle) + size;
7672     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7673     if (!target_fh) {
7674         return -TARGET_EFAULT;
7675     }
7676 
7677     fh = g_memdup(target_fh, total_size);
7678     fh->handle_bytes = size;
7679     fh->handle_type = tswap32(target_fh->handle_type);
7680 
7681     ret = get_errno(open_by_handle_at(mount_fd, fh,
7682                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7683 
7684     g_free(fh);
7685 
7686     unlock_user(target_fh, handle, total_size);
7687 
7688     return ret;
7689 }
7690 #endif
7691 
7692 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7693 
7694 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7695 {
7696     int host_flags;
7697     target_sigset_t *target_mask;
7698     sigset_t host_mask;
7699     abi_long ret;
7700 
7701     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7702         return -TARGET_EINVAL;
7703     }
7704     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7705         return -TARGET_EFAULT;
7706     }
7707 
7708     target_to_host_sigset(&host_mask, target_mask);
7709 
7710     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7711 
7712     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7713     if (ret >= 0) {
7714         fd_trans_register(ret, &target_signalfd_trans);
7715     }
7716 
7717     unlock_user_struct(target_mask, mask, 0);
7718 
7719     return ret;
7720 }
7721 #endif
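
/*
 * Illustrative note (not from the original file): fd_trans_register()
 * above attaches a translator to the new descriptor, so later guest
 * read()s of the signalfd go through fd_trans_host_to_target_data()
 * (see the TARGET_NR_read path below), which lets the host-format
 * records read from the fd be converted to the guest layout before
 * they are copied back to guest memory.
 */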
7722 
7723 /* Map host to target signal numbers for the wait family of syscalls.
7724    Assume all other status bits are the same.  */
7725 int host_to_target_waitstatus(int status)
7726 {
7727     if (WIFSIGNALED(status)) {
7728         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7729     }
7730     if (WIFSTOPPED(status)) {
7731         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7732                | (status & 0xff);
7733     }
7734     return status;
7735 }
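
/*
 * Worked example (illustrative, not part of the original file): Linux
 * encodes a stopped child as (signal << 8) | 0x7f and a signalled child
 * as the signal number in the low 7 bits.  So when the host reports a
 * child stopped by host SIGTSTP, the translated status is
 *
 *     (host_to_target_signal(SIGTSTP) << 8) | 0x7f
 *
 * which is what a guest waitpid() caller using WIFSTOPPED()/WSTOPSIG()
 * expects to see with its own signal numbering.
 */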
7736 
7737 static int open_self_cmdline(void *cpu_env, int fd)
7738 {
7739     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7740     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7741     int i;
7742 
7743     for (i = 0; i < bprm->argc; i++) {
7744         size_t len = strlen(bprm->argv[i]) + 1;
7745 
7746         if (write(fd, bprm->argv[i], len) != len) {
7747             return -1;
7748         }
7749     }
7750 
7751     return 0;
7752 }
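
/*
 * Illustrative note (not from the original file): /proc/self/cmdline is
 * a sequence of NUL-terminated argument strings with no trailing
 * newline, so for a guest started as "prog -a -b" the fake file holds
 *
 *     'p' 'r' 'o' 'g' '\0' '-' 'a' '\0' '-' 'b' '\0'
 *
 * exactly as the writes of bprm->argv[i] with length strlen()+1 above
 * produce.
 */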
7753 
7754 static int open_self_maps(void *cpu_env, int fd)
7755 {
7756     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7757     TaskState *ts = cpu->opaque;
7758     GSList *map_info = read_self_maps();
7759     GSList *s;
7760     int count;
7761 
7762     for (s = map_info; s; s = g_slist_next(s)) {
7763         MapInfo *e = (MapInfo *) s->data;
7764 
7765         if (h2g_valid(e->start)) {
7766             unsigned long min = e->start;
7767             unsigned long max = e->end;
7768             int flags = page_get_flags(h2g(min));
7769             const char *path;
7770 
7771             max = h2g_valid(max - 1) ?
7772                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7773 
7774             if (page_check_range(h2g(min), max - min, flags) == -1) {
7775                 continue;
7776             }
7777 
7778             if (h2g(min) == ts->info->stack_limit) {
7779                 path = "[stack]";
7780             } else {
7781                 path = e->path;
7782             }
7783 
7784             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7785                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7786                             h2g(min), h2g(max - 1) + 1,
7787                             (flags & PAGE_READ) ? 'r' : '-',
7788                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7789                             (flags & PAGE_EXEC) ? 'x' : '-',
7790                             e->is_priv ? 'p' : '-',
7791                             (uint64_t) e->offset, e->dev, e->inode);
7792             if (path) {
7793                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7794             } else {
7795                 dprintf(fd, "\n");
7796             }
7797         }
7798     }
7799 
7800     free_self_maps(map_info);
7801 
7802 #ifdef TARGET_VSYSCALL_PAGE
7803     /*
7804      * We only support execution from the vsyscall page.
7805      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7806      */
7807     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7808                     " --xp 00000000 00:00 0",
7809                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7810     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7811 #endif
7812 
7813     return 0;
7814 }
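
/*
 * Illustrative example (not part of the original file): each line
 * emitted above follows the /proc/<pid>/maps format, e.g. roughly
 *
 *     00400000-0040c000 r-xp 00000000 08:01 1234567   /usr/bin/guest-app
 *
 * with guest virtual addresses (h2g of the host mapping), permission
 * flags taken from QEMU's page flags, and the line padded to 73
 * characters before the path, as the dprintf() width calculation shows.
 */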
7815 
7816 static int open_self_stat(void *cpu_env, int fd)
7817 {
7818     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7819     TaskState *ts = cpu->opaque;
7820     g_autoptr(GString) buf = g_string_new(NULL);
7821     int i;
7822 
7823     for (i = 0; i < 44; i++) {
7824         if (i == 0) {
7825             /* pid */
7826             g_string_printf(buf, FMT_pid " ", getpid());
7827         } else if (i == 1) {
7828             /* app name */
7829             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7830             bin = bin ? bin + 1 : ts->bprm->argv[0];
7831             g_string_printf(buf, "(%.15s) ", bin);
7832         } else if (i == 3) {
7833             /* ppid */
7834             g_string_printf(buf, FMT_pid " ", getppid());
7835         } else if (i == 27) {
7836             /* stack bottom */
7837             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7838         } else {
7839             /* all remaining fields are reported as zero */
7840             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7841         }
7842 
7843         if (write(fd, buf->str, buf->len) != buf->len) {
7844             return -1;
7845         }
7846     }
7847 
7848     return 0;
7849 }
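
/*
 * Illustrative example (not from the original file): only the pid, comm,
 * ppid and start-of-stack fields of /proc/self/stat are filled in; every
 * other field is reported as 0, so the generated line looks roughly like
 *
 *     4242 (guest-app) 0 4241 0 0 ... 0 <start_stack> 0 ... 0
 *
 * which is enough for common "parse field N" consumers while avoiding
 * host-specific values leaking into the guest.
 */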
7850 
7851 static int open_self_auxv(void *cpu_env, int fd)
7852 {
7853     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7854     TaskState *ts = cpu->opaque;
7855     abi_ulong auxv = ts->info->saved_auxv;
7856     abi_ulong len = ts->info->auxv_len;
7857     char *ptr;
7858 
7859     /*
7860      * The auxiliary vector is stored on the target process's stack.
7861      * Read the whole auxv vector and copy it to the file.
7862      */
7863     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7864     if (ptr != NULL) {
7865         while (len > 0) {
7866             ssize_t r;
7867             r = write(fd, ptr, len);
7868             if (r <= 0) {
7869                 break;
7870             }
7871             len -= r;
7872             ptr += r;
7873         }
7874         lseek(fd, 0, SEEK_SET);
7875         unlock_user(ptr, auxv, len);
7876     }
7877 
7878     return 0;
7879 }
7880 
7881 static int is_proc_myself(const char *filename, const char *entry)
7882 {
7883     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7884         filename += strlen("/proc/");
7885         if (!strncmp(filename, "self/", strlen("self/"))) {
7886             filename += strlen("self/");
7887         } else if (*filename >= '1' && *filename <= '9') {
7888             char myself[80];
7889             snprintf(myself, sizeof(myself), "%d/", getpid());
7890             if (!strncmp(filename, myself, strlen(myself))) {
7891                 filename += strlen(myself);
7892             } else {
7893                 return 0;
7894             }
7895         } else {
7896             return 0;
7897         }
7898         if (!strcmp(filename, entry)) {
7899             return 1;
7900         }
7901     }
7902     return 0;
7903 }
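
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *     is_proc_myself("/proc/self/maps", "maps")   -> 1
 *     is_proc_myself("/proc/4242/maps", "maps")   -> 1 iff getpid() == 4242
 *     is_proc_myself("/proc/4242/maps/x", "maps") -> 0 (entry must match exactly)
 *     is_proc_myself("/etc/passwd", "maps")       -> 0
 *
 * i.e. only paths that refer to the emulated process itself are
 * intercepted; anything else falls through to the real filesystem.
 */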
7904 
7905 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7906     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7907 static int is_proc(const char *filename, const char *entry)
7908 {
7909     return strcmp(filename, entry) == 0;
7910 }
7911 #endif
7912 
7913 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7914 static int open_net_route(void *cpu_env, int fd)
7915 {
7916     FILE *fp;
7917     char *line = NULL;
7918     size_t len = 0;
7919     ssize_t read;
7920 
7921     fp = fopen("/proc/net/route", "r");
7922     if (fp == NULL) {
7923         return -1;
7924     }
7925 
7926     /* read header */
7927 
7928     read = getline(&line, &len, fp);
7929     dprintf(fd, "%s", line);
7930 
7931     /* read routes */
7932 
7933     while ((read = getline(&line, &len, fp)) != -1) {
7934         char iface[16];
7935         uint32_t dest, gw, mask;
7936         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7937         int fields;
7938 
7939         fields = sscanf(line,
7940                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7941                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7942                         &mask, &mtu, &window, &irtt);
7943         if (fields != 11) {
7944             continue;
7945         }
7946         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7947                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7948                 metric, tswap32(mask), mtu, window, irtt);
7949     }
7950 
7951     free(line);
7952     fclose(fp);
7953 
7954     return 0;
7955 }
7956 #endif
7957 
7958 #if defined(TARGET_SPARC)
7959 static int open_cpuinfo(void *cpu_env, int fd)
7960 {
7961     dprintf(fd, "type\t\t: sun4u\n");
7962     return 0;
7963 }
7964 #endif
7965 
7966 #if defined(TARGET_HPPA)
7967 static int open_cpuinfo(void *cpu_env, int fd)
7968 {
7969     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7970     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7971     dprintf(fd, "capabilities\t: os32\n");
7972     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7973     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7974     return 0;
7975 }
7976 #endif
7977 
7978 #if defined(TARGET_M68K)
7979 static int open_hardware(void *cpu_env, int fd)
7980 {
7981     dprintf(fd, "Model:\t\tqemu-m68k\n");
7982     return 0;
7983 }
7984 #endif
7985 
7986 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7987 {
7988     struct fake_open {
7989         const char *filename;
7990         int (*fill)(void *cpu_env, int fd);
7991         int (*cmp)(const char *s1, const char *s2);
7992     };
7993     const struct fake_open *fake_open;
7994     static const struct fake_open fakes[] = {
7995         { "maps", open_self_maps, is_proc_myself },
7996         { "stat", open_self_stat, is_proc_myself },
7997         { "auxv", open_self_auxv, is_proc_myself },
7998         { "cmdline", open_self_cmdline, is_proc_myself },
7999 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8000         { "/proc/net/route", open_net_route, is_proc },
8001 #endif
8002 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8003         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8004 #endif
8005 #if defined(TARGET_M68K)
8006         { "/proc/hardware", open_hardware, is_proc },
8007 #endif
8008         { NULL, NULL, NULL }
8009     };
8010 
8011     if (is_proc_myself(pathname, "exe")) {
8012         int execfd = qemu_getauxval(AT_EXECFD);
8013         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8014     }
8015 
8016     for (fake_open = fakes; fake_open->filename; fake_open++) {
8017         if (fake_open->cmp(pathname, fake_open->filename)) {
8018             break;
8019         }
8020     }
8021 
8022     if (fake_open->filename) {
8023         const char *tmpdir;
8024         char filename[PATH_MAX];
8025         int fd, r;
8026 
8027         /* create a temporary file to hold the emulated /proc contents */
8028         tmpdir = getenv("TMPDIR");
8029         if (!tmpdir)
8030             tmpdir = "/tmp";
8031         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8032         fd = mkstemp(filename);
8033         if (fd < 0) {
8034             return fd;
8035         }
8036         unlink(filename);
8037 
8038         if ((r = fake_open->fill(cpu_env, fd))) {
8039             int e = errno;
8040             close(fd);
8041             errno = e;
8042             return r;
8043         }
8044         lseek(fd, 0, SEEK_SET);
8045 
8046         return fd;
8047     }
8048 
8049     return safe_openat(dirfd, path(pathname), flags, mode);
8050 }
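
/*
 * Flow sketch (illustrative, not part of the original file): a guest open
 * of one of the intercepted /proc entries never reaches the host file;
 * instead
 *
 *     open("/proc/self/maps", O_RDONLY)
 *       -> do_openat() matches "maps" via is_proc_myself()
 *       -> mkstemp() creates an (immediately unlinked) temporary file
 *       -> open_self_maps() writes the synthetic contents into it
 *       -> the temp fd, rewound to offset 0, is returned to the guest
 *
 * so the guest reads ordinary file data and needs no special casing.
 */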
8051 
8052 #define TIMER_MAGIC 0x0caf0000
8053 #define TIMER_MAGIC_MASK 0xffff0000
8054 
8055 /* Convert QEMU provided timer ID back to internal 16bit index format */
8056 static target_timer_t get_timer_id(abi_long arg)
8057 {
8058     target_timer_t timerid = arg;
8059 
8060     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8061         return -TARGET_EINVAL;
8062     }
8063 
8064     timerid &= 0xffff;
8065 
8066     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8067         return -TARGET_EINVAL;
8068     }
8069 
8070     return timerid;
8071 }
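
/*
 * Worked example (illustrative, not from the original file): timer IDs
 * handed to the guest are TIMER_MAGIC | index, so
 *
 *     get_timer_id(0x0caf0003) == 3
 *     get_timer_id(0x0caf0000) == 0
 *     get_timer_id(0x12340003) == -TARGET_EINVAL   (wrong magic)
 *
 * and any index >= ARRAY_SIZE(g_posix_timers) is likewise rejected.
 */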
8072 
8073 static int target_to_host_cpu_mask(unsigned long *host_mask,
8074                                    size_t host_size,
8075                                    abi_ulong target_addr,
8076                                    size_t target_size)
8077 {
8078     unsigned target_bits = sizeof(abi_ulong) * 8;
8079     unsigned host_bits = sizeof(*host_mask) * 8;
8080     abi_ulong *target_mask;
8081     unsigned i, j;
8082 
8083     assert(host_size >= target_size);
8084 
8085     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8086     if (!target_mask) {
8087         return -TARGET_EFAULT;
8088     }
8089     memset(host_mask, 0, host_size);
8090 
8091     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8092         unsigned bit = i * target_bits;
8093         abi_ulong val;
8094 
8095         __get_user(val, &target_mask[i]);
8096         for (j = 0; j < target_bits; j++, bit++) {
8097             if (val & (1UL << j)) {
8098                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8099             }
8100         }
8101     }
8102 
8103     unlock_user(target_mask, target_addr, 0);
8104     return 0;
8105 }
8106 
8107 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8108                                    size_t host_size,
8109                                    abi_ulong target_addr,
8110                                    size_t target_size)
8111 {
8112     unsigned target_bits = sizeof(abi_ulong) * 8;
8113     unsigned host_bits = sizeof(*host_mask) * 8;
8114     abi_ulong *target_mask;
8115     unsigned i, j;
8116 
8117     assert(host_size >= target_size);
8118 
8119     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8120     if (!target_mask) {
8121         return -TARGET_EFAULT;
8122     }
8123 
8124     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8125         unsigned bit = i * target_bits;
8126         abi_ulong val = 0;
8127 
8128         for (j = 0; j < target_bits; j++, bit++) {
8129             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8130                 val |= 1UL << j;
8131             }
8132         }
8133         __put_user(val, &target_mask[i]);
8134     }
8135 
8136     unlock_user(target_mask, target_addr, target_size);
8137     return 0;
8138 }
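
/*
 * Worked example (illustrative, not part of the original file): with a
 * 32-bit abi_ulong and a 64-bit host long, bit 33 of the guest affinity
 * mask lives in target_mask[1] bit 1, and the converters above map it
 * to/from host_mask[0] bit 33:
 *
 *     bit = i * target_bits + j = 1 * 32 + 1 = 33
 *     host_mask[33 / 64] |= 1UL << (33 % 64)
 *
 * so guest and host sched_{get,set}affinity masks stay bit-for-bit
 * equivalent regardless of word size.
 */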
8139 
8140 #ifdef TARGET_NR_getdents
8141 static int do_getdents(abi_long arg1, abi_long arg2, abi_long arg3)
8142 {
8143     int ret;
8144 
8145 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8146 # if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8147     struct target_dirent *target_dirp;
8148     struct linux_dirent *dirp;
8149     abi_long count = arg3;
8150 
8151     dirp = g_try_malloc(count);
8152     if (!dirp) {
8153         return -TARGET_ENOMEM;
8154     }
8155 
8156     ret = get_errno(sys_getdents(arg1, dirp, count));
8157     if (!is_error(ret)) {
8158         struct linux_dirent *de;
8159         struct target_dirent *tde;
8160         int len = ret;
8161         int reclen, treclen;
8162         int count1, tnamelen;
8163 
8164         count1 = 0;
8165         de = dirp;
8166         target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8167         if (!target_dirp) {
8168             return -TARGET_EFAULT;
8169         }
8170         tde = target_dirp;
8171         while (len > 0) {
8172             reclen = de->d_reclen;
8173             tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8174             assert(tnamelen >= 0);
8175             treclen = tnamelen + offsetof(struct target_dirent, d_name);
8176             assert(count1 + treclen <= count);
8177             tde->d_reclen = tswap16(treclen);
8178             tde->d_ino = tswapal(de->d_ino);
8179             tde->d_off = tswapal(de->d_off);
8180             memcpy(tde->d_name, de->d_name, tnamelen);
8181             de = (struct linux_dirent *)((char *)de + reclen);
8182             len -= reclen;
8183             tde = (struct target_dirent *)((char *)tde + treclen);
8184             count1 += treclen;
8185         }
8186         ret = count1;
8187         unlock_user(target_dirp, arg2, ret);
8188     }
8189     g_free(dirp);
8190 # else
8191     struct linux_dirent *dirp;
8192     abi_long count = arg3;
8193 
8194     dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8195     if (!dirp) {
8196         return -TARGET_EFAULT;
8197     }
8198     ret = get_errno(sys_getdents(arg1, dirp, count));
8199     if (!is_error(ret)) {
8200         struct linux_dirent *de;
8201         int len = ret;
8202         int reclen;
8203         de = dirp;
8204         while (len > 0) {
8205             reclen = de->d_reclen;
8206             if (reclen > len) {
8207                 break;
8208             }
8209             de->d_reclen = tswap16(reclen);
8210             tswapls(&de->d_ino);
8211             tswapls(&de->d_off);
8212             de = (struct linux_dirent *)((char *)de + reclen);
8213             len -= reclen;
8214         }
8215     }
8216     unlock_user(dirp, arg2, ret);
8217 # endif
8218 #else
8219     /* Implement getdents in terms of getdents64 */
8220     struct linux_dirent64 *dirp;
8221     abi_long count = arg3;
8222 
8223     dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8224     if (!dirp) {
8225         return -TARGET_EFAULT;
8226     }
8227     ret = get_errno(sys_getdents64(arg1, dirp, count));
8228     if (!is_error(ret)) {
8229         /*
8230          * Convert the dirent64 structs to target dirent.  We do this
8231          * in-place, since we can guarantee that a target_dirent is no
8232          * larger than a dirent64; however this means we have to be
8233          * careful to read everything before writing in the new format.
8234          */
8235         struct linux_dirent64 *de;
8236         struct target_dirent *tde;
8237         int len = ret;
8238         int tlen = 0;
8239 
8240         de = dirp;
8241         tde = (struct target_dirent *)dirp;
8242         while (len > 0) {
8243             int namelen, treclen;
8244             int reclen = de->d_reclen;
8245             uint64_t ino = de->d_ino;
8246             int64_t off = de->d_off;
8247             uint8_t type = de->d_type;
8248 
8249             namelen = strlen(de->d_name);
8250             treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8251             treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8252 
8253             memmove(tde->d_name, de->d_name, namelen + 1);
8254             tde->d_ino = tswapal(ino);
8255             tde->d_off = tswapal(off);
8256             tde->d_reclen = tswap16(treclen);
8257             /*
8258              * The target_dirent d_type byte is stored in what was formerly
8259              * a padding byte at the end of the structure:
8260              */
8261             *(((char *)tde) + treclen - 1) = type;
8262 
8263             de = (struct linux_dirent64 *)((char *)de + reclen);
8264             tde = (struct target_dirent *)((char *)tde + treclen);
8265             len -= reclen;
8266             tlen += treclen;
8267         }
8268         ret = tlen;
8269     }
8270     unlock_user(dirp, arg2, ret);
8271 #endif
8272     return ret;
8273 }
8274 #endif /* TARGET_NR_getdents */
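
/*
 * Illustrative arithmetic (not from the original file): when getdents is
 * emulated via getdents64, each output record is resized as
 *
 *     treclen = offsetof(struct target_dirent, d_name)
 *               + strlen(name) + 2;               // NUL plus trailing type byte
 *     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
 *
 * and since a struct linux_dirent64 record for the same name is never
 * smaller than the target record, the conversion can safely be done in
 * place, with the d_type byte stored in the final byte of each record.
 */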
8275 
8276 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8277 static int do_getdents64(abi_long arg1, abi_long arg2, abi_long arg3)
8278 {
8279     struct linux_dirent64 *dirp;
8280     abi_long count = arg3;
8281     int ret;
8282 
8283     dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8284     if (!dirp) {
8285         return -TARGET_EFAULT;
8286     }
8287     ret = get_errno(sys_getdents64(arg1, dirp, count));
8288     if (!is_error(ret)) {
8289         struct linux_dirent64 *de;
8290         int len = ret;
8291         int reclen;
8292         de = dirp;
8293         while (len > 0) {
8294             reclen = de->d_reclen;
8295             if (reclen > len) {
8296                 break;
8297             }
8298             de->d_reclen = tswap16(reclen);
8299             tswap64s((uint64_t *)&de->d_ino);
8300             tswap64s((uint64_t *)&de->d_off);
8301             de = (struct linux_dirent64 *)((char *)de + reclen);
8302             len -= reclen;
8303         }
8304     }
8305     unlock_user(dirp, arg2, ret);
8306     return ret;
8307 }
8308 #endif /* TARGET_NR_getdents64 */
8309 
8310 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8311 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8312 #endif
8313 
8314 /* This is an internal helper for do_syscall so that it has a single
8315  * return point, which makes it easier to perform actions such as
8316  * logging of syscall results.
8317  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8318  */
8319 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8320                             abi_long arg2, abi_long arg3, abi_long arg4,
8321                             abi_long arg5, abi_long arg6, abi_long arg7,
8322                             abi_long arg8)
8323 {
8324     CPUState *cpu = env_cpu(cpu_env);
8325     abi_long ret;
8326 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8327     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8328     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8329     || defined(TARGET_NR_statx)
8330     struct stat st;
8331 #endif
8332 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8333     || defined(TARGET_NR_fstatfs)
8334     struct statfs stfs;
8335 #endif
8336     void *p;
8337 
8338     switch(num) {
8339     case TARGET_NR_exit:
8340         /* In old applications this may be used to implement _exit(2).
8341            However in threaded applications it is used for thread termination,
8342            and _exit_group is used for application termination.
8343            Do thread termination if we have more than one thread.  */
8344 
8345         if (block_signals()) {
8346             return -TARGET_ERESTARTSYS;
8347         }
8348 
8349         pthread_mutex_lock(&clone_lock);
8350 
8351         if (CPU_NEXT(first_cpu)) {
8352             TaskState *ts = cpu->opaque;
8353 
8354             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8355             object_unref(OBJECT(cpu));
8356             /*
8357              * At this point the CPU should be unrealized and removed
8358              * from cpu lists. We can clean-up the rest of the thread
8359              * data without the lock held.
8360              */
8361 
8362             pthread_mutex_unlock(&clone_lock);
8363 
8364             if (ts->child_tidptr) {
8365                 put_user_u32(0, ts->child_tidptr);
8366                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8367                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8368             }
8369             thread_cpu = NULL;
8370             g_free(ts);
8371             rcu_unregister_thread();
8372             pthread_exit(NULL);
8373         }
8374 
8375         pthread_mutex_unlock(&clone_lock);
8376         preexit_cleanup(cpu_env, arg1);
8377         _exit(arg1);
8378         return 0; /* avoid warning */
8379     case TARGET_NR_read:
8380         if (arg2 == 0 && arg3 == 0) {
8381             return get_errno(safe_read(arg1, 0, 0));
8382         } else {
8383             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8384                 return -TARGET_EFAULT;
8385             ret = get_errno(safe_read(arg1, p, arg3));
8386             if (ret >= 0 &&
8387                 fd_trans_host_to_target_data(arg1)) {
8388                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8389             }
8390             unlock_user(p, arg2, ret);
8391         }
8392         return ret;
8393     case TARGET_NR_write:
8394         if (arg2 == 0 && arg3 == 0) {
8395             return get_errno(safe_write(arg1, 0, 0));
8396         }
8397         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8398             return -TARGET_EFAULT;
8399         if (fd_trans_target_to_host_data(arg1)) {
8400             void *copy = g_malloc(arg3);
8401             memcpy(copy, p, arg3);
8402             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8403             if (ret >= 0) {
8404                 ret = get_errno(safe_write(arg1, copy, ret));
8405             }
8406             g_free(copy);
8407         } else {
8408             ret = get_errno(safe_write(arg1, p, arg3));
8409         }
8410         unlock_user(p, arg2, 0);
8411         return ret;
8412 
8413 #ifdef TARGET_NR_open
8414     case TARGET_NR_open:
8415         if (!(p = lock_user_string(arg1)))
8416             return -TARGET_EFAULT;
8417         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8418                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8419                                   arg3));
8420         fd_trans_unregister(ret);
8421         unlock_user(p, arg1, 0);
8422         return ret;
8423 #endif
8424     case TARGET_NR_openat:
8425         if (!(p = lock_user_string(arg2)))
8426             return -TARGET_EFAULT;
8427         ret = get_errno(do_openat(cpu_env, arg1, p,
8428                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8429                                   arg4));
8430         fd_trans_unregister(ret);
8431         unlock_user(p, arg2, 0);
8432         return ret;
8433 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8434     case TARGET_NR_name_to_handle_at:
8435         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8436         return ret;
8437 #endif
8438 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8439     case TARGET_NR_open_by_handle_at:
8440         ret = do_open_by_handle_at(arg1, arg2, arg3);
8441         fd_trans_unregister(ret);
8442         return ret;
8443 #endif
8444     case TARGET_NR_close:
8445         fd_trans_unregister(arg1);
8446         return get_errno(close(arg1));
8447 
8448     case TARGET_NR_brk:
8449         return do_brk(arg1);
8450 #ifdef TARGET_NR_fork
8451     case TARGET_NR_fork:
8452         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8453 #endif
8454 #ifdef TARGET_NR_waitpid
8455     case TARGET_NR_waitpid:
8456         {
8457             int status;
8458             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8459             if (!is_error(ret) && arg2 && ret
8460                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8461                 return -TARGET_EFAULT;
8462         }
8463         return ret;
8464 #endif
8465 #ifdef TARGET_NR_waitid
8466     case TARGET_NR_waitid:
8467         {
8468             siginfo_t info;
8469             info.si_pid = 0;
8470             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8471             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8472                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8473                     return -TARGET_EFAULT;
8474                 host_to_target_siginfo(p, &info);
8475                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8476             }
8477         }
8478         return ret;
8479 #endif
8480 #ifdef TARGET_NR_creat /* not on alpha */
8481     case TARGET_NR_creat:
8482         if (!(p = lock_user_string(arg1)))
8483             return -TARGET_EFAULT;
8484         ret = get_errno(creat(p, arg2));
8485         fd_trans_unregister(ret);
8486         unlock_user(p, arg1, 0);
8487         return ret;
8488 #endif
8489 #ifdef TARGET_NR_link
8490     case TARGET_NR_link:
8491         {
8492             void * p2;
8493             p = lock_user_string(arg1);
8494             p2 = lock_user_string(arg2);
8495             if (!p || !p2)
8496                 ret = -TARGET_EFAULT;
8497             else
8498                 ret = get_errno(link(p, p2));
8499             unlock_user(p2, arg2, 0);
8500             unlock_user(p, arg1, 0);
8501         }
8502         return ret;
8503 #endif
8504 #if defined(TARGET_NR_linkat)
8505     case TARGET_NR_linkat:
8506         {
8507             void * p2 = NULL;
8508             if (!arg2 || !arg4)
8509                 return -TARGET_EFAULT;
8510             p  = lock_user_string(arg2);
8511             p2 = lock_user_string(arg4);
8512             if (!p || !p2)
8513                 ret = -TARGET_EFAULT;
8514             else
8515                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8516             unlock_user(p, arg2, 0);
8517             unlock_user(p2, arg4, 0);
8518         }
8519         return ret;
8520 #endif
8521 #ifdef TARGET_NR_unlink
8522     case TARGET_NR_unlink:
8523         if (!(p = lock_user_string(arg1)))
8524             return -TARGET_EFAULT;
8525         ret = get_errno(unlink(p));
8526         unlock_user(p, arg1, 0);
8527         return ret;
8528 #endif
8529 #if defined(TARGET_NR_unlinkat)
8530     case TARGET_NR_unlinkat:
8531         if (!(p = lock_user_string(arg2)))
8532             return -TARGET_EFAULT;
8533         ret = get_errno(unlinkat(arg1, p, arg3));
8534         unlock_user(p, arg2, 0);
8535         return ret;
8536 #endif
8537     case TARGET_NR_execve:
8538         {
8539             char **argp, **envp;
8540             int argc, envc;
8541             abi_ulong gp;
8542             abi_ulong guest_argp;
8543             abi_ulong guest_envp;
8544             abi_ulong addr;
8545             char **q;
8546 
8547             argc = 0;
8548             guest_argp = arg2;
8549             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8550                 if (get_user_ual(addr, gp))
8551                     return -TARGET_EFAULT;
8552                 if (!addr)
8553                     break;
8554                 argc++;
8555             }
8556             envc = 0;
8557             guest_envp = arg3;
8558             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8559                 if (get_user_ual(addr, gp))
8560                     return -TARGET_EFAULT;
8561                 if (!addr)
8562                     break;
8563                 envc++;
8564             }
8565 
8566             argp = g_new0(char *, argc + 1);
8567             envp = g_new0(char *, envc + 1);
8568 
8569             for (gp = guest_argp, q = argp; gp;
8570                   gp += sizeof(abi_ulong), q++) {
8571                 if (get_user_ual(addr, gp))
8572                     goto execve_efault;
8573                 if (!addr)
8574                     break;
8575                 if (!(*q = lock_user_string(addr)))
8576                     goto execve_efault;
8577             }
8578             *q = NULL;
8579 
8580             for (gp = guest_envp, q = envp; gp;
8581                   gp += sizeof(abi_ulong), q++) {
8582                 if (get_user_ual(addr, gp))
8583                     goto execve_efault;
8584                 if (!addr)
8585                     break;
8586                 if (!(*q = lock_user_string(addr)))
8587                     goto execve_efault;
8588             }
8589             *q = NULL;
8590 
8591             if (!(p = lock_user_string(arg1)))
8592                 goto execve_efault;
8593             /* Although execve() is not an interruptible syscall it is
8594              * a special case where we must use the safe_syscall wrapper:
8595              * if we allow a signal to happen before we make the host
8596              * syscall then we will 'lose' it, because at the point of
8597              * execve the process leaves QEMU's control. So we use the
8598              * safe syscall wrapper to ensure that we either take the
8599              * signal as a guest signal, or else it does not happen
8600              * before the execve completes and makes it the other
8601              * program's problem.
8602              */
8603             ret = get_errno(safe_execve(p, argp, envp));
8604             unlock_user(p, arg1, 0);
8605 
8606             goto execve_end;
8607 
8608         execve_efault:
8609             ret = -TARGET_EFAULT;
8610 
8611         execve_end:
8612             for (gp = guest_argp, q = argp; *q;
8613                   gp += sizeof(abi_ulong), q++) {
8614                 if (get_user_ual(addr, gp)
8615                     || !addr)
8616                     break;
8617                 unlock_user(*q, addr, 0);
8618             }
8619             for (gp = guest_envp, q = envp; *q;
8620                   gp += sizeof(abi_ulong), q++) {
8621                 if (get_user_ual(addr, gp)
8622                     || !addr)
8623                     break;
8624                 unlock_user(*q, addr, 0);
8625             }
8626 
8627             g_free(argp);
8628             g_free(envp);
8629         }
8630         return ret;
8631     case TARGET_NR_chdir:
8632         if (!(p = lock_user_string(arg1)))
8633             return -TARGET_EFAULT;
8634         ret = get_errno(chdir(p));
8635         unlock_user(p, arg1, 0);
8636         return ret;
8637 #ifdef TARGET_NR_time
8638     case TARGET_NR_time:
8639         {
8640             time_t host_time;
8641             ret = get_errno(time(&host_time));
8642             if (!is_error(ret)
8643                 && arg1
8644                 && put_user_sal(host_time, arg1))
8645                 return -TARGET_EFAULT;
8646         }
8647         return ret;
8648 #endif
8649 #ifdef TARGET_NR_mknod
8650     case TARGET_NR_mknod:
8651         if (!(p = lock_user_string(arg1)))
8652             return -TARGET_EFAULT;
8653         ret = get_errno(mknod(p, arg2, arg3));
8654         unlock_user(p, arg1, 0);
8655         return ret;
8656 #endif
8657 #if defined(TARGET_NR_mknodat)
8658     case TARGET_NR_mknodat:
8659         if (!(p = lock_user_string(arg2)))
8660             return -TARGET_EFAULT;
8661         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8662         unlock_user(p, arg2, 0);
8663         return ret;
8664 #endif
8665 #ifdef TARGET_NR_chmod
8666     case TARGET_NR_chmod:
8667         if (!(p = lock_user_string(arg1)))
8668             return -TARGET_EFAULT;
8669         ret = get_errno(chmod(p, arg2));
8670         unlock_user(p, arg1, 0);
8671         return ret;
8672 #endif
8673 #ifdef TARGET_NR_lseek
8674     case TARGET_NR_lseek:
8675         return get_errno(lseek(arg1, arg2, arg3));
8676 #endif
8677 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8678     /* Alpha specific */
8679     case TARGET_NR_getxpid:
8680         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8681         return get_errno(getpid());
8682 #endif
8683 #ifdef TARGET_NR_getpid
8684     case TARGET_NR_getpid:
8685         return get_errno(getpid());
8686 #endif
8687     case TARGET_NR_mount:
8688         {
8689             /* need to look at the data field */
8690             void *p2, *p3;
8691 
8692             if (arg1) {
8693                 p = lock_user_string(arg1);
8694                 if (!p) {
8695                     return -TARGET_EFAULT;
8696                 }
8697             } else {
8698                 p = NULL;
8699             }
8700 
8701             p2 = lock_user_string(arg2);
8702             if (!p2) {
8703                 if (arg1) {
8704                     unlock_user(p, arg1, 0);
8705                 }
8706                 return -TARGET_EFAULT;
8707             }
8708 
8709             if (arg3) {
8710                 p3 = lock_user_string(arg3);
8711                 if (!p3) {
8712                     if (arg1) {
8713                         unlock_user(p, arg1, 0);
8714                     }
8715                     unlock_user(p2, arg2, 0);
8716                     return -TARGET_EFAULT;
8717                 }
8718             } else {
8719                 p3 = NULL;
8720             }
8721 
8722             /* FIXME - arg5 should be locked, but it isn't clear how to
8723              * do that since it's not guaranteed to be a NULL-terminated
8724              * string.
8725              */
8726             if (!arg5) {
8727                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8728             } else {
8729                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8730             }
8731             ret = get_errno(ret);
8732 
8733             if (arg1) {
8734                 unlock_user(p, arg1, 0);
8735             }
8736             unlock_user(p2, arg2, 0);
8737             if (arg3) {
8738                 unlock_user(p3, arg3, 0);
8739             }
8740         }
8741         return ret;
8742 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8743 #if defined(TARGET_NR_umount)
8744     case TARGET_NR_umount:
8745 #endif
8746 #if defined(TARGET_NR_oldumount)
8747     case TARGET_NR_oldumount:
8748 #endif
8749         if (!(p = lock_user_string(arg1)))
8750             return -TARGET_EFAULT;
8751         ret = get_errno(umount(p));
8752         unlock_user(p, arg1, 0);
8753         return ret;
8754 #endif
8755 #ifdef TARGET_NR_stime /* not on alpha */
8756     case TARGET_NR_stime:
8757         {
8758             struct timespec ts;
8759             ts.tv_nsec = 0;
8760             if (get_user_sal(ts.tv_sec, arg1)) {
8761                 return -TARGET_EFAULT;
8762             }
8763             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8764         }
8765 #endif
8766 #ifdef TARGET_NR_alarm /* not on alpha */
8767     case TARGET_NR_alarm:
8768         return alarm(arg1);
8769 #endif
8770 #ifdef TARGET_NR_pause /* not on alpha */
8771     case TARGET_NR_pause:
8772         if (!block_signals()) {
8773             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8774         }
8775         return -TARGET_EINTR;
8776 #endif
8777 #ifdef TARGET_NR_utime
8778     case TARGET_NR_utime:
8779         {
8780             struct utimbuf tbuf, *host_tbuf;
8781             struct target_utimbuf *target_tbuf;
8782             if (arg2) {
8783                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8784                     return -TARGET_EFAULT;
8785                 tbuf.actime = tswapal(target_tbuf->actime);
8786                 tbuf.modtime = tswapal(target_tbuf->modtime);
8787                 unlock_user_struct(target_tbuf, arg2, 0);
8788                 host_tbuf = &tbuf;
8789             } else {
8790                 host_tbuf = NULL;
8791             }
8792             if (!(p = lock_user_string(arg1)))
8793                 return -TARGET_EFAULT;
8794             ret = get_errno(utime(p, host_tbuf));
8795             unlock_user(p, arg1, 0);
8796         }
8797         return ret;
8798 #endif
8799 #ifdef TARGET_NR_utimes
8800     case TARGET_NR_utimes:
8801         {
8802             struct timeval *tvp, tv[2];
8803             if (arg2) {
8804                 if (copy_from_user_timeval(&tv[0], arg2)
8805                     || copy_from_user_timeval(&tv[1],
8806                                               arg2 + sizeof(struct target_timeval)))
8807                     return -TARGET_EFAULT;
8808                 tvp = tv;
8809             } else {
8810                 tvp = NULL;
8811             }
8812             if (!(p = lock_user_string(arg1)))
8813                 return -TARGET_EFAULT;
8814             ret = get_errno(utimes(p, tvp));
8815             unlock_user(p, arg1, 0);
8816         }
8817         return ret;
8818 #endif
8819 #if defined(TARGET_NR_futimesat)
8820     case TARGET_NR_futimesat:
8821         {
8822             struct timeval *tvp, tv[2];
8823             if (arg3) {
8824                 if (copy_from_user_timeval(&tv[0], arg3)
8825                     || copy_from_user_timeval(&tv[1],
8826                                               arg3 + sizeof(struct target_timeval)))
8827                     return -TARGET_EFAULT;
8828                 tvp = tv;
8829             } else {
8830                 tvp = NULL;
8831             }
8832             if (!(p = lock_user_string(arg2))) {
8833                 return -TARGET_EFAULT;
8834             }
8835             ret = get_errno(futimesat(arg1, path(p), tvp));
8836             unlock_user(p, arg2, 0);
8837         }
8838         return ret;
8839 #endif
8840 #ifdef TARGET_NR_access
8841     case TARGET_NR_access:
8842         if (!(p = lock_user_string(arg1))) {
8843             return -TARGET_EFAULT;
8844         }
8845         ret = get_errno(access(path(p), arg2));
8846         unlock_user(p, arg1, 0);
8847         return ret;
8848 #endif
8849 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8850     case TARGET_NR_faccessat:
8851         if (!(p = lock_user_string(arg2))) {
8852             return -TARGET_EFAULT;
8853         }
8854         ret = get_errno(faccessat(arg1, p, arg3, 0));
8855         unlock_user(p, arg2, 0);
8856         return ret;
8857 #endif
8858 #ifdef TARGET_NR_nice /* not on alpha */
8859     case TARGET_NR_nice:
8860         return get_errno(nice(arg1));
8861 #endif
8862     case TARGET_NR_sync:
8863         sync();
8864         return 0;
8865 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8866     case TARGET_NR_syncfs:
8867         return get_errno(syncfs(arg1));
8868 #endif
8869     case TARGET_NR_kill:
8870         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8871 #ifdef TARGET_NR_rename
8872     case TARGET_NR_rename:
8873         {
8874             void *p2;
8875             p = lock_user_string(arg1);
8876             p2 = lock_user_string(arg2);
8877             if (!p || !p2)
8878                 ret = -TARGET_EFAULT;
8879             else
8880                 ret = get_errno(rename(p, p2));
8881             unlock_user(p2, arg2, 0);
8882             unlock_user(p, arg1, 0);
8883         }
8884         return ret;
8885 #endif
8886 #if defined(TARGET_NR_renameat)
8887     case TARGET_NR_renameat:
8888         {
8889             void *p2;
8890             p  = lock_user_string(arg2);
8891             p2 = lock_user_string(arg4);
8892             if (!p || !p2)
8893                 ret = -TARGET_EFAULT;
8894             else
8895                 ret = get_errno(renameat(arg1, p, arg3, p2));
8896             unlock_user(p2, arg4, 0);
8897             unlock_user(p, arg2, 0);
8898         }
8899         return ret;
8900 #endif
8901 #if defined(TARGET_NR_renameat2)
8902     case TARGET_NR_renameat2:
8903         {
8904             void *p2;
8905             p  = lock_user_string(arg2);
8906             p2 = lock_user_string(arg4);
8907             if (!p || !p2) {
8908                 ret = -TARGET_EFAULT;
8909             } else {
8910                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8911             }
8912             unlock_user(p2, arg4, 0);
8913             unlock_user(p, arg2, 0);
8914         }
8915         return ret;
8916 #endif
8917 #ifdef TARGET_NR_mkdir
8918     case TARGET_NR_mkdir:
8919         if (!(p = lock_user_string(arg1)))
8920             return -TARGET_EFAULT;
8921         ret = get_errno(mkdir(p, arg2));
8922         unlock_user(p, arg1, 0);
8923         return ret;
8924 #endif
8925 #if defined(TARGET_NR_mkdirat)
8926     case TARGET_NR_mkdirat:
8927         if (!(p = lock_user_string(arg2)))
8928             return -TARGET_EFAULT;
8929         ret = get_errno(mkdirat(arg1, p, arg3));
8930         unlock_user(p, arg2, 0);
8931         return ret;
8932 #endif
8933 #ifdef TARGET_NR_rmdir
8934     case TARGET_NR_rmdir:
8935         if (!(p = lock_user_string(arg1)))
8936             return -TARGET_EFAULT;
8937         ret = get_errno(rmdir(p));
8938         unlock_user(p, arg1, 0);
8939         return ret;
8940 #endif
8941     case TARGET_NR_dup:
8942         ret = get_errno(dup(arg1));
8943         if (ret >= 0) {
8944             fd_trans_dup(arg1, ret);
8945         }
8946         return ret;
8947 #ifdef TARGET_NR_pipe
8948     case TARGET_NR_pipe:
8949         return do_pipe(cpu_env, arg1, 0, 0);
8950 #endif
8951 #ifdef TARGET_NR_pipe2
8952     case TARGET_NR_pipe2:
8953         return do_pipe(cpu_env, arg1,
8954                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8955 #endif
8956     case TARGET_NR_times:
8957         {
8958             struct target_tms *tmsp;
8959             struct tms tms;
8960             ret = get_errno(times(&tms));
8961             if (arg1) {
8962                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8963                 if (!tmsp)
8964                     return -TARGET_EFAULT;
8965                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8966                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8967                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8968                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8969             }
8970             if (!is_error(ret))
8971                 ret = host_to_target_clock_t(ret);
8972         }
8973         return ret;
8974     case TARGET_NR_acct:
8975         if (arg1 == 0) {
8976             ret = get_errno(acct(NULL));
8977         } else {
8978             if (!(p = lock_user_string(arg1))) {
8979                 return -TARGET_EFAULT;
8980             }
8981             ret = get_errno(acct(path(p)));
8982             unlock_user(p, arg1, 0);
8983         }
8984         return ret;
8985 #ifdef TARGET_NR_umount2
8986     case TARGET_NR_umount2:
8987         if (!(p = lock_user_string(arg1)))
8988             return -TARGET_EFAULT;
8989         ret = get_errno(umount2(p, arg2));
8990         unlock_user(p, arg1, 0);
8991         return ret;
8992 #endif
8993     case TARGET_NR_ioctl:
8994         return do_ioctl(arg1, arg2, arg3);
8995 #ifdef TARGET_NR_fcntl
8996     case TARGET_NR_fcntl:
8997         return do_fcntl(arg1, arg2, arg3);
8998 #endif
8999     case TARGET_NR_setpgid:
9000         return get_errno(setpgid(arg1, arg2));
9001     case TARGET_NR_umask:
9002         return get_errno(umask(arg1));
9003     case TARGET_NR_chroot:
9004         if (!(p = lock_user_string(arg1)))
9005             return -TARGET_EFAULT;
9006         ret = get_errno(chroot(p));
9007         unlock_user(p, arg1, 0);
9008         return ret;
9009 #ifdef TARGET_NR_dup2
9010     case TARGET_NR_dup2:
9011         ret = get_errno(dup2(arg1, arg2));
9012         if (ret >= 0) {
9013             fd_trans_dup(arg1, arg2);
9014         }
9015         return ret;
9016 #endif
9017 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9018     case TARGET_NR_dup3:
9019     {
9020         int host_flags;
9021 
9022         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9023             return -TARGET_EINVAL;
9024         }
9025         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9026         ret = get_errno(dup3(arg1, arg2, host_flags));
9027         if (ret >= 0) {
9028             fd_trans_dup(arg1, arg2);
9029         }
9030         return ret;
9031     }
9032 #endif
9033 #ifdef TARGET_NR_getppid /* not on alpha */
9034     case TARGET_NR_getppid:
9035         return get_errno(getppid());
9036 #endif
9037 #ifdef TARGET_NR_getpgrp
9038     case TARGET_NR_getpgrp:
9039         return get_errno(getpgrp());
9040 #endif
9041     case TARGET_NR_setsid:
9042         return get_errno(setsid());
9043 #ifdef TARGET_NR_sigaction
9044     case TARGET_NR_sigaction:
9045         {
9046 #if defined(TARGET_MIPS)
9047             struct target_sigaction act, oact, *pact, *old_act;
9048 
9049             if (arg2) {
9050                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9051                     return -TARGET_EFAULT;
9052                 act._sa_handler = old_act->_sa_handler;
9053                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9054                 act.sa_flags = old_act->sa_flags;
9055                 unlock_user_struct(old_act, arg2, 0);
9056                 pact = &act;
9057             } else {
9058                 pact = NULL;
9059             }
9060 
9061             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9062 
9063             if (!is_error(ret) && arg3) {
9064                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9065                     return -TARGET_EFAULT;
9066                 old_act->_sa_handler = oact._sa_handler;
9067                 old_act->sa_flags = oact.sa_flags;
9068                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9069                 old_act->sa_mask.sig[1] = 0;
9070                 old_act->sa_mask.sig[2] = 0;
9071                 old_act->sa_mask.sig[3] = 0;
9072                 unlock_user_struct(old_act, arg3, 1);
9073             }
9074 #else
9075             struct target_old_sigaction *old_act;
9076             struct target_sigaction act, oact, *pact;
9077             if (arg2) {
9078                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9079                     return -TARGET_EFAULT;
9080                 act._sa_handler = old_act->_sa_handler;
9081                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9082                 act.sa_flags = old_act->sa_flags;
9083 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9084                 act.sa_restorer = old_act->sa_restorer;
9085 #endif
9086                 unlock_user_struct(old_act, arg2, 0);
9087                 pact = &act;
9088             } else {
9089                 pact = NULL;
9090             }
9091             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9092             if (!is_error(ret) && arg3) {
9093                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9094                     return -TARGET_EFAULT;
9095                 old_act->_sa_handler = oact._sa_handler;
9096                 old_act->sa_mask = oact.sa_mask.sig[0];
9097                 old_act->sa_flags = oact.sa_flags;
9098 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9099                 old_act->sa_restorer = oact.sa_restorer;
9100 #endif
9101                 unlock_user_struct(old_act, arg3, 1);
9102             }
9103 #endif
9104         }
9105         return ret;
9106 #endif
9107     case TARGET_NR_rt_sigaction:
9108         {
9109             /*
9110              * For Alpha and SPARC this is a 5 argument syscall, with
9111              * a 'restorer' parameter which must be copied into the
9112              * sa_restorer field of the sigaction struct.
9113              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9114              * and arg5 is the sigsetsize.
9115              */
9116 #if defined(TARGET_ALPHA)
9117             target_ulong sigsetsize = arg4;
9118             target_ulong restorer = arg5;
9119 #elif defined(TARGET_SPARC)
9120             target_ulong restorer = arg4;
9121             target_ulong sigsetsize = arg5;
9122 #else
9123             target_ulong sigsetsize = arg4;
9124             target_ulong restorer = 0;
9125 #endif
9126             struct target_sigaction *act = NULL;
9127             struct target_sigaction *oact = NULL;
9128 
9129             if (sigsetsize != sizeof(target_sigset_t)) {
9130                 return -TARGET_EINVAL;
9131             }
9132             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9133                 return -TARGET_EFAULT;
9134             }
9135             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9136                 ret = -TARGET_EFAULT;
9137             } else {
9138                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9139                 if (oact) {
9140                     unlock_user_struct(oact, arg3, 1);
9141                 }
9142             }
9143             if (act) {
9144                 unlock_user_struct(act, arg2, 0);
9145             }
9146         }
9147         return ret;
9148 #ifdef TARGET_NR_sgetmask /* not on alpha */
9149     case TARGET_NR_sgetmask:
9150         {
9151             sigset_t cur_set;
9152             abi_ulong target_set;
9153             ret = do_sigprocmask(0, NULL, &cur_set);
9154             if (!ret) {
9155                 host_to_target_old_sigset(&target_set, &cur_set);
9156                 ret = target_set;
9157             }
9158         }
9159         return ret;
9160 #endif
9161 #ifdef TARGET_NR_ssetmask /* not on alpha */
9162     case TARGET_NR_ssetmask:
9163         {
9164             sigset_t set, oset;
9165             abi_ulong target_set = arg1;
9166             target_to_host_old_sigset(&set, &target_set);
9167             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9168             if (!ret) {
9169                 host_to_target_old_sigset(&target_set, &oset);
9170                 ret = target_set;
9171             }
9172         }
9173         return ret;
9174 #endif
9175 #ifdef TARGET_NR_sigprocmask
9176     case TARGET_NR_sigprocmask:
9177         {
9178 #if defined(TARGET_ALPHA)
9179             sigset_t set, oldset;
9180             abi_ulong mask;
9181             int how;
9182 
9183             switch (arg1) {
9184             case TARGET_SIG_BLOCK:
9185                 how = SIG_BLOCK;
9186                 break;
9187             case TARGET_SIG_UNBLOCK:
9188                 how = SIG_UNBLOCK;
9189                 break;
9190             case TARGET_SIG_SETMASK:
9191                 how = SIG_SETMASK;
9192                 break;
9193             default:
9194                 return -TARGET_EINVAL;
9195             }
9196             mask = arg2;
9197             target_to_host_old_sigset(&set, &mask);
9198 
9199             ret = do_sigprocmask(how, &set, &oldset);
9200             if (!is_error(ret)) {
9201                 host_to_target_old_sigset(&mask, &oldset);
9202                 ret = mask;
9203                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9204             }
9205 #else
9206             sigset_t set, oldset, *set_ptr;
9207             int how;
9208 
9209             if (arg2) {
9210                 switch (arg1) {
9211                 case TARGET_SIG_BLOCK:
9212                     how = SIG_BLOCK;
9213                     break;
9214                 case TARGET_SIG_UNBLOCK:
9215                     how = SIG_UNBLOCK;
9216                     break;
9217                 case TARGET_SIG_SETMASK:
9218                     how = SIG_SETMASK;
9219                     break;
9220                 default:
9221                     return -TARGET_EINVAL;
9222                 }
9223                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9224                     return -TARGET_EFAULT;
9225                 target_to_host_old_sigset(&set, p);
9226                 unlock_user(p, arg2, 0);
9227                 set_ptr = &set;
9228             } else {
9229                 how = 0;
9230                 set_ptr = NULL;
9231             }
9232             ret = do_sigprocmask(how, set_ptr, &oldset);
9233             if (!is_error(ret) && arg3) {
9234                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9235                     return -TARGET_EFAULT;
9236                 host_to_target_old_sigset(p, &oldset);
9237                 unlock_user(p, arg3, sizeof(target_sigset_t));
9238             }
9239 #endif
9240         }
9241         return ret;
9242 #endif
9243     case TARGET_NR_rt_sigprocmask:
9244         {
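                 /*
                  * Unlike the old sigprocmask above, rt_sigprocmask takes an
                  * explicit sigsetsize (arg4), which must match the target's
                  * sigset_t exactly.
                  */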
9245             int how = arg1;
9246             sigset_t set, oldset, *set_ptr;
9247 
9248             if (arg4 != sizeof(target_sigset_t)) {
9249                 return -TARGET_EINVAL;
9250             }
9251 
9252             if (arg2) {
9253                 switch(how) {
9254                 case TARGET_SIG_BLOCK:
9255                     how = SIG_BLOCK;
9256                     break;
9257                 case TARGET_SIG_UNBLOCK:
9258                     how = SIG_UNBLOCK;
9259                     break;
9260                 case TARGET_SIG_SETMASK:
9261                     how = SIG_SETMASK;
9262                     break;
9263                 default:
9264                     return -TARGET_EINVAL;
9265                 }
9266                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9267                     return -TARGET_EFAULT;
9268                 target_to_host_sigset(&set, p);
9269                 unlock_user(p, arg2, 0);
9270                 set_ptr = &set;
9271             } else {
9272                 how = 0;
9273                 set_ptr = NULL;
9274             }
9275             ret = do_sigprocmask(how, set_ptr, &oldset);
9276             if (!is_error(ret) && arg3) {
9277                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9278                     return -TARGET_EFAULT;
9279                 host_to_target_sigset(p, &oldset);
9280                 unlock_user(p, arg3, sizeof(target_sigset_t));
9281             }
9282         }
9283         return ret;
9284 #ifdef TARGET_NR_sigpending
9285     case TARGET_NR_sigpending:
9286         {
9287             sigset_t set;
9288             ret = get_errno(sigpending(&set));
9289             if (!is_error(ret)) {
9290                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9291                     return -TARGET_EFAULT;
9292                 host_to_target_old_sigset(p, &set);
9293                 unlock_user(p, arg1, sizeof(target_sigset_t));
9294             }
9295         }
9296         return ret;
9297 #endif
9298     case TARGET_NR_rt_sigpending:
9299         {
9300             sigset_t set;
9301 
9302             /* Yes, this check is >, not != like most. We follow the kernel's
9303              * logic and it does it like this because it implements
9304              * NR_sigpending through the same code path, and in that case
9305              * the old_sigset_t is smaller in size.
9306              */
9307             if (arg2 > sizeof(target_sigset_t)) {
9308                 return -TARGET_EINVAL;
9309             }
9310 
9311             ret = get_errno(sigpending(&set));
9312             if (!is_error(ret)) {
9313                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9314                     return -TARGET_EFAULT;
9315                 host_to_target_sigset(p, &set);
9316                 unlock_user(p, arg1, sizeof(target_sigset_t));
9317             }
9318         }
9319         return ret;
9320 #ifdef TARGET_NR_sigsuspend
9321     case TARGET_NR_sigsuspend:
9322         {
9323             TaskState *ts = cpu->opaque;
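                 /*
                  * The mask to wait with is stashed in the TaskState; unless
                  * the call is being restarted, in_sigsuspend is set below so
                  * the signal delivery code knows a sigsuspend is in progress.
                  */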
9324 #if defined(TARGET_ALPHA)
9325             abi_ulong mask = arg1;
9326             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9327 #else
9328             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9329                 return -TARGET_EFAULT;
9330             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9331             unlock_user(p, arg1, 0);
9332 #endif
9333             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9334                                                SIGSET_T_SIZE));
9335             if (ret != -TARGET_ERESTARTSYS) {
9336                 ts->in_sigsuspend = 1;
9337             }
9338         }
9339         return ret;
9340 #endif
9341     case TARGET_NR_rt_sigsuspend:
9342         {
9343             TaskState *ts = cpu->opaque;
9344 
9345             if (arg2 != sizeof(target_sigset_t)) {
9346                 return -TARGET_EINVAL;
9347             }
9348             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9349                 return -TARGET_EFAULT;
9350             target_to_host_sigset(&ts->sigsuspend_mask, p);
9351             unlock_user(p, arg1, 0);
9352             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9353                                                SIGSET_T_SIZE));
9354             if (ret != -TARGET_ERESTARTSYS) {
9355                 ts->in_sigsuspend = 1;
9356             }
9357         }
9358         return ret;
9359 #ifdef TARGET_NR_rt_sigtimedwait
9360     case TARGET_NR_rt_sigtimedwait:
9361         {
9362             sigset_t set;
9363             struct timespec uts, *puts;
9364             siginfo_t uinfo;
9365 
9366             if (arg4 != sizeof(target_sigset_t)) {
9367                 return -TARGET_EINVAL;
9368             }
9369 
9370             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9371                 return -TARGET_EFAULT;
9372             target_to_host_sigset(&set, p);
9373             unlock_user(p, arg1, 0);
9374             if (arg3) {
9375                 puts = &uts;
9376                 if (target_to_host_timespec(puts, arg3)) {
9377                     return -TARGET_EFAULT;
9378                 }
9379             } else {
9380                 puts = NULL;
9381             }
9382             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9383                                                  SIGSET_T_SIZE));
9384             if (!is_error(ret)) {
9385                 if (arg2) {
9386                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9387                                   0);
9388                     if (!p) {
9389                         return -TARGET_EFAULT;
9390                     }
9391                     host_to_target_siginfo(p, &uinfo);
9392                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9393                 }
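                     /* sigtimedwait returns a host signal number; translate
                      * it back to the target's numbering. */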
9394                 ret = host_to_target_signal(ret);
9395             }
9396         }
9397         return ret;
9398 #endif
9399 #ifdef TARGET_NR_rt_sigtimedwait_time64
9400     case TARGET_NR_rt_sigtimedwait_time64:
9401         {
9402             sigset_t set;
9403             struct timespec uts, *puts;
9404             siginfo_t uinfo;
9405 
9406             if (arg4 != sizeof(target_sigset_t)) {
9407                 return -TARGET_EINVAL;
9408             }
9409 
9410             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9411             if (!p) {
9412                 return -TARGET_EFAULT;
9413             }
9414             target_to_host_sigset(&set, p);
9415             unlock_user(p, arg1, 0);
9416             if (arg3) {
9417                 puts = &uts;
9418                 if (target_to_host_timespec64(puts, arg3)) {
9419                     return -TARGET_EFAULT;
9420                 }
9421             } else {
9422                 puts = NULL;
9423             }
9424             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9425                                                  SIGSET_T_SIZE));
9426             if (!is_error(ret)) {
9427                 if (arg2) {
9428                     p = lock_user(VERIFY_WRITE, arg2,
9429                                   sizeof(target_siginfo_t), 0);
9430                     if (!p) {
9431                         return -TARGET_EFAULT;
9432                     }
9433                     host_to_target_siginfo(p, &uinfo);
9434                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9435                 }
9436                 ret = host_to_target_signal(ret);
9437             }
9438         }
9439         return ret;
9440 #endif
9441     case TARGET_NR_rt_sigqueueinfo:
9442         {
9443             siginfo_t uinfo;
9444 
9445             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9446             if (!p) {
9447                 return -TARGET_EFAULT;
9448             }
9449             target_to_host_siginfo(&uinfo, p);
9450             unlock_user(p, arg3, 0);
9451             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9452         }
9453         return ret;
9454     case TARGET_NR_rt_tgsigqueueinfo:
9455         {
9456             siginfo_t uinfo;
9457 
9458             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9459             if (!p) {
9460                 return -TARGET_EFAULT;
9461             }
9462             target_to_host_siginfo(&uinfo, p);
9463             unlock_user(p, arg4, 0);
9464             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9465         }
9466         return ret;
9467 #ifdef TARGET_NR_sigreturn
9468     case TARGET_NR_sigreturn:
9469         if (block_signals()) {
9470             return -TARGET_ERESTARTSYS;
9471         }
9472         return do_sigreturn(cpu_env);
9473 #endif
9474     case TARGET_NR_rt_sigreturn:
9475         if (block_signals()) {
9476             return -TARGET_ERESTARTSYS;
9477         }
9478         return do_rt_sigreturn(cpu_env);
9479     case TARGET_NR_sethostname:
9480         if (!(p = lock_user_string(arg1)))
9481             return -TARGET_EFAULT;
9482         ret = get_errno(sethostname(p, arg2));
9483         unlock_user(p, arg1, 0);
9484         return ret;
9485 #ifdef TARGET_NR_setrlimit
9486     case TARGET_NR_setrlimit:
9487         {
9488             int resource = target_to_host_resource(arg1);
9489             struct target_rlimit *target_rlim;
9490             struct rlimit rlim;
9491             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9492                 return -TARGET_EFAULT;
9493             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9494             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9495             unlock_user_struct(target_rlim, arg2, 0);
9496             /*
9497              * If we just passed through resource limit settings for memory then
9498              * they would also apply to QEMU's own allocations, and QEMU will
9499              * crash or hang or die if its allocations fail. Ideally we would
9500              * track the guest allocations in QEMU and apply the limits ourselves.
9501              * For now, just tell the guest the call succeeded but don't actually
9502              * limit anything.
9503              */
9504             if (resource != RLIMIT_AS &&
9505                 resource != RLIMIT_DATA &&
9506                 resource != RLIMIT_STACK) {
9507                 return get_errno(setrlimit(resource, &rlim));
9508             } else {
9509                 return 0;
9510             }
9511         }
9512 #endif
9513 #ifdef TARGET_NR_getrlimit
9514     case TARGET_NR_getrlimit:
9515         {
9516             int resource = target_to_host_resource(arg1);
9517             struct target_rlimit *target_rlim;
9518             struct rlimit rlim;
9519 
9520             ret = get_errno(getrlimit(resource, &rlim));
9521             if (!is_error(ret)) {
9522                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9523                     return -TARGET_EFAULT;
9524                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9525                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9526                 unlock_user_struct(target_rlim, arg2, 1);
9527             }
9528         }
9529         return ret;
9530 #endif
9531     case TARGET_NR_getrusage:
9532         {
9533             struct rusage rusage;
9534             ret = get_errno(getrusage(arg1, &rusage));
9535             if (!is_error(ret)) {
9536                 ret = host_to_target_rusage(arg2, &rusage);
9537             }
9538         }
9539         return ret;
9540 #if defined(TARGET_NR_gettimeofday)
9541     case TARGET_NR_gettimeofday:
9542         {
9543             struct timeval tv;
9544             struct timezone tz;
9545 
9546             ret = get_errno(gettimeofday(&tv, &tz));
9547             if (!is_error(ret)) {
9548                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9549                     return -TARGET_EFAULT;
9550                 }
9551                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9552                     return -TARGET_EFAULT;
9553                 }
9554             }
9555         }
9556         return ret;
9557 #endif
9558 #if defined(TARGET_NR_settimeofday)
9559     case TARGET_NR_settimeofday:
9560         {
9561             struct timeval tv, *ptv = NULL;
9562             struct timezone tz, *ptz = NULL;
9563 
9564             if (arg1) {
9565                 if (copy_from_user_timeval(&tv, arg1)) {
9566                     return -TARGET_EFAULT;
9567                 }
9568                 ptv = &tv;
9569             }
9570 
9571             if (arg2) {
9572                 if (copy_from_user_timezone(&tz, arg2)) {
9573                     return -TARGET_EFAULT;
9574                 }
9575                 ptz = &tz;
9576             }
9577 
9578             return get_errno(settimeofday(ptv, ptz));
9579         }
9580 #endif
9581 #if defined(TARGET_NR_select)
9582     case TARGET_NR_select:
9583 #if defined(TARGET_WANT_NI_OLD_SELECT)
9584         /* some architectures used to have old_select here
9585          * but now return ENOSYS for it.
9586          */
9587         ret = -TARGET_ENOSYS;
9588 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9589         ret = do_old_select(arg1);
9590 #else
9591         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9592 #endif
9593         return ret;
9594 #endif
9595 #ifdef TARGET_NR_pselect6
9596     case TARGET_NR_pselect6:
9597         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9598 #endif
9599 #ifdef TARGET_NR_pselect6_time64
9600     case TARGET_NR_pselect6_time64:
9601         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9602 #endif
9603 #ifdef TARGET_NR_symlink
9604     case TARGET_NR_symlink:
9605         {
9606             void *p2;
9607             p = lock_user_string(arg1);
9608             p2 = lock_user_string(arg2);
9609             if (!p || !p2)
9610                 ret = -TARGET_EFAULT;
9611             else
9612                 ret = get_errno(symlink(p, p2));
9613             unlock_user(p2, arg2, 0);
9614             unlock_user(p, arg1, 0);
9615         }
9616         return ret;
9617 #endif
9618 #if defined(TARGET_NR_symlinkat)
9619     case TARGET_NR_symlinkat:
9620         {
9621             void *p2;
9622             p  = lock_user_string(arg1);
9623             p2 = lock_user_string(arg3);
9624             if (!p || !p2)
9625                 ret = -TARGET_EFAULT;
9626             else
9627                 ret = get_errno(symlinkat(p, arg2, p2));
9628             unlock_user(p2, arg3, 0);
9629             unlock_user(p, arg1, 0);
9630         }
9631         return ret;
9632 #endif
9633 #ifdef TARGET_NR_readlink
9634     case TARGET_NR_readlink:
9635         {
9636             void *p2;
9637             p = lock_user_string(arg1);
9638             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9639             if (!p || !p2) {
9640                 ret = -TARGET_EFAULT;
9641             } else if (!arg3) {
9642                 /* Short circuit this for the magic exe check. */
9643                 ret = -TARGET_EINVAL;
9644             } else if (is_proc_myself((const char *)p, "exe")) {
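                     /*
                      * readlink on /proc/self/exe is intercepted so the guest
                      * sees the path of the program being emulated (exec_path)
                      * rather than the QEMU binary itself.
                      */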
9645                 char real[PATH_MAX], *temp;
9646                 temp = realpath(exec_path, real);
9647                 /* Return value is # of bytes that we wrote to the buffer. */
9648                 if (temp == NULL) {
9649                     ret = get_errno(-1);
9650                 } else {
9651                     /* Don't worry about sign mismatch as earlier mapping
9652                      * logic would have thrown a bad address error. */
9653                     ret = MIN(strlen(real), arg3);
9654                     /* We cannot NUL terminate the string. */
9655                     memcpy(p2, real, ret);
9656                 }
9657             } else {
9658                 ret = get_errno(readlink(path(p), p2, arg3));
9659             }
9660             unlock_user(p2, arg2, ret);
9661             unlock_user(p, arg1, 0);
9662         }
9663         return ret;
9664 #endif
9665 #if defined(TARGET_NR_readlinkat)
9666     case TARGET_NR_readlinkat:
9667         {
9668             void *p2;
9669             p  = lock_user_string(arg2);
9670             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9671             if (!p || !p2) {
9672                 ret = -TARGET_EFAULT;
9673             } else if (is_proc_myself((const char *)p, "exe")) {
9674                 char real[PATH_MAX], *temp;
9675                 temp = realpath(exec_path, real);
9676             ret = temp == NULL ? get_errno(-1) : strlen(real);
9677                 snprintf((char *)p2, arg4, "%s", real);
9678             } else {
9679                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9680             }
9681             unlock_user(p2, arg3, ret);
9682             unlock_user(p, arg2, 0);
9683         }
9684         return ret;
9685 #endif
9686 #ifdef TARGET_NR_swapon
9687     case TARGET_NR_swapon:
9688         if (!(p = lock_user_string(arg1)))
9689             return -TARGET_EFAULT;
9690         ret = get_errno(swapon(p, arg2));
9691         unlock_user(p, arg1, 0);
9692         return ret;
9693 #endif
9694     case TARGET_NR_reboot:
9695         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9696            /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2; ignored otherwise */
9697            p = lock_user_string(arg4);
9698            if (!p) {
9699                return -TARGET_EFAULT;
9700            }
9701            ret = get_errno(reboot(arg1, arg2, arg3, p));
9702            unlock_user(p, arg4, 0);
9703         } else {
9704            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9705         }
9706         return ret;
9707 #ifdef TARGET_NR_mmap
9708     case TARGET_NR_mmap:
9709 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9710     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9711     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9712     || defined(TARGET_S390X)
9713         {
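                 /*
                  * On these targets the old mmap syscall passes a single guest
                  * pointer to a block of six arguments, so unpack that block
                  * before calling target_mmap().
                  */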
9714             abi_ulong *v;
9715             abi_ulong v1, v2, v3, v4, v5, v6;
9716             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9717                 return -TARGET_EFAULT;
9718             v1 = tswapal(v[0]);
9719             v2 = tswapal(v[1]);
9720             v3 = tswapal(v[2]);
9721             v4 = tswapal(v[3]);
9722             v5 = tswapal(v[4]);
9723             v6 = tswapal(v[5]);
9724             unlock_user(v, arg1, 0);
9725             ret = get_errno(target_mmap(v1, v2, v3,
9726                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9727                                         v5, v6));
9728         }
9729 #else
9730         /* mmap pointers are always untagged */
9731         ret = get_errno(target_mmap(arg1, arg2, arg3,
9732                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9733                                     arg5,
9734                                     arg6));
9735 #endif
9736         return ret;
9737 #endif
9738 #ifdef TARGET_NR_mmap2
9739     case TARGET_NR_mmap2:
9740 #ifndef MMAP_SHIFT
9741 #define MMAP_SHIFT 12
9742 #endif
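             /*
              * mmap2 passes its file offset in units of (1 << MMAP_SHIFT)
              * bytes (4096 by default), hence the shift below.
              */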
9743         ret = target_mmap(arg1, arg2, arg3,
9744                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9745                           arg5, arg6 << MMAP_SHIFT);
9746         return get_errno(ret);
9747 #endif
9748     case TARGET_NR_munmap:
9749         arg1 = cpu_untagged_addr(cpu, arg1);
9750         return get_errno(target_munmap(arg1, arg2));
9751     case TARGET_NR_mprotect:
9752         arg1 = cpu_untagged_addr(cpu, arg1);
9753         {
9754             TaskState *ts = cpu->opaque;
9755             /* Special hack to detect libc making the stack executable.  */
9756             if ((arg3 & PROT_GROWSDOWN)
9757                 && arg1 >= ts->info->stack_limit
9758                 && arg1 <= ts->info->start_stack) {
9759                 arg3 &= ~PROT_GROWSDOWN;
9760                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9761                 arg1 = ts->info->stack_limit;
9762             }
9763         }
9764         return get_errno(target_mprotect(arg1, arg2, arg3));
9765 #ifdef TARGET_NR_mremap
9766     case TARGET_NR_mremap:
9767         arg1 = cpu_untagged_addr(cpu, arg1);
9768         /* mremap new_addr (arg5) is always untagged */
9769         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9770 #endif
9771         /* ??? msync/mlock/munlock are broken for softmmu.  */
9772 #ifdef TARGET_NR_msync
9773     case TARGET_NR_msync:
9774         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9775 #endif
9776 #ifdef TARGET_NR_mlock
9777     case TARGET_NR_mlock:
9778         return get_errno(mlock(g2h(cpu, arg1), arg2));
9779 #endif
9780 #ifdef TARGET_NR_munlock
9781     case TARGET_NR_munlock:
9782         return get_errno(munlock(g2h(cpu, arg1), arg2));
9783 #endif
9784 #ifdef TARGET_NR_mlockall
9785     case TARGET_NR_mlockall:
9786         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9787 #endif
9788 #ifdef TARGET_NR_munlockall
9789     case TARGET_NR_munlockall:
9790         return get_errno(munlockall());
9791 #endif
9792 #ifdef TARGET_NR_truncate
9793     case TARGET_NR_truncate:
9794         if (!(p = lock_user_string(arg1)))
9795             return -TARGET_EFAULT;
9796         ret = get_errno(truncate(p, arg2));
9797         unlock_user(p, arg1, 0);
9798         return ret;
9799 #endif
9800 #ifdef TARGET_NR_ftruncate
9801     case TARGET_NR_ftruncate:
9802         return get_errno(ftruncate(arg1, arg2));
9803 #endif
9804     case TARGET_NR_fchmod:
9805         return get_errno(fchmod(arg1, arg2));
9806 #if defined(TARGET_NR_fchmodat)
9807     case TARGET_NR_fchmodat:
9808         if (!(p = lock_user_string(arg2)))
9809             return -TARGET_EFAULT;
9810         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9811         unlock_user(p, arg2, 0);
9812         return ret;
9813 #endif
9814     case TARGET_NR_getpriority:
9815         /* Note that negative values are valid for getpriority, so we must
9816            differentiate based on errno settings.  */
9817         errno = 0;
9818         ret = getpriority(arg1, arg2);
9819         if (ret == -1 && errno != 0) {
9820             return -host_to_target_errno(errno);
9821         }
9822 #ifdef TARGET_ALPHA
9823         /* Return value is the unbiased priority.  Signal no error.  */
9824         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9825 #else
9826         /* Return value is a biased priority to avoid negative numbers.  */
9827         ret = 20 - ret;
9828 #endif
9829         return ret;
9830     case TARGET_NR_setpriority:
9831         return get_errno(setpriority(arg1, arg2, arg3));
9832 #ifdef TARGET_NR_statfs
9833     case TARGET_NR_statfs:
9834         if (!(p = lock_user_string(arg1))) {
9835             return -TARGET_EFAULT;
9836         }
9837         ret = get_errno(statfs(path(p), &stfs));
9838         unlock_user(p, arg1, 0);
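             /* TARGET_NR_fstatfs below jumps here to share the conversion
              * into the target's statfs layout. */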
9839     convert_statfs:
9840         if (!is_error(ret)) {
9841             struct target_statfs *target_stfs;
9842 
9843             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9844                 return -TARGET_EFAULT;
9845             __put_user(stfs.f_type, &target_stfs->f_type);
9846             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9847             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9848             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9849             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9850             __put_user(stfs.f_files, &target_stfs->f_files);
9851             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9852             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9853             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9854             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9855             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9856 #ifdef _STATFS_F_FLAGS
9857             __put_user(stfs.f_flags, &target_stfs->f_flags);
9858 #else
9859             __put_user(0, &target_stfs->f_flags);
9860 #endif
9861             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9862             unlock_user_struct(target_stfs, arg2, 1);
9863         }
9864         return ret;
9865 #endif
9866 #ifdef TARGET_NR_fstatfs
9867     case TARGET_NR_fstatfs:
9868         ret = get_errno(fstatfs(arg1, &stfs));
9869         goto convert_statfs;
9870 #endif
9871 #ifdef TARGET_NR_statfs64
9872     case TARGET_NR_statfs64:
9873         if (!(p = lock_user_string(arg1))) {
9874             return -TARGET_EFAULT;
9875         }
9876         ret = get_errno(statfs(path(p), &stfs));
9877         unlock_user(p, arg1, 0);
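             /* Shared with TARGET_NR_fstatfs64, which jumps here with stfs
              * already filled in. */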
9878     convert_statfs64:
9879         if (!is_error(ret)) {
9880             struct target_statfs64 *target_stfs;
9881 
9882             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9883                 return -TARGET_EFAULT;
9884             __put_user(stfs.f_type, &target_stfs->f_type);
9885             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9886             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9887             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9888             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9889             __put_user(stfs.f_files, &target_stfs->f_files);
9890             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9891             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9892             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9893             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9894             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9895 #ifdef _STATFS_F_FLAGS
9896             __put_user(stfs.f_flags, &target_stfs->f_flags);
9897 #else
9898             __put_user(0, &target_stfs->f_flags);
9899 #endif
9900             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9901             unlock_user_struct(target_stfs, arg3, 1);
9902         }
9903         return ret;
9904     case TARGET_NR_fstatfs64:
9905         ret = get_errno(fstatfs(arg1, &stfs));
9906         goto convert_statfs64;
9907 #endif
9908 #ifdef TARGET_NR_socketcall
9909     case TARGET_NR_socketcall:
9910         return do_socketcall(arg1, arg2);
9911 #endif
9912 #ifdef TARGET_NR_accept
9913     case TARGET_NR_accept:
9914         return do_accept4(arg1, arg2, arg3, 0);
9915 #endif
9916 #ifdef TARGET_NR_accept4
9917     case TARGET_NR_accept4:
9918         return do_accept4(arg1, arg2, arg3, arg4);
9919 #endif
9920 #ifdef TARGET_NR_bind
9921     case TARGET_NR_bind:
9922         return do_bind(arg1, arg2, arg3);
9923 #endif
9924 #ifdef TARGET_NR_connect
9925     case TARGET_NR_connect:
9926         return do_connect(arg1, arg2, arg3);
9927 #endif
9928 #ifdef TARGET_NR_getpeername
9929     case TARGET_NR_getpeername:
9930         return do_getpeername(arg1, arg2, arg3);
9931 #endif
9932 #ifdef TARGET_NR_getsockname
9933     case TARGET_NR_getsockname:
9934         return do_getsockname(arg1, arg2, arg3);
9935 #endif
9936 #ifdef TARGET_NR_getsockopt
9937     case TARGET_NR_getsockopt:
9938         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9939 #endif
9940 #ifdef TARGET_NR_listen
9941     case TARGET_NR_listen:
9942         return get_errno(listen(arg1, arg2));
9943 #endif
9944 #ifdef TARGET_NR_recv
9945     case TARGET_NR_recv:
9946         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9947 #endif
9948 #ifdef TARGET_NR_recvfrom
9949     case TARGET_NR_recvfrom:
9950         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9951 #endif
9952 #ifdef TARGET_NR_recvmsg
9953     case TARGET_NR_recvmsg:
9954         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9955 #endif
9956 #ifdef TARGET_NR_send
9957     case TARGET_NR_send:
9958         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9959 #endif
9960 #ifdef TARGET_NR_sendmsg
9961     case TARGET_NR_sendmsg:
9962         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9963 #endif
9964 #ifdef TARGET_NR_sendmmsg
9965     case TARGET_NR_sendmmsg:
9966         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9967 #endif
9968 #ifdef TARGET_NR_recvmmsg
9969     case TARGET_NR_recvmmsg:
9970         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9971 #endif
9972 #ifdef TARGET_NR_sendto
9973     case TARGET_NR_sendto:
9974         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9975 #endif
9976 #ifdef TARGET_NR_shutdown
9977     case TARGET_NR_shutdown:
9978         return get_errno(shutdown(arg1, arg2));
9979 #endif
9980 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9981     case TARGET_NR_getrandom:
9982         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9983         if (!p) {
9984             return -TARGET_EFAULT;
9985         }
9986         ret = get_errno(getrandom(p, arg2, arg3));
9987         unlock_user(p, arg1, ret);
9988         return ret;
9989 #endif
9990 #ifdef TARGET_NR_socket
9991     case TARGET_NR_socket:
9992         return do_socket(arg1, arg2, arg3);
9993 #endif
9994 #ifdef TARGET_NR_socketpair
9995     case TARGET_NR_socketpair:
9996         return do_socketpair(arg1, arg2, arg3, arg4);
9997 #endif
9998 #ifdef TARGET_NR_setsockopt
9999     case TARGET_NR_setsockopt:
10000         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10001 #endif
10002 #if defined(TARGET_NR_syslog)
10003     case TARGET_NR_syslog:
10004         {
10005             int len = arg2;
10006 
10007             switch (arg1) {
10008             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10009             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10010             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10011             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10012             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10013             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10014             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10015             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10016                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10017             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10018             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10019             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10020                 {
10021                     if (len < 0) {
10022                         return -TARGET_EINVAL;
10023                     }
10024                     if (len == 0) {
10025                         return 0;
10026                     }
10027                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10028                     if (!p) {
10029                         return -TARGET_EFAULT;
10030                     }
10031                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10032                     unlock_user(p, arg2, arg3);
10033                 }
10034                 return ret;
10035             default:
10036                 return -TARGET_EINVAL;
10037             }
10038         }
10039         break;
10040 #endif
10041     case TARGET_NR_setitimer:
10042         {
10043             struct itimerval value, ovalue, *pvalue;
10044 
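                  /*
                   * The guest's itimerval is two consecutive target_timevals,
                   * so it_value lives sizeof(struct target_timeval) past the
                   * base of the structure.
                   */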
10045             if (arg2) {
10046                 pvalue = &value;
10047                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10048                     || copy_from_user_timeval(&pvalue->it_value,
10049                                               arg2 + sizeof(struct target_timeval)))
10050                     return -TARGET_EFAULT;
10051             } else {
10052                 pvalue = NULL;
10053             }
10054             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10055             if (!is_error(ret) && arg3) {
10056                 if (copy_to_user_timeval(arg3,
10057                                          &ovalue.it_interval)
10058                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10059                                             &ovalue.it_value))
10060                     return -TARGET_EFAULT;
10061             }
10062         }
10063         return ret;
10064     case TARGET_NR_getitimer:
10065         {
10066             struct itimerval value;
10067 
10068             ret = get_errno(getitimer(arg1, &value));
10069             if (!is_error(ret) && arg2) {
10070                 if (copy_to_user_timeval(arg2,
10071                                          &value.it_interval)
10072                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10073                                             &value.it_value))
10074                     return -TARGET_EFAULT;
10075             }
10076         }
10077         return ret;
10078 #ifdef TARGET_NR_stat
10079     case TARGET_NR_stat:
10080         if (!(p = lock_user_string(arg1))) {
10081             return -TARGET_EFAULT;
10082         }
10083         ret = get_errno(stat(path(p), &st));
10084         unlock_user(p, arg1, 0);
10085         goto do_stat;
10086 #endif
10087 #ifdef TARGET_NR_lstat
10088     case TARGET_NR_lstat:
10089         if (!(p = lock_user_string(arg1))) {
10090             return -TARGET_EFAULT;
10091         }
10092         ret = get_errno(lstat(path(p), &st));
10093         unlock_user(p, arg1, 0);
10094         goto do_stat;
10095 #endif
10096 #ifdef TARGET_NR_fstat
10097     case TARGET_NR_fstat:
10098         {
10099             ret = get_errno(fstat(arg1, &st));
10100 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10101         do_stat:
10102 #endif
10103             if (!is_error(ret)) {
10104                 struct target_stat *target_st;
10105 
10106                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10107                     return -TARGET_EFAULT;
10108                 memset(target_st, 0, sizeof(*target_st));
10109                 __put_user(st.st_dev, &target_st->st_dev);
10110                 __put_user(st.st_ino, &target_st->st_ino);
10111                 __put_user(st.st_mode, &target_st->st_mode);
10112                 __put_user(st.st_uid, &target_st->st_uid);
10113                 __put_user(st.st_gid, &target_st->st_gid);
10114                 __put_user(st.st_nlink, &target_st->st_nlink);
10115                 __put_user(st.st_rdev, &target_st->st_rdev);
10116                 __put_user(st.st_size, &target_st->st_size);
10117                 __put_user(st.st_blksize, &target_st->st_blksize);
10118                 __put_user(st.st_blocks, &target_st->st_blocks);
10119                 __put_user(st.st_atime, &target_st->target_st_atime);
10120                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10121                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10122 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10123                 __put_user(st.st_atim.tv_nsec,
10124                            &target_st->target_st_atime_nsec);
10125                 __put_user(st.st_mtim.tv_nsec,
10126                            &target_st->target_st_mtime_nsec);
10127                 __put_user(st.st_ctim.tv_nsec,
10128                            &target_st->target_st_ctime_nsec);
10129 #endif
10130                 unlock_user_struct(target_st, arg2, 1);
10131             }
10132         }
10133         return ret;
10134 #endif
10135     case TARGET_NR_vhangup:
10136         return get_errno(vhangup());
10137 #ifdef TARGET_NR_syscall
10138     case TARGET_NR_syscall:
10139         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10140                           arg6, arg7, arg8, 0);
10141 #endif
10142 #if defined(TARGET_NR_wait4)
10143     case TARGET_NR_wait4:
10144         {
10145             int status;
10146             abi_long status_ptr = arg2;
10147             struct rusage rusage, *rusage_ptr;
10148             abi_ulong target_rusage = arg4;
10149             abi_long rusage_err;
10150             if (target_rusage)
10151                 rusage_ptr = &rusage;
10152             else
10153                 rusage_ptr = NULL;
10154             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10155             if (!is_error(ret)) {
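                      /* Only write a status back if a child was actually
                       * reaped (ret != 0); WNOHANG may legitimately return 0. */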
10156                 if (status_ptr && ret) {
10157                     status = host_to_target_waitstatus(status);
10158                     if (put_user_s32(status, status_ptr))
10159                         return -TARGET_EFAULT;
10160                 }
10161                 if (target_rusage) {
10162                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10163                     if (rusage_err) {
10164                         ret = rusage_err;
10165                     }
10166                 }
10167             }
10168         }
10169         return ret;
10170 #endif
10171 #ifdef TARGET_NR_swapoff
10172     case TARGET_NR_swapoff:
10173         if (!(p = lock_user_string(arg1)))
10174             return -TARGET_EFAULT;
10175         ret = get_errno(swapoff(p));
10176         unlock_user(p, arg1, 0);
10177         return ret;
10178 #endif
10179     case TARGET_NR_sysinfo:
10180         {
10181             struct target_sysinfo *target_value;
10182             struct sysinfo value;
10183             ret = get_errno(sysinfo(&value));
10184             if (!is_error(ret) && arg1)
10185             {
10186                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10187                     return -TARGET_EFAULT;
10188                 __put_user(value.uptime, &target_value->uptime);
10189                 __put_user(value.loads[0], &target_value->loads[0]);
10190                 __put_user(value.loads[1], &target_value->loads[1]);
10191                 __put_user(value.loads[2], &target_value->loads[2]);
10192                 __put_user(value.totalram, &target_value->totalram);
10193                 __put_user(value.freeram, &target_value->freeram);
10194                 __put_user(value.sharedram, &target_value->sharedram);
10195                 __put_user(value.bufferram, &target_value->bufferram);
10196                 __put_user(value.totalswap, &target_value->totalswap);
10197                 __put_user(value.freeswap, &target_value->freeswap);
10198                 __put_user(value.procs, &target_value->procs);
10199                 __put_user(value.totalhigh, &target_value->totalhigh);
10200                 __put_user(value.freehigh, &target_value->freehigh);
10201                 __put_user(value.mem_unit, &target_value->mem_unit);
10202                 unlock_user_struct(target_value, arg1, 1);
10203             }
10204         }
10205         return ret;
10206 #ifdef TARGET_NR_ipc
10207     case TARGET_NR_ipc:
10208         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10209 #endif
10210 #ifdef TARGET_NR_semget
10211     case TARGET_NR_semget:
10212         return get_errno(semget(arg1, arg2, arg3));
10213 #endif
10214 #ifdef TARGET_NR_semop
10215     case TARGET_NR_semop:
10216         return do_semtimedop(arg1, arg2, arg3, 0, false);
10217 #endif
10218 #ifdef TARGET_NR_semtimedop
10219     case TARGET_NR_semtimedop:
10220         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10221 #endif
10222 #ifdef TARGET_NR_semtimedop_time64
10223     case TARGET_NR_semtimedop_time64:
10224         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10225 #endif
10226 #ifdef TARGET_NR_semctl
10227     case TARGET_NR_semctl:
10228         return do_semctl(arg1, arg2, arg3, arg4);
10229 #endif
10230 #ifdef TARGET_NR_msgctl
10231     case TARGET_NR_msgctl:
10232         return do_msgctl(arg1, arg2, arg3);
10233 #endif
10234 #ifdef TARGET_NR_msgget
10235     case TARGET_NR_msgget:
10236         return get_errno(msgget(arg1, arg2));
10237 #endif
10238 #ifdef TARGET_NR_msgrcv
10239     case TARGET_NR_msgrcv:
10240         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10241 #endif
10242 #ifdef TARGET_NR_msgsnd
10243     case TARGET_NR_msgsnd:
10244         return do_msgsnd(arg1, arg2, arg3, arg4);
10245 #endif
10246 #ifdef TARGET_NR_shmget
10247     case TARGET_NR_shmget:
10248         return get_errno(shmget(arg1, arg2, arg3));
10249 #endif
10250 #ifdef TARGET_NR_shmctl
10251     case TARGET_NR_shmctl:
10252         return do_shmctl(arg1, arg2, arg3);
10253 #endif
10254 #ifdef TARGET_NR_shmat
10255     case TARGET_NR_shmat:
10256         return do_shmat(cpu_env, arg1, arg2, arg3);
10257 #endif
10258 #ifdef TARGET_NR_shmdt
10259     case TARGET_NR_shmdt:
10260         return do_shmdt(arg1);
10261 #endif
10262     case TARGET_NR_fsync:
10263         return get_errno(fsync(arg1));
10264     case TARGET_NR_clone:
10265         /* Linux manages to have three different orderings for its
10266          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10267          * match the kernel's CONFIG_CLONE_* settings.
10268          * Microblaze is further special in that it uses a sixth
10269          * implicit argument to clone for the TLS pointer.
10270          */
10271 #if defined(TARGET_MICROBLAZE)
10272         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10273 #elif defined(TARGET_CLONE_BACKWARDS)
10274         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10275 #elif defined(TARGET_CLONE_BACKWARDS2)
10276         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10277 #else
10278         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10279 #endif
10280         return ret;
10281 #ifdef __NR_exit_group
10282         /* new thread calls */
10283     case TARGET_NR_exit_group:
10284         preexit_cleanup(cpu_env, arg1);
10285         return get_errno(exit_group(arg1));
10286 #endif
10287     case TARGET_NR_setdomainname:
10288         if (!(p = lock_user_string(arg1)))
10289             return -TARGET_EFAULT;
10290         ret = get_errno(setdomainname(p, arg2));
10291         unlock_user(p, arg1, 0);
10292         return ret;
10293     case TARGET_NR_uname:
10294         /* no need to transcode because we use the linux syscall */
10295         {
10296             struct new_utsname * buf;
10297 
10298             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10299                 return -TARGET_EFAULT;
10300             ret = get_errno(sys_uname(buf));
10301             if (!is_error(ret)) {
10302                 /* Overwrite the native machine name with whatever is being
10303                    emulated. */
10304                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10305                           sizeof(buf->machine));
10306                 /* Allow the user to override the reported release.  */
10307                 if (qemu_uname_release && *qemu_uname_release) {
10308                     g_strlcpy(buf->release, qemu_uname_release,
10309                               sizeof(buf->release));
10310                 }
10311             }
10312             unlock_user_struct(buf, arg1, 1);
10313         }
10314         return ret;
10315 #ifdef TARGET_I386
10316     case TARGET_NR_modify_ldt:
10317         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10318 #if !defined(TARGET_X86_64)
10319     case TARGET_NR_vm86:
10320         return do_vm86(cpu_env, arg1, arg2);
10321 #endif
10322 #endif
10323 #if defined(TARGET_NR_adjtimex)
10324     case TARGET_NR_adjtimex:
10325         {
10326             struct timex host_buf;
10327 
10328             if (target_to_host_timex(&host_buf, arg1) != 0) {
10329                 return -TARGET_EFAULT;
10330             }
10331             ret = get_errno(adjtimex(&host_buf));
10332             if (!is_error(ret)) {
10333                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10334                     return -TARGET_EFAULT;
10335                 }
10336             }
10337         }
10338         return ret;
10339 #endif
10340 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10341     case TARGET_NR_clock_adjtime:
10342         {
10343             struct timex htx, *phtx = &htx;
10344 
10345             if (target_to_host_timex(phtx, arg2) != 0) {
10346                 return -TARGET_EFAULT;
10347             }
10348             ret = get_errno(clock_adjtime(arg1, phtx));
10349             if (!is_error(ret) && phtx) {
10350                 if (host_to_target_timex(arg2, phtx) != 0) {
10351                     return -TARGET_EFAULT;
10352                 }
10353             }
10354         }
10355         return ret;
10356 #endif
10357 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10358     case TARGET_NR_clock_adjtime64:
10359         {
10360             struct timex htx;
10361 
10362             if (target_to_host_timex64(&htx, arg2) != 0) {
10363                 return -TARGET_EFAULT;
10364             }
10365             ret = get_errno(clock_adjtime(arg1, &htx));
10366             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10367                     return -TARGET_EFAULT;
10368             }
10369         }
10370         return ret;
10371 #endif
10372     case TARGET_NR_getpgid:
10373         return get_errno(getpgid(arg1));
10374     case TARGET_NR_fchdir:
10375         return get_errno(fchdir(arg1));
10376     case TARGET_NR_personality:
10377         return get_errno(personality(arg1));
10378 #ifdef TARGET_NR__llseek /* Not on alpha */
10379     case TARGET_NR__llseek:
10380         {
10381             int64_t res;
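                  /*
                   * Hosts without __NR_llseek (typically 64-bit hosts) can
                   * service this with a plain lseek on the combined 64-bit
                   * offset.
                   */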
10382 #if !defined(__NR_llseek)
10383             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10384             if (res == -1) {
10385                 ret = get_errno(res);
10386             } else {
10387                 ret = 0;
10388             }
10389 #else
10390             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10391 #endif
10392             if ((ret == 0) && put_user_s64(res, arg4)) {
10393                 return -TARGET_EFAULT;
10394             }
10395         }
10396         return ret;
10397 #endif
10398 #ifdef TARGET_NR_getdents
10399     case TARGET_NR_getdents:
10400         return do_getdents(arg1, arg2, arg3);
10401 #endif /* TARGET_NR_getdents */
10402 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10403     case TARGET_NR_getdents64:
10404         return do_getdents64(arg1, arg2, arg3);
10405 #endif /* TARGET_NR_getdents64 */
10406 #if defined(TARGET_NR__newselect)
10407     case TARGET_NR__newselect:
10408         return do_select(arg1, arg2, arg3, arg4, arg5);
10409 #endif
10410 #ifdef TARGET_NR_poll
10411     case TARGET_NR_poll:
10412         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10413 #endif
10414 #ifdef TARGET_NR_ppoll
10415     case TARGET_NR_ppoll:
10416         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10417 #endif
10418 #ifdef TARGET_NR_ppoll_time64
10419     case TARGET_NR_ppoll_time64:
10420         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10421 #endif
10422     case TARGET_NR_flock:
10423         /* NOTE: the flock constant seems to be the same for every
10424            Linux platform */
10425         return get_errno(safe_flock(arg1, arg2));
10426     case TARGET_NR_readv:
10427         {
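                  /* lock_iovec() maps and validates the whole guest iovec
                   * array; on failure it reports the reason via errno, which
                   * is converted to a target errno below. */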
10428             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10429             if (vec != NULL) {
10430                 ret = get_errno(safe_readv(arg1, vec, arg3));
10431                 unlock_iovec(vec, arg2, arg3, 1);
10432             } else {
10433                 ret = -host_to_target_errno(errno);
10434             }
10435         }
10436         return ret;
10437     case TARGET_NR_writev:
10438         {
10439             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10440             if (vec != NULL) {
10441                 ret = get_errno(safe_writev(arg1, vec, arg3));
10442                 unlock_iovec(vec, arg2, arg3, 0);
10443             } else {
10444                 ret = -host_to_target_errno(errno);
10445             }
10446         }
10447         return ret;
10448 #if defined(TARGET_NR_preadv)
10449     case TARGET_NR_preadv:
10450         {
10451             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10452             if (vec != NULL) {
10453                 unsigned long low, high;
10454 
10455                 target_to_host_low_high(arg4, arg5, &low, &high);
10456                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10457                 unlock_iovec(vec, arg2, arg3, 1);
10458             } else {
10459                 ret = -host_to_target_errno(errno);
10460             }
10461         }
10462         return ret;
10463 #endif
10464 #if defined(TARGET_NR_pwritev)
10465     case TARGET_NR_pwritev:
10466         {
10467             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10468             if (vec != NULL) {
10469                 unsigned long low, high;
10470 
10471                 target_to_host_low_high(arg4, arg5, &low, &high);
10472                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10473                 unlock_iovec(vec, arg2, arg3, 0);
10474             } else {
10475                 ret = -host_to_target_errno(errno);
10476             }
10477         }
10478         return ret;
10479 #endif
10480     case TARGET_NR_getsid:
10481         return get_errno(getsid(arg1));
10482 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10483     case TARGET_NR_fdatasync:
10484         return get_errno(fdatasync(arg1));
10485 #endif
10486     case TARGET_NR_sched_getaffinity:
10487         {
10488             unsigned int mask_size;
10489             unsigned long *mask;
10490 
10491             /*
10492              * sched_getaffinity needs multiples of ulong, so we need to take
10493              * care of mismatches between target ulong and host ulong sizes.
10494              */
10495             if (arg2 & (sizeof(abi_ulong) - 1)) {
10496                 return -TARGET_EINVAL;
10497             }
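                  /* Round the guest-supplied size up to a whole number of
                   * host longs, as the host syscall works in that unit. */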
10498             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10499 
10500             mask = alloca(mask_size);
10501             memset(mask, 0, mask_size);
10502             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10503 
10504             if (!is_error(ret)) {
10505                 if (ret > arg2) {
10506                     /* More data returned than the caller's buffer will fit.
10507                      * This only happens if sizeof(abi_long) < sizeof(long)
10508                      * and the caller passed us a buffer holding an odd number
10509                      * of abi_longs. If the host kernel is actually using the
10510                      * extra 4 bytes then fail EINVAL; otherwise we can just
10511                      * ignore them and only copy the interesting part.
10512                      */
10513                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10514                     if (numcpus > arg2 * 8) {
10515                         return -TARGET_EINVAL;
10516                     }
10517                     ret = arg2;
10518                 }
10519 
10520                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10521                     return -TARGET_EFAULT;
10522                 }
10523             }
10524         }
10525         return ret;
10526     case TARGET_NR_sched_setaffinity:
10527         {
10528             unsigned int mask_size;
10529             unsigned long *mask;
10530 
10531             /*
10532              * sched_setaffinity needs multiples of ulong, so we need to take
10533              * care of mismatches between target ulong and host ulong sizes.
10534              */
10535             if (arg2 & (sizeof(abi_ulong) - 1)) {
10536                 return -TARGET_EINVAL;
10537             }
10538             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10539             mask = alloca(mask_size);
10540 
10541             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10542             if (ret) {
10543                 return ret;
10544             }
10545 
10546             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10547         }
10548     case TARGET_NR_getcpu:
10549         {
10550             unsigned cpu, node;
10551             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10552                                        arg2 ? &node : NULL,
10553                                        NULL));
10554             if (is_error(ret)) {
10555                 return ret;
10556             }
10557             if (arg1 && put_user_u32(cpu, arg1)) {
10558                 return -TARGET_EFAULT;
10559             }
10560             if (arg2 && put_user_u32(node, arg2)) {
10561                 return -TARGET_EFAULT;
10562             }
10563         }
10564         return ret;
10565     case TARGET_NR_sched_setparam:
10566         {
10567             struct sched_param *target_schp;
10568             struct sched_param schp;
10569 
10570             if (arg2 == 0) {
10571                 return -TARGET_EINVAL;
10572             }
10573             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10574                 return -TARGET_EFAULT;
10575             schp.sched_priority = tswap32(target_schp->sched_priority);
10576             unlock_user_struct(target_schp, arg2, 0);
10577             return get_errno(sched_setparam(arg1, &schp));
10578         }
10579     case TARGET_NR_sched_getparam:
10580         {
10581             struct sched_param *target_schp;
10582             struct sched_param schp;
10583 
10584             if (arg2 == 0) {
10585                 return -TARGET_EINVAL;
10586             }
10587             ret = get_errno(sched_getparam(arg1, &schp));
10588             if (!is_error(ret)) {
10589                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10590                     return -TARGET_EFAULT;
10591                 target_schp->sched_priority = tswap32(schp.sched_priority);
10592                 unlock_user_struct(target_schp, arg2, 1);
10593             }
10594         }
10595         return ret;
10596     case TARGET_NR_sched_setscheduler:
10597         {
10598             struct sched_param *target_schp;
10599             struct sched_param schp;
10600             if (arg3 == 0) {
10601                 return -TARGET_EINVAL;
10602             }
10603             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10604                 return -TARGET_EFAULT;
10605             schp.sched_priority = tswap32(target_schp->sched_priority);
10606             unlock_user_struct(target_schp, arg3, 0);
10607             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10608         }
10609     case TARGET_NR_sched_getscheduler:
10610         return get_errno(sched_getscheduler(arg1));
10611     case TARGET_NR_sched_yield:
10612         return get_errno(sched_yield());
10613     case TARGET_NR_sched_get_priority_max:
10614         return get_errno(sched_get_priority_max(arg1));
10615     case TARGET_NR_sched_get_priority_min:
10616         return get_errno(sched_get_priority_min(arg1));
10617 #ifdef TARGET_NR_sched_rr_get_interval
10618     case TARGET_NR_sched_rr_get_interval:
10619         {
10620             struct timespec ts;
10621             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10622             if (!is_error(ret)) {
10623                 ret = host_to_target_timespec(arg2, &ts);
10624             }
10625         }
10626         return ret;
10627 #endif
10628 #ifdef TARGET_NR_sched_rr_get_interval_time64
10629     case TARGET_NR_sched_rr_get_interval_time64:
10630         {
10631             struct timespec ts;
10632             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10633             if (!is_error(ret)) {
10634                 ret = host_to_target_timespec64(arg2, &ts);
10635             }
10636         }
10637         return ret;
10638 #endif
10639 #if defined(TARGET_NR_nanosleep)
10640     case TARGET_NR_nanosleep:
10641         {
10642             struct timespec req, rem;
10643             target_to_host_timespec(&req, arg1);
10644             ret = get_errno(safe_nanosleep(&req, &rem));
10645             if (is_error(ret) && arg2) {
10646                 host_to_target_timespec(arg2, &rem);
10647             }
10648         }
10649         return ret;
10650 #endif
10651     case TARGET_NR_prctl:
10652         switch (arg1) {
10653         case PR_GET_PDEATHSIG:
10654         {
10655             int deathsig;
10656             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10657             if (!is_error(ret) && arg2
10658                 && put_user_s32(deathsig, arg2)) {
10659                 return -TARGET_EFAULT;
10660             }
10661             return ret;
10662         }
10663 #ifdef PR_GET_NAME
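              /*
               * PR_GET_NAME and PR_SET_NAME operate on the kernel's 16-byte task
               * comm field (TASK_COMM_LEN), so a fixed 16-byte guest buffer is
               * all that needs to be locked here.
               */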
10664         case PR_GET_NAME:
10665         {
10666             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10667             if (!name) {
10668                 return -TARGET_EFAULT;
10669             }
10670             ret = get_errno(prctl(arg1, (unsigned long)name,
10671                                   arg3, arg4, arg5));
10672             unlock_user(name, arg2, 16);
10673             return ret;
10674         }
10675         case PR_SET_NAME:
10676         {
10677             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10678             if (!name) {
10679                 return -TARGET_EFAULT;
10680             }
10681             ret = get_errno(prctl(arg1, (unsigned long)name,
10682                                   arg3, arg4, arg5));
10683             unlock_user(name, arg2, 0);
10684             return ret;
10685         }
10686 #endif
10687 #ifdef TARGET_MIPS
10688         case TARGET_PR_GET_FP_MODE:
10689         {
10690             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10691             ret = 0;
10692             if (env->CP0_Status & (1 << CP0St_FR)) {
10693                 ret |= TARGET_PR_FP_MODE_FR;
10694             }
10695             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10696                 ret |= TARGET_PR_FP_MODE_FRE;
10697             }
10698             return ret;
10699         }
10700         case TARGET_PR_SET_FP_MODE:
10701         {
10702             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10703             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10704             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10705             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10706             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10707 
10708             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10709                                             TARGET_PR_FP_MODE_FRE;
10710 
10711             /* If nothing to change, return right away, successfully.  */
10712             if (old_fr == new_fr && old_fre == new_fre) {
10713                 return 0;
10714             }
10715             /* Check the value is valid */
10716             if (arg2 & ~known_bits) {
10717                 return -TARGET_EOPNOTSUPP;
10718             }
10719             /* Setting FRE without FR is not supported.  */
10720             if (new_fre && !new_fr) {
10721                 return -TARGET_EOPNOTSUPP;
10722             }
10723             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10724                 /* FR1 is not supported */
10725                 return -TARGET_EOPNOTSUPP;
10726             }
10727             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10728                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10729                 /* cannot set FR=0 */
10730                 return -TARGET_EOPNOTSUPP;
10731             }
10732             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10733                 /* Cannot set FRE=1 */
10734                 return -TARGET_EOPNOTSUPP;
10735             }
10736 
10737             int i;
10738             fpr_t *fpr = env->active_fpu.fpr;
10739             for (i = 0; i < 32 ; i += 2) {
10740                 if (!old_fr && new_fr) {
10741                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10742                 } else if (old_fr && !new_fr) {
10743                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10744                 }
10745             }
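                  /*
                   * Sketch of the conversion above: with FR=0 a 64-bit value is
                   * split across an even/odd register pair, the odd register
                   * holding the upper 32 bits; switching to FR=1 folds that
                   * upper half into the high word of the now 64-bit even
                   * register, and switching back does the reverse.
                   */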
10746 
10747             if (new_fr) {
10748                 env->CP0_Status |= (1 << CP0St_FR);
10749                 env->hflags |= MIPS_HFLAG_F64;
10750             } else {
10751                 env->CP0_Status &= ~(1 << CP0St_FR);
10752                 env->hflags &= ~MIPS_HFLAG_F64;
10753             }
10754             if (new_fre) {
10755                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10756                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10757                     env->hflags |= MIPS_HFLAG_FRE;
10758                 }
10759             } else {
10760                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10761                 env->hflags &= ~MIPS_HFLAG_FRE;
10762             }
10763 
10764             return 0;
10765         }
10766 #endif /* MIPS */
10767 #ifdef TARGET_AARCH64
10768         case TARGET_PR_SVE_SET_VL:
10769             /*
10770              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10771              * PR_SVE_VL_INHERIT.  Note the kernel definition
10772              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10773              * even though the current architectural maximum is VQ=16.
10774              */
10775             ret = -TARGET_EINVAL;
10776             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10777                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10778                 CPUARMState *env = cpu_env;
10779                 ARMCPU *cpu = env_archcpu(env);
10780                 uint32_t vq, old_vq;
10781 
10782                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10783                 vq = MAX(arg2 / 16, 1);
10784                 vq = MIN(vq, cpu->sve_max_vq);
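                      /*
                       * Worked example (hypothetical values): arg2 == 64
                       * requests a 64-byte vector length, i.e. vq = 4; on a CPU
                       * model with sve_max_vq == 2 it is clamped here and the
                       * syscall returns 2 * 16 = 32.
                       */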
10785 
10786                 if (vq < old_vq) {
10787                     aarch64_sve_narrow_vq(env, vq);
10788                 }
10789                 env->vfp.zcr_el[1] = vq - 1;
10790                 arm_rebuild_hflags(env);
10791                 ret = vq * 16;
10792             }
10793             return ret;
10794         case TARGET_PR_SVE_GET_VL:
10795             ret = -TARGET_EINVAL;
10796             {
10797                 ARMCPU *cpu = env_archcpu(cpu_env);
10798                 if (cpu_isar_feature(aa64_sve, cpu)) {
10799                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10800                 }
10801             }
10802             return ret;
10803         case TARGET_PR_PAC_RESET_KEYS:
10804             {
10805                 CPUARMState *env = cpu_env;
10806                 ARMCPU *cpu = env_archcpu(env);
10807 
10808                 if (arg3 || arg4 || arg5) {
10809                     return -TARGET_EINVAL;
10810                 }
10811                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10812                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10813                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10814                                TARGET_PR_PAC_APGAKEY);
10815                     int ret = 0;
10816                     Error *err = NULL;
10817 
10818                     if (arg2 == 0) {
10819                         arg2 = all;
10820                     } else if (arg2 & ~all) {
10821                         return -TARGET_EINVAL;
10822                     }
10823                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10824                         ret |= qemu_guest_getrandom(&env->keys.apia,
10825                                                     sizeof(ARMPACKey), &err);
10826                     }
10827                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10828                         ret |= qemu_guest_getrandom(&env->keys.apib,
10829                                                     sizeof(ARMPACKey), &err);
10830                     }
10831                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10832                         ret |= qemu_guest_getrandom(&env->keys.apda,
10833                                                     sizeof(ARMPACKey), &err);
10834                     }
10835                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10836                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10837                                                     sizeof(ARMPACKey), &err);
10838                     }
10839                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10840                         ret |= qemu_guest_getrandom(&env->keys.apga,
10841                                                     sizeof(ARMPACKey), &err);
10842                     }
10843                     if (ret != 0) {
10844                         /*
10845                          * Some unknown failure in the crypto.  The best
10846                          * we can do is log it and fail the syscall.
10847                          * The real syscall cannot fail this way.
10848                          */
10849                         qemu_log_mask(LOG_UNIMP,
10850                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10851                                       error_get_pretty(err));
10852                         error_free(err);
10853                         return -TARGET_EIO;
10854                     }
10855                     return 0;
10856                 }
10857             }
10858             return -TARGET_EINVAL;
10859         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10860             {
10861                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10862                 CPUARMState *env = cpu_env;
10863                 ARMCPU *cpu = env_archcpu(env);
10864 
10865                 if (cpu_isar_feature(aa64_mte, cpu)) {
10866                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10867                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10868                 }
10869 
10870                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10871                     return -TARGET_EINVAL;
10872                 }
10873                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10874 
10875                 if (cpu_isar_feature(aa64_mte, cpu)) {
10876                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10877                     case TARGET_PR_MTE_TCF_NONE:
10878                     case TARGET_PR_MTE_TCF_SYNC:
10879                     case TARGET_PR_MTE_TCF_ASYNC:
10880                         break;
10881                     default:
10882                         return -TARGET_EINVAL;
10883                     }
10884 
10885                     /*
10886                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10887                      * Note that the syscall values are consistent with hw.
10888                      */
10889                     env->cp15.sctlr_el[1] =
10890                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10891                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10892 
10893                     /*
10894                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10895                      * Note that the syscall uses an include mask,
10896                      * and hardware uses an exclude mask -- invert.
10897                      */
10898                     env->cp15.gcr_el1 =
10899                         deposit64(env->cp15.gcr_el1, 0, 16,
10900                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
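                          /*
                           * Example of the inversion (hypothetical value): an
                           * include mask of 0x0003 (allow tags 0 and 1) becomes
                           * a GCR_EL1.Exclude field of 0xfffc.
                           */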
10901                     arm_rebuild_hflags(env);
10902                 }
10903                 return 0;
10904             }
10905         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10906             {
10907                 abi_long ret = 0;
10908                 CPUARMState *env = cpu_env;
10909                 ARMCPU *cpu = env_archcpu(env);
10910 
10911                 if (arg2 || arg3 || arg4 || arg5) {
10912                     return -TARGET_EINVAL;
10913                 }
10914                 if (env->tagged_addr_enable) {
10915                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10916                 }
10917                 if (cpu_isar_feature(aa64_mte, cpu)) {
10918                     /* See above. */
10919                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10920                             << TARGET_PR_MTE_TCF_SHIFT);
10921                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10922                                     ~env->cp15.gcr_el1);
10923                 }
10924                 return ret;
10925             }
10926 #endif /* AARCH64 */
10927         case PR_GET_SECCOMP:
10928         case PR_SET_SECCOMP:
10929             /* Disable seccomp to prevent the target from disabling
10930              * syscalls that we need. */
10931             return -TARGET_EINVAL;
10932         default:
10933             /* Most prctl options have no pointer arguments */
10934             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10935         }
10936         break;
10937 #ifdef TARGET_NR_arch_prctl
10938     case TARGET_NR_arch_prctl:
10939         return do_arch_prctl(cpu_env, arg1, arg2);
10940 #endif
10941 #ifdef TARGET_NR_pread64
10942     case TARGET_NR_pread64:
10943         if (regpairs_aligned(cpu_env, num)) {
10944             arg4 = arg5;
10945             arg5 = arg6;
10946         }
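              /*
               * On 32-bit ABIs that pass 64-bit values in aligned register
               * pairs, a padding argument pushes the offset up to arg5/arg6;
               * the shuffle above moves it back so that
               * target_offset64(arg4, arg5) below can reassemble the two
               * 32-bit halves into the host 64-bit offset.
               */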
10947         if (arg2 == 0 && arg3 == 0) {
10948             /* Special-case NULL buffer and zero length, which should succeed */
10949             p = 0;
10950         } else {
10951             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10952             if (!p) {
10953                 return -TARGET_EFAULT;
10954             }
10955         }
10956         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10957         unlock_user(p, arg2, ret);
10958         return ret;
10959     case TARGET_NR_pwrite64:
10960         if (regpairs_aligned(cpu_env, num)) {
10961             arg4 = arg5;
10962             arg5 = arg6;
10963         }
10964         if (arg2 == 0 && arg3 == 0) {
10965             /* Special-case NULL buffer and zero length, which should succeed */
10966             p = 0;
10967         } else {
10968             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10969             if (!p) {
10970                 return -TARGET_EFAULT;
10971             }
10972         }
10973         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10974         unlock_user(p, arg2, 0);
10975         return ret;
10976 #endif
10977     case TARGET_NR_getcwd:
10978         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10979             return -TARGET_EFAULT;
10980         ret = get_errno(sys_getcwd1(p, arg2));
10981         unlock_user(p, arg1, ret);
10982         return ret;
10983     case TARGET_NR_capget:
10984     case TARGET_NR_capset:
10985     {
10986         struct target_user_cap_header *target_header;
10987         struct target_user_cap_data *target_data = NULL;
10988         struct __user_cap_header_struct header;
10989         struct __user_cap_data_struct data[2];
10990         struct __user_cap_data_struct *dataptr = NULL;
10991         int i, target_datalen;
10992         int data_items = 1;
10993 
10994         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10995             return -TARGET_EFAULT;
10996         }
10997         header.version = tswap32(target_header->version);
10998         header.pid = tswap32(target_header->pid);
10999 
11000         if (header.version != _LINUX_CAPABILITY_VERSION) {
11001             /* Versions 2 and up take a pointer to two user_data structs */
11002             data_items = 2;
11003         }
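              /*
               * The original _LINUX_CAPABILITY_VERSION (v1) uses a single
               * 32-bit cap_data struct; later versions describe 64 capability
               * bits and therefore pass an array of two structs, which is what
               * data_items tracks here.
               */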
11004 
11005         target_datalen = sizeof(*target_data) * data_items;
11006 
11007         if (arg2) {
11008             if (num == TARGET_NR_capget) {
11009                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11010             } else {
11011                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11012             }
11013             if (!target_data) {
11014                 unlock_user_struct(target_header, arg1, 0);
11015                 return -TARGET_EFAULT;
11016             }
11017 
11018             if (num == TARGET_NR_capset) {
11019                 for (i = 0; i < data_items; i++) {
11020                     data[i].effective = tswap32(target_data[i].effective);
11021                     data[i].permitted = tswap32(target_data[i].permitted);
11022                     data[i].inheritable = tswap32(target_data[i].inheritable);
11023                 }
11024             }
11025 
11026             dataptr = data;
11027         }
11028 
11029         if (num == TARGET_NR_capget) {
11030             ret = get_errno(capget(&header, dataptr));
11031         } else {
11032             ret = get_errno(capset(&header, dataptr));
11033         }
11034 
11035         /* The kernel always updates version for both capget and capset */
11036         target_header->version = tswap32(header.version);
11037         unlock_user_struct(target_header, arg1, 1);
11038 
11039         if (arg2) {
11040             if (num == TARGET_NR_capget) {
11041                 for (i = 0; i < data_items; i++) {
11042                     target_data[i].effective = tswap32(data[i].effective);
11043                     target_data[i].permitted = tswap32(data[i].permitted);
11044                     target_data[i].inheritable = tswap32(data[i].inheritable);
11045                 }
11046                 unlock_user(target_data, arg2, target_datalen);
11047             } else {
11048                 unlock_user(target_data, arg2, 0);
11049             }
11050         }
11051         return ret;
11052     }
11053     case TARGET_NR_sigaltstack:
11054         return do_sigaltstack(arg1, arg2, cpu_env);
11055 
11056 #ifdef CONFIG_SENDFILE
11057 #ifdef TARGET_NR_sendfile
11058     case TARGET_NR_sendfile:
11059     {
11060         off_t *offp = NULL;
11061         off_t off;
11062         if (arg3) {
11063             ret = get_user_sal(off, arg3);
11064             if (is_error(ret)) {
11065                 return ret;
11066             }
11067             offp = &off;
11068         }
11069         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11070         if (!is_error(ret) && arg3) {
11071             abi_long ret2 = put_user_sal(off, arg3);
11072             if (is_error(ret2)) {
11073                 ret = ret2;
11074             }
11075         }
11076         return ret;
11077     }
11078 #endif
11079 #ifdef TARGET_NR_sendfile64
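          /*
           * Unlike sendfile above, which reads an abi_long offset with
           * get_user_sal, sendfile64 always takes a 64-bit offset from the guest.
           */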
11080     case TARGET_NR_sendfile64:
11081     {
11082         off_t *offp = NULL;
11083         off_t off;
11084         if (arg3) {
11085             ret = get_user_s64(off, arg3);
11086             if (is_error(ret)) {
11087                 return ret;
11088             }
11089             offp = &off;
11090         }
11091         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11092         if (!is_error(ret) && arg3) {
11093             abi_long ret2 = put_user_s64(off, arg3);
11094             if (is_error(ret2)) {
11095                 ret = ret2;
11096             }
11097         }
11098         return ret;
11099     }
11100 #endif
11101 #endif
11102 #ifdef TARGET_NR_vfork
11103     case TARGET_NR_vfork:
11104         return get_errno(do_fork(cpu_env,
11105                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11106                          0, 0, 0, 0));
11107 #endif
11108 #ifdef TARGET_NR_ugetrlimit
11109     case TARGET_NR_ugetrlimit:
11110     {
11111         struct rlimit rlim;
11112         int resource = target_to_host_resource(arg1);
11113         ret = get_errno(getrlimit(resource, &rlim));
11114         if (!is_error(ret)) {
11115             struct target_rlimit *target_rlim;
11116             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11117                 return -TARGET_EFAULT;
11118             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11119             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11120             unlock_user_struct(target_rlim, arg2, 1);
11121         }
11122         return ret;
11123     }
11124 #endif
11125 #ifdef TARGET_NR_truncate64
11126     case TARGET_NR_truncate64:
11127         if (!(p = lock_user_string(arg1)))
11128             return -TARGET_EFAULT;
11129         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11130         unlock_user(p, arg1, 0);
11131         return ret;
11132 #endif
11133 #ifdef TARGET_NR_ftruncate64
11134     case TARGET_NR_ftruncate64:
11135         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11136 #endif
11137 #ifdef TARGET_NR_stat64
11138     case TARGET_NR_stat64:
11139         if (!(p = lock_user_string(arg1))) {
11140             return -TARGET_EFAULT;
11141         }
11142         ret = get_errno(stat(path(p), &st));
11143         unlock_user(p, arg1, 0);
11144         if (!is_error(ret))
11145             ret = host_to_target_stat64(cpu_env, arg2, &st);
11146         return ret;
11147 #endif
11148 #ifdef TARGET_NR_lstat64
11149     case TARGET_NR_lstat64:
11150         if (!(p = lock_user_string(arg1))) {
11151             return -TARGET_EFAULT;
11152         }
11153         ret = get_errno(lstat(path(p), &st));
11154         unlock_user(p, arg1, 0);
11155         if (!is_error(ret))
11156             ret = host_to_target_stat64(cpu_env, arg2, &st);
11157         return ret;
11158 #endif
11159 #ifdef TARGET_NR_fstat64
11160     case TARGET_NR_fstat64:
11161         ret = get_errno(fstat(arg1, &st));
11162         if (!is_error(ret))
11163             ret = host_to_target_stat64(cpu_env, arg2, &st);
11164         return ret;
11165 #endif
11166 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11167 #ifdef TARGET_NR_fstatat64
11168     case TARGET_NR_fstatat64:
11169 #endif
11170 #ifdef TARGET_NR_newfstatat
11171     case TARGET_NR_newfstatat:
11172 #endif
11173         if (!(p = lock_user_string(arg2))) {
11174             return -TARGET_EFAULT;
11175         }
11176         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11177         unlock_user(p, arg2, 0);
11178         if (!is_error(ret))
11179             ret = host_to_target_stat64(cpu_env, arg3, &st);
11180         return ret;
11181 #endif
11182 #if defined(TARGET_NR_statx)
11183     case TARGET_NR_statx:
11184         {
11185             struct target_statx *target_stx;
11186             int dirfd = arg1;
11187             int flags = arg3;
11188 
11189             p = lock_user_string(arg2);
11190             if (p == NULL) {
11191                 return -TARGET_EFAULT;
11192             }
11193 #if defined(__NR_statx)
11194             {
11195                 /*
11196                  * It is assumed that struct statx is architecture independent.
11197                  */
11198                 struct target_statx host_stx;
11199                 int mask = arg4;
11200 
11201                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11202                 if (!is_error(ret)) {
11203                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11204                         unlock_user(p, arg2, 0);
11205                         return -TARGET_EFAULT;
11206                     }
11207                 }
11208 
11209                 if (ret != -TARGET_ENOSYS) {
11210                     unlock_user(p, arg2, 0);
11211                     return ret;
11212                 }
11213             }
11214 #endif
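                  /*
                   * Either the host has no statx syscall or it returned ENOSYS:
                   * fall back to fstatat() and fill in the subset of statx
                   * fields that can be recovered from a plain struct stat.
                   */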
11215             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11216             unlock_user(p, arg2, 0);
11217 
11218             if (!is_error(ret)) {
11219                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11220                     return -TARGET_EFAULT;
11221                 }
11222                 memset(target_stx, 0, sizeof(*target_stx));
11223                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11224                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11225                 __put_user(st.st_ino, &target_stx->stx_ino);
11226                 __put_user(st.st_mode, &target_stx->stx_mode);
11227                 __put_user(st.st_uid, &target_stx->stx_uid);
11228                 __put_user(st.st_gid, &target_stx->stx_gid);
11229                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11230                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11231                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11232                 __put_user(st.st_size, &target_stx->stx_size);
11233                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11234                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11235                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11236                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11237                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11238                 unlock_user_struct(target_stx, arg5, 1);
11239             }
11240         }
11241         return ret;
11242 #endif
11243 #ifdef TARGET_NR_lchown
11244     case TARGET_NR_lchown:
11245         if (!(p = lock_user_string(arg1)))
11246             return -TARGET_EFAULT;
11247         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11248         unlock_user(p, arg1, 0);
11249         return ret;
11250 #endif
11251 #ifdef TARGET_NR_getuid
11252     case TARGET_NR_getuid:
11253         return get_errno(high2lowuid(getuid()));
11254 #endif
11255 #ifdef TARGET_NR_getgid
11256     case TARGET_NR_getgid:
11257         return get_errno(high2lowgid(getgid()));
11258 #endif
11259 #ifdef TARGET_NR_geteuid
11260     case TARGET_NR_geteuid:
11261         return get_errno(high2lowuid(geteuid()));
11262 #endif
11263 #ifdef TARGET_NR_getegid
11264     case TARGET_NR_getegid:
11265         return get_errno(high2lowgid(getegid()));
11266 #endif
11267     case TARGET_NR_setreuid:
11268         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11269     case TARGET_NR_setregid:
11270         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11271     case TARGET_NR_getgroups:
11272         {
11273             int gidsetsize = arg1;
11274             target_id *target_grouplist;
11275             gid_t *grouplist;
11276             int i;
11277 
11278             grouplist = alloca(gidsetsize * sizeof(gid_t));
11279             ret = get_errno(getgroups(gidsetsize, grouplist));
11280             if (gidsetsize == 0)
11281                 return ret;
11282             if (!is_error(ret)) {
11283                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11284                 if (!target_grouplist)
11285                     return -TARGET_EFAULT;
11286                 for (i = 0; i < ret; i++)
11287                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11288                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11289             }
11290         }
11291         return ret;
11292     case TARGET_NR_setgroups:
11293         {
11294             int gidsetsize = arg1;
11295             target_id *target_grouplist;
11296             gid_t *grouplist = NULL;
11297             int i;
11298             if (gidsetsize) {
11299                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11300                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11301                 if (!target_grouplist) {
11302                     return -TARGET_EFAULT;
11303                 }
11304                 for (i = 0; i < gidsetsize; i++) {
11305                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11306                 }
11307                 unlock_user(target_grouplist, arg2, 0);
11308             }
11309             return get_errno(setgroups(gidsetsize, grouplist));
11310         }
11311     case TARGET_NR_fchown:
11312         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11313 #if defined(TARGET_NR_fchownat)
11314     case TARGET_NR_fchownat:
11315         if (!(p = lock_user_string(arg2)))
11316             return -TARGET_EFAULT;
11317         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11318                                  low2highgid(arg4), arg5));
11319         unlock_user(p, arg2, 0);
11320         return ret;
11321 #endif
11322 #ifdef TARGET_NR_setresuid
11323     case TARGET_NR_setresuid:
11324         return get_errno(sys_setresuid(low2highuid(arg1),
11325                                        low2highuid(arg2),
11326                                        low2highuid(arg3)));
11327 #endif
11328 #ifdef TARGET_NR_getresuid
11329     case TARGET_NR_getresuid:
11330         {
11331             uid_t ruid, euid, suid;
11332             ret = get_errno(getresuid(&ruid, &euid, &suid));
11333             if (!is_error(ret)) {
11334                 if (put_user_id(high2lowuid(ruid), arg1)
11335                     || put_user_id(high2lowuid(euid), arg2)
11336                     || put_user_id(high2lowuid(suid), arg3))
11337                     return -TARGET_EFAULT;
11338             }
11339         }
11340         return ret;
11341 #endif
11342 #ifdef TARGET_NR_getresgid
11343     case TARGET_NR_setresgid:
11344         return get_errno(sys_setresgid(low2highgid(arg1),
11345                                        low2highgid(arg2),
11346                                        low2highgid(arg3)));
11347 #endif
11348 #ifdef TARGET_NR_getresgid
11349     case TARGET_NR_getresgid:
11350         {
11351             gid_t rgid, egid, sgid;
11352             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11353             if (!is_error(ret)) {
11354                 if (put_user_id(high2lowgid(rgid), arg1)
11355                     || put_user_id(high2lowgid(egid), arg2)
11356                     || put_user_id(high2lowgid(sgid), arg3))
11357                     return -TARGET_EFAULT;
11358             }
11359         }
11360         return ret;
11361 #endif
11362 #ifdef TARGET_NR_chown
11363     case TARGET_NR_chown:
11364         if (!(p = lock_user_string(arg1)))
11365             return -TARGET_EFAULT;
11366         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11367         unlock_user(p, arg1, 0);
11368         return ret;
11369 #endif
11370     case TARGET_NR_setuid:
11371         return get_errno(sys_setuid(low2highuid(arg1)));
11372     case TARGET_NR_setgid:
11373         return get_errno(sys_setgid(low2highgid(arg1)));
11374     case TARGET_NR_setfsuid:
11375         return get_errno(setfsuid(arg1));
11376     case TARGET_NR_setfsgid:
11377         return get_errno(setfsgid(arg1));
11378 
11379 #ifdef TARGET_NR_lchown32
11380     case TARGET_NR_lchown32:
11381         if (!(p = lock_user_string(arg1)))
11382             return -TARGET_EFAULT;
11383         ret = get_errno(lchown(p, arg2, arg3));
11384         unlock_user(p, arg1, 0);
11385         return ret;
11386 #endif
11387 #ifdef TARGET_NR_getuid32
11388     case TARGET_NR_getuid32:
11389         return get_errno(getuid());
11390 #endif
11391 
11392 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11393    /* Alpha specific */
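          /*
           * Alpha's getxuid returns two values: the real uid as the syscall
           * result and the effective uid in register a4, which is why ir[IR_A4]
           * is written explicitly here.
           */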
11394     case TARGET_NR_getxuid:
11395          {
11396             uid_t euid;
11397             euid = geteuid();
11398             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11399          }
11400         return get_errno(getuid());
11401 #endif
11402 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11403    /* Alpha specific */
11404     case TARGET_NR_getxgid:
11405          {
11406             gid_t egid;
11407             egid = getegid();
11408             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11409          }
11410         return get_errno(getgid());
11411 #endif
11412 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11413     /* Alpha specific */
11414     case TARGET_NR_osf_getsysinfo:
11415         ret = -TARGET_EOPNOTSUPP;
11416         switch (arg1) {
11417           case TARGET_GSI_IEEE_FP_CONTROL:
11418             {
11419                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11420                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11421 
11422                 swcr &= ~SWCR_STATUS_MASK;
11423                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11424 
11425                 if (put_user_u64(swcr, arg2))
11426                     return -TARGET_EFAULT;
11427                 ret = 0;
11428             }
11429             break;
11430 
11431           /* case GSI_IEEE_STATE_AT_SIGNAL:
11432              -- Not implemented in linux kernel.
11433              case GSI_UACPROC:
11434              -- Retrieves current unaligned access state; not much used.
11435              case GSI_PROC_TYPE:
11436              -- Retrieves implver information; surely not used.
11437              case GSI_GET_HWRPB:
11438              -- Grabs a copy of the HWRPB; surely not used.
11439           */
11440         }
11441         return ret;
11442 #endif
11443 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11444     /* Alpha specific */
11445     case TARGET_NR_osf_setsysinfo:
11446         ret = -TARGET_EOPNOTSUPP;
11447         switch (arg1) {
11448           case TARGET_SSI_IEEE_FP_CONTROL:
11449             {
11450                 uint64_t swcr, fpcr;
11451 
11452                 if (get_user_u64(swcr, arg2)) {
11453                     return -TARGET_EFAULT;
11454                 }
11455 
11456                 /*
11457                  * The kernel calls swcr_update_status to update the
11458                  * status bits from the fpcr at every point that it
11459                  * could be queried.  Therefore, we store the status
11460                  * bits only in FPCR.
11461                  */
11462                 ((CPUAlphaState *)cpu_env)->swcr
11463                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11464 
11465                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11466                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11467                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11468                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11469                 ret = 0;
11470             }
11471             break;
11472 
11473           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11474             {
11475                 uint64_t exc, fpcr, fex;
11476 
11477                 if (get_user_u64(exc, arg2)) {
11478                     return -TARGET_EFAULT;
11479                 }
11480                 exc &= SWCR_STATUS_MASK;
11481                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11482 
11483                 /* Old exceptions are not signaled.  */
11484                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11485                 fex = exc & ~fex;
11486                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11487                 fex &= ((CPUArchState *)cpu_env)->swcr;
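                      /*
                       * fex now holds only the newly raised exception bits whose
                       * traps are enabled in the software control word; these
                       * are the ones that must raise SIGFPE below.
                       */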
11488 
11489                 /* Update the hardware fpcr.  */
11490                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11491                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11492 
11493                 if (fex) {
11494                     int si_code = TARGET_FPE_FLTUNK;
11495                     target_siginfo_t info;
11496 
11497                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11498                         si_code = TARGET_FPE_FLTUND;
11499                     }
11500                     if (fex & SWCR_TRAP_ENABLE_INE) {
11501                         si_code = TARGET_FPE_FLTRES;
11502                     }
11503                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11504                         si_code = TARGET_FPE_FLTUND;
11505                     }
11506                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11507                         si_code = TARGET_FPE_FLTOVF;
11508                     }
11509                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11510                         si_code = TARGET_FPE_FLTDIV;
11511                     }
11512                     if (fex & SWCR_TRAP_ENABLE_INV) {
11513                         si_code = TARGET_FPE_FLTINV;
11514                     }
11515 
11516                     info.si_signo = SIGFPE;
11517                     info.si_errno = 0;
11518                     info.si_code = si_code;
11519                     info._sifields._sigfault._addr
11520                         = ((CPUArchState *)cpu_env)->pc;
11521                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11522                                  QEMU_SI_FAULT, &info);
11523                 }
11524                 ret = 0;
11525             }
11526             break;
11527 
11528           /* case SSI_NVPAIRS:
11529              -- Used with SSIN_UACPROC to enable unaligned accesses.
11530              case SSI_IEEE_STATE_AT_SIGNAL:
11531              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11532              -- Not implemented in linux kernel
11533           */
11534         }
11535         return ret;
11536 #endif
11537 #ifdef TARGET_NR_osf_sigprocmask
11538     /* Alpha specific.  */
11539     case TARGET_NR_osf_sigprocmask:
11540         {
11541             abi_ulong mask;
11542             int how;
11543             sigset_t set, oldset;
11544 
11545             switch (arg1) {
11546             case TARGET_SIG_BLOCK:
11547                 how = SIG_BLOCK;
11548                 break;
11549             case TARGET_SIG_UNBLOCK:
11550                 how = SIG_UNBLOCK;
11551                 break;
11552             case TARGET_SIG_SETMASK:
11553                 how = SIG_SETMASK;
11554                 break;
11555             default:
11556                 return -TARGET_EINVAL;
11557             }
11558             mask = arg2;
11559             target_to_host_old_sigset(&set, &mask);
11560             ret = do_sigprocmask(how, &set, &oldset);
11561             if (!ret) {
11562                 host_to_target_old_sigset(&mask, &oldset);
11563                 ret = mask;
11564             }
11565         }
11566         return ret;
11567 #endif
11568 
11569 #ifdef TARGET_NR_getgid32
11570     case TARGET_NR_getgid32:
11571         return get_errno(getgid());
11572 #endif
11573 #ifdef TARGET_NR_geteuid32
11574     case TARGET_NR_geteuid32:
11575         return get_errno(geteuid());
11576 #endif
11577 #ifdef TARGET_NR_getegid32
11578     case TARGET_NR_getegid32:
11579         return get_errno(getegid());
11580 #endif
11581 #ifdef TARGET_NR_setreuid32
11582     case TARGET_NR_setreuid32:
11583         return get_errno(setreuid(arg1, arg2));
11584 #endif
11585 #ifdef TARGET_NR_setregid32
11586     case TARGET_NR_setregid32:
11587         return get_errno(setregid(arg1, arg2));
11588 #endif
11589 #ifdef TARGET_NR_getgroups32
11590     case TARGET_NR_getgroups32:
11591         {
11592             int gidsetsize = arg1;
11593             uint32_t *target_grouplist;
11594             gid_t *grouplist;
11595             int i;
11596 
11597             grouplist = alloca(gidsetsize * sizeof(gid_t));
11598             ret = get_errno(getgroups(gidsetsize, grouplist));
11599             if (gidsetsize == 0)
11600                 return ret;
11601             if (!is_error(ret)) {
11602                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11603                 if (!target_grouplist) {
11604                     return -TARGET_EFAULT;
11605                 }
11606                 for (i = 0; i < ret; i++)
11607                     target_grouplist[i] = tswap32(grouplist[i]);
11608                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11609             }
11610         }
11611         return ret;
11612 #endif
11613 #ifdef TARGET_NR_setgroups32
11614     case TARGET_NR_setgroups32:
11615         {
11616             int gidsetsize = arg1;
11617             uint32_t *target_grouplist;
11618             gid_t *grouplist;
11619             int i;
11620 
11621             grouplist = alloca(gidsetsize * sizeof(gid_t));
11622             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11623             if (!target_grouplist) {
11624                 return -TARGET_EFAULT;
11625             }
11626             for (i = 0; i < gidsetsize; i++)
11627                 grouplist[i] = tswap32(target_grouplist[i]);
11628             unlock_user(target_grouplist, arg2, 0);
11629             return get_errno(setgroups(gidsetsize, grouplist));
11630         }
11631 #endif
11632 #ifdef TARGET_NR_fchown32
11633     case TARGET_NR_fchown32:
11634         return get_errno(fchown(arg1, arg2, arg3));
11635 #endif
11636 #ifdef TARGET_NR_setresuid32
11637     case TARGET_NR_setresuid32:
11638         return get_errno(sys_setresuid(arg1, arg2, arg3));
11639 #endif
11640 #ifdef TARGET_NR_getresuid32
11641     case TARGET_NR_getresuid32:
11642         {
11643             uid_t ruid, euid, suid;
11644             ret = get_errno(getresuid(&ruid, &euid, &suid));
11645             if (!is_error(ret)) {
11646                 if (put_user_u32(ruid, arg1)
11647                     || put_user_u32(euid, arg2)
11648                     || put_user_u32(suid, arg3))
11649                     return -TARGET_EFAULT;
11650             }
11651         }
11652         return ret;
11653 #endif
11654 #ifdef TARGET_NR_setresgid32
11655     case TARGET_NR_setresgid32:
11656         return get_errno(sys_setresgid(arg1, arg2, arg3));
11657 #endif
11658 #ifdef TARGET_NR_getresgid32
11659     case TARGET_NR_getresgid32:
11660         {
11661             gid_t rgid, egid, sgid;
11662             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11663             if (!is_error(ret)) {
11664                 if (put_user_u32(rgid, arg1)
11665                     || put_user_u32(egid, arg2)
11666                     || put_user_u32(sgid, arg3))
11667                     return -TARGET_EFAULT;
11668             }
11669         }
11670         return ret;
11671 #endif
11672 #ifdef TARGET_NR_chown32
11673     case TARGET_NR_chown32:
11674         if (!(p = lock_user_string(arg1)))
11675             return -TARGET_EFAULT;
11676         ret = get_errno(chown(p, arg2, arg3));
11677         unlock_user(p, arg1, 0);
11678         return ret;
11679 #endif
11680 #ifdef TARGET_NR_setuid32
11681     case TARGET_NR_setuid32:
11682         return get_errno(sys_setuid(arg1));
11683 #endif
11684 #ifdef TARGET_NR_setgid32
11685     case TARGET_NR_setgid32:
11686         return get_errno(sys_setgid(arg1));
11687 #endif
11688 #ifdef TARGET_NR_setfsuid32
11689     case TARGET_NR_setfsuid32:
11690         return get_errno(setfsuid(arg1));
11691 #endif
11692 #ifdef TARGET_NR_setfsgid32
11693     case TARGET_NR_setfsgid32:
11694         return get_errno(setfsgid(arg1));
11695 #endif
11696 #ifdef TARGET_NR_mincore
11697     case TARGET_NR_mincore:
11698         {
11699             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11700             if (!a) {
11701                 return -TARGET_ENOMEM;
11702             }
11703             p = lock_user_string(arg3);
11704             if (!p) {
11705                 ret = -TARGET_EFAULT;
11706             } else {
11707                 ret = get_errno(mincore(a, arg2, p));
11708                 unlock_user(p, arg3, ret);
11709             }
11710             unlock_user(a, arg1, 0);
11711         }
11712         return ret;
11713 #endif
11714 #ifdef TARGET_NR_arm_fadvise64_64
11715     case TARGET_NR_arm_fadvise64_64:
11716         /* arm_fadvise64_64 looks like fadvise64_64 but
11717          * with different argument order: fd, advice, offset, len
11718          * rather than the usual fd, offset, len, advice.
11719          * Note that offset and len are both 64-bit so appear as
11720          * pairs of 32-bit registers.
11721          */
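              /*
               * Illustrative register layout (hypothetical call): for
               * arm_fadvise64_64(fd, POSIX_FADV_DONTNEED, offset, len) the
               * guest passes arg1 = fd, arg2 = advice, arg3/arg4 = offset pair
               * and arg5/arg6 = len pair, which is exactly how they are
               * consumed below.
               */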
11722         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11723                             target_offset64(arg5, arg6), arg2);
11724         return -host_to_target_errno(ret);
11725 #endif
11726 
11727 #if TARGET_ABI_BITS == 32
11728 
11729 #ifdef TARGET_NR_fadvise64_64
11730     case TARGET_NR_fadvise64_64:
11731 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11732         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11733         ret = arg2;
11734         arg2 = arg3;
11735         arg3 = arg4;
11736         arg4 = arg5;
11737         arg5 = arg6;
11738         arg6 = ret;
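              /*
               * The rotation above turns the PPC/Xtensa ordering
               * (fd, advice, offset, len) back into the generic
               * (fd, offset, len, advice) layout expected by the common
               * posix_fadvise() call below.
               */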
11739 #else
11740         /* 6 args: fd, offset (high, low), len (high, low), advice */
11741         if (regpairs_aligned(cpu_env, num)) {
11742             /* offset is in (3,4), len in (5,6) and advice in 7 */
11743             arg2 = arg3;
11744             arg3 = arg4;
11745             arg4 = arg5;
11746             arg5 = arg6;
11747             arg6 = arg7;
11748         }
11749 #endif
11750         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11751                             target_offset64(arg4, arg5), arg6);
11752         return -host_to_target_errno(ret);
11753 #endif
11754 
11755 #ifdef TARGET_NR_fadvise64
11756     case TARGET_NR_fadvise64:
11757         /* 5 args: fd, offset (high, low), len, advice */
11758         if (regpairs_aligned(cpu_env, num)) {
11759             /* offset is in (3,4), len in 5 and advice in 6 */
11760             arg2 = arg3;
11761             arg3 = arg4;
11762             arg4 = arg5;
11763             arg5 = arg6;
11764         }
11765         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11766         return -host_to_target_errno(ret);
11767 #endif
11768 
11769 #else /* not a 32-bit ABI */
11770 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11771 #ifdef TARGET_NR_fadvise64_64
11772     case TARGET_NR_fadvise64_64:
11773 #endif
11774 #ifdef TARGET_NR_fadvise64
11775     case TARGET_NR_fadvise64:
11776 #endif
11777 #ifdef TARGET_S390X
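              /*
               * The s390 ABI historically numbers POSIX_FADV_DONTNEED and
               * POSIX_FADV_NOREUSE as 6 and 7 rather than the generic 4 and 5,
               * so translate the guest's advice value to the host definitions
               * and turn the generic 4/5 into deliberately invalid values.
               */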
11778         switch (arg4) {
11779         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11780         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11781         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11782         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11783         default: break;
11784         }
11785 #endif
11786         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11787 #endif
11788 #endif /* end of 64-bit ABI fadvise handling */
11789 
11790 #ifdef TARGET_NR_madvise
11791     case TARGET_NR_madvise:
11792         /* A straight passthrough may not be safe because qemu sometimes
11793            turns private file-backed mappings into anonymous mappings.
11794            This will break MADV_DONTNEED.
11795            This is a hint, so ignoring and returning success is ok.  */
11796         return 0;
11797 #endif
11798 #ifdef TARGET_NR_fcntl64
11799     case TARGET_NR_fcntl64:
11800     {
11801         int cmd;
11802         struct flock64 fl;
11803         from_flock64_fn *copyfrom = copy_from_user_flock64;
11804         to_flock64_fn *copyto = copy_to_user_flock64;
11805 
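              /*
               * The old ARM OABI lays out struct flock64 without the alignment
               * padding that EABI (and the host) insert before its 64-bit
               * fields, so OABI guests get their own pair of copy helpers below.
               */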
11806 #ifdef TARGET_ARM
11807         if (!((CPUARMState *)cpu_env)->eabi) {
11808             copyfrom = copy_from_user_oabi_flock64;
11809             copyto = copy_to_user_oabi_flock64;
11810         }
11811 #endif
11812 
11813         cmd = target_to_host_fcntl_cmd(arg2);
11814         if (cmd == -TARGET_EINVAL) {
11815             return cmd;
11816         }
11817 
11818         switch (arg2) {
11819         case TARGET_F_GETLK64:
11820             ret = copyfrom(&fl, arg3);
11821             if (ret) {
11822                 break;
11823             }
11824             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11825             if (ret == 0) {
11826                 ret = copyto(arg3, &fl);
11827             }
11828             break;
11829 
11830         case TARGET_F_SETLK64:
11831         case TARGET_F_SETLKW64:
11832             ret = copyfrom(&fl, arg3);
11833             if (ret) {
11834                 break;
11835             }
11836             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11837             break;
11838         default:
11839             ret = do_fcntl(arg1, arg2, arg3);
11840             break;
11841         }
11842         return ret;
11843     }
11844 #endif
11845 #ifdef TARGET_NR_cacheflush
11846     case TARGET_NR_cacheflush:
11847         /* self-modifying code is handled automatically, so nothing needed */
11848         return 0;
11849 #endif
11850 #ifdef TARGET_NR_getpagesize
11851     case TARGET_NR_getpagesize:
11852         return TARGET_PAGE_SIZE;
11853 #endif
11854     case TARGET_NR_gettid:
11855         return get_errno(sys_gettid());
11856 #ifdef TARGET_NR_readahead
11857     case TARGET_NR_readahead:
11858 #if TARGET_ABI_BITS == 32
11859         if (regpairs_aligned(cpu_env, num)) {
11860             arg2 = arg3;
11861             arg3 = arg4;
11862             arg4 = arg5;
11863         }
11864         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11865 #else
11866         ret = get_errno(readahead(arg1, arg2, arg3));
11867 #endif
11868         return ret;
11869 #endif
11870 #ifdef CONFIG_ATTR
11871 #ifdef TARGET_NR_setxattr
11872     case TARGET_NR_listxattr:
11873     case TARGET_NR_llistxattr:
11874     {
11875         void *p, *b = 0;
11876         if (arg2) {
11877             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11878             if (!b) {
11879                 return -TARGET_EFAULT;
11880             }
11881         }
11882         p = lock_user_string(arg1);
11883         if (p) {
11884             if (num == TARGET_NR_listxattr) {
11885                 ret = get_errno(listxattr(p, b, arg3));
11886             } else {
11887                 ret = get_errno(llistxattr(p, b, arg3));
11888             }
11889         } else {
11890             ret = -TARGET_EFAULT;
11891         }
11892         unlock_user(p, arg1, 0);
11893         unlock_user(b, arg2, arg3);
11894         return ret;
11895     }
11896     case TARGET_NR_flistxattr:
11897     {
11898         void *b = 0;
11899         if (arg2) {
11900             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11901             if (!b) {
11902                 return -TARGET_EFAULT;
11903             }
11904         }
11905         ret = get_errno(flistxattr(arg1, b, arg3));
11906         unlock_user(b, arg2, arg3);
11907         return ret;
11908     }
11909     case TARGET_NR_setxattr:
11910     case TARGET_NR_lsetxattr:
11911         {
11912             void *p, *n, *v = 0;
11913             if (arg3) {
11914                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11915                 if (!v) {
11916                     return -TARGET_EFAULT;
11917                 }
11918             }
11919             p = lock_user_string(arg1);
11920             n = lock_user_string(arg2);
11921             if (p && n) {
11922                 if (num == TARGET_NR_setxattr) {
11923                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11924                 } else {
11925                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11926                 }
11927             } else {
11928                 ret = -TARGET_EFAULT;
11929             }
11930             unlock_user(p, arg1, 0);
11931             unlock_user(n, arg2, 0);
11932             unlock_user(v, arg3, 0);
11933         }
11934         return ret;
11935     case TARGET_NR_fsetxattr:
11936         {
11937             void *n, *v = 0;
11938             if (arg3) {
11939                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11940                 if (!v) {
11941                     return -TARGET_EFAULT;
11942                 }
11943             }
11944             n = lock_user_string(arg2);
11945             if (n) {
11946                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11947             } else {
11948                 ret = -TARGET_EFAULT;
11949             }
11950             unlock_user(n, arg2, 0);
11951             unlock_user(v, arg3, 0);
11952         }
11953         return ret;
11954     case TARGET_NR_getxattr:
11955     case TARGET_NR_lgetxattr:
11956         {
11957             void *p, *n, *v = 0;
11958             if (arg3) {
11959                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11960                 if (!v) {
11961                     return -TARGET_EFAULT;
11962                 }
11963             }
11964             p = lock_user_string(arg1);
11965             n = lock_user_string(arg2);
11966             if (p && n) {
11967                 if (num == TARGET_NR_getxattr) {
11968                     ret = get_errno(getxattr(p, n, v, arg4));
11969                 } else {
11970                     ret = get_errno(lgetxattr(p, n, v, arg4));
11971                 }
11972             } else {
11973                 ret = -TARGET_EFAULT;
11974             }
11975             unlock_user(p, arg1, 0);
11976             unlock_user(n, arg2, 0);
11977             unlock_user(v, arg3, arg4);
11978         }
11979         return ret;
11980     case TARGET_NR_fgetxattr:
11981         {
11982             void *n, *v = 0;
11983             if (arg3) {
11984                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11985                 if (!v) {
11986                     return -TARGET_EFAULT;
11987                 }
11988             }
11989             n = lock_user_string(arg2);
11990             if (n) {
11991                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11992             } else {
11993                 ret = -TARGET_EFAULT;
11994             }
11995             unlock_user(n, arg2, 0);
11996             unlock_user(v, arg3, arg4);
11997         }
11998         return ret;
11999     case TARGET_NR_removexattr:
12000     case TARGET_NR_lremovexattr:
12001         {
12002             void *p, *n;
12003             p = lock_user_string(arg1);
12004             n = lock_user_string(arg2);
12005             if (p && n) {
12006                 if (num == TARGET_NR_removexattr) {
12007                     ret = get_errno(removexattr(p, n));
12008                 } else {
12009                     ret = get_errno(lremovexattr(p, n));
12010                 }
12011             } else {
12012                 ret = -TARGET_EFAULT;
12013             }
12014             unlock_user(p, arg1, 0);
12015             unlock_user(n, arg2, 0);
12016         }
12017         return ret;
12018     case TARGET_NR_fremovexattr:
12019         {
12020             void *n;
12021             n = lock_user_string(arg2);
12022             if (n) {
12023                 ret = get_errno(fremovexattr(arg1, n));
12024             } else {
12025                 ret = -TARGET_EFAULT;
12026             }
12027             unlock_user(n, arg2, 0);
12028         }
12029         return ret;
12030 #endif
12031 #endif /* CONFIG_ATTR */
12032 #ifdef TARGET_NR_set_thread_area
12033     case TARGET_NR_set_thread_area:
12034 #if defined(TARGET_MIPS)
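            /*
             * On MIPS the TLS pointer lives in the CP0 UserLocal register,
             * which guest code typically reads back via rdhwr $29.
             */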
12035       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12036       return 0;
12037 #elif defined(TARGET_CRIS)
12038       if (arg1 & 0xff)
12039           ret = -TARGET_EINVAL;
12040       else {
12041           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12042           ret = 0;
12043       }
12044       return ret;
12045 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12046       return do_set_thread_area(cpu_env, arg1);
12047 #elif defined(TARGET_M68K)
12048       {
12049           TaskState *ts = cpu->opaque;
12050           ts->tp_value = arg1;
12051           return 0;
12052       }
12053 #else
12054       return -TARGET_ENOSYS;
12055 #endif
12056 #endif
12057 #ifdef TARGET_NR_get_thread_area
12058     case TARGET_NR_get_thread_area:
12059 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12060         return do_get_thread_area(cpu_env, arg1);
12061 #elif defined(TARGET_M68K)
12062         {
12063             TaskState *ts = cpu->opaque;
12064             return ts->tp_value;
12065         }
12066 #else
12067         return -TARGET_ENOSYS;
12068 #endif
12069 #endif
12070 #ifdef TARGET_NR_getdomainname
12071     case TARGET_NR_getdomainname:
12072         return -TARGET_ENOSYS;
12073 #endif
12074 
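          /*
           * The *_time64 syscall variants exist for 32-bit guest ABIs whose
           * legacy struct timespec has only a 32-bit tv_sec; they differ
           * from the plain versions only in using the target__kernel_timespec
           * layout, converted with the *_timespec64() helpers.
           */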
12075 #ifdef TARGET_NR_clock_settime
12076     case TARGET_NR_clock_settime:
12077     {
12078         struct timespec ts;
12079 
12080         ret = target_to_host_timespec(&ts, arg2);
12081         if (!is_error(ret)) {
12082             ret = get_errno(clock_settime(arg1, &ts));
12083         }
12084         return ret;
12085     }
12086 #endif
12087 #ifdef TARGET_NR_clock_settime64
12088     case TARGET_NR_clock_settime64:
12089     {
12090         struct timespec ts;
12091 
12092         ret = target_to_host_timespec64(&ts, arg2);
12093         if (!is_error(ret)) {
12094             ret = get_errno(clock_settime(arg1, &ts));
12095         }
12096         return ret;
12097     }
12098 #endif
12099 #ifdef TARGET_NR_clock_gettime
12100     case TARGET_NR_clock_gettime:
12101     {
12102         struct timespec ts;
12103         ret = get_errno(clock_gettime(arg1, &ts));
12104         if (!is_error(ret)) {
12105             ret = host_to_target_timespec(arg2, &ts);
12106         }
12107         return ret;
12108     }
12109 #endif
12110 #ifdef TARGET_NR_clock_gettime64
12111     case TARGET_NR_clock_gettime64:
12112     {
12113         struct timespec ts;
12114         ret = get_errno(clock_gettime(arg1, &ts));
12115         if (!is_error(ret)) {
12116             ret = host_to_target_timespec64(arg2, &ts);
12117         }
12118         return ret;
12119     }
12120 #endif
12121 #ifdef TARGET_NR_clock_getres
12122     case TARGET_NR_clock_getres:
12123     {
12124         struct timespec ts;
12125         ret = get_errno(clock_getres(arg1, &ts));
12126         if (!is_error(ret)) {
12127             ret = host_to_target_timespec(arg2, &ts);
12128         }
12129         return ret;
12130     }
12131 #endif
12132 #ifdef TARGET_NR_clock_getres_time64
12133     case TARGET_NR_clock_getres_time64:
12134     {
12135         struct timespec ts;
12136         ret = get_errno(clock_getres(arg1, &ts));
12137         if (!is_error(ret)) {
12138             ret = host_to_target_timespec64(arg2, &ts);
12139         }
12140         return ret;
12141     }
12142 #endif
12143 #ifdef TARGET_NR_clock_nanosleep
12144     case TARGET_NR_clock_nanosleep:
12145     {
12146         struct timespec ts;
12147         if (target_to_host_timespec(&ts, arg3)) {
12148             return -TARGET_EFAULT;
12149         }
12150         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12151                                              &ts, arg4 ? &ts : NULL));
12152         /*
12153          * If the call is interrupted by a signal handler, it fails with
12154          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is
12155          * not TIMER_ABSTIME, the remaining unslept time is stored in arg4.
12156          */
12157         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12158             host_to_target_timespec(arg4, &ts)) {
12159               return -TARGET_EFAULT;
12160         }
12161 
12162         return ret;
12163     }
12164 #endif
12165 #ifdef TARGET_NR_clock_nanosleep_time64
12166     case TARGET_NR_clock_nanosleep_time64:
12167     {
12168         struct timespec ts;
12169 
12170         if (target_to_host_timespec64(&ts, arg3)) {
12171             return -TARGET_EFAULT;
12172         }
12173 
12174         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12175                                              &ts, arg4 ? &ts : NULL));
12176 
12177         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12178             host_to_target_timespec64(arg4, &ts)) {
12179             return -TARGET_EFAULT;
12180         }
12181         return ret;
12182     }
12183 #endif
12184 
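          /*
           * set_tid_address just registers a location that the kernel will
           * clear and futex-wake when the thread exits, so the guest address
           * can be passed straight through to the host via g2h().
           */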
12185 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12186     case TARGET_NR_set_tid_address:
12187         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12188 #endif
12189 
12190     case TARGET_NR_tkill:
12191         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12192 
12193     case TARGET_NR_tgkill:
12194         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12195                          target_to_host_signal(arg3)));
12196 
12197 #ifdef TARGET_NR_set_robust_list
12198     case TARGET_NR_set_robust_list:
12199     case TARGET_NR_get_robust_list:
12200         /* The ABI for supporting robust futexes has userspace pass
12201          * the kernel a pointer to a linked list which is updated by
12202          * userspace after the syscall; the list is walked by the kernel
12203          * when the thread exits. Since the linked list in QEMU guest
12204          * memory isn't a valid linked list for the host and we have
12205          * no way to reliably intercept the thread-death event, we can't
12206          * support these. Silently return ENOSYS so that guest userspace
12207          * falls back to a non-robust futex implementation (which should
12208          * be OK except in the corner case of the guest crashing while
12209          * holding a mutex that is shared with another process via
12210          * shared memory).
12211          */
12212         return -TARGET_ENOSYS;
12213 #endif
12214 
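          /*
           * utimensat takes an array of two timespecs (atime, mtime) read
           * back-to-back from guest memory; a NULL times pointer (arg3 == 0)
           * means "set both timestamps to the current time".
           */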
12215 #if defined(TARGET_NR_utimensat)
12216     case TARGET_NR_utimensat:
12217         {
12218             struct timespec *tsp, ts[2];
12219             if (!arg3) {
12220                 tsp = NULL;
12221             } else {
12222                 if (target_to_host_timespec(ts, arg3)) {
12223                     return -TARGET_EFAULT;
12224                 }
12225                 if (target_to_host_timespec(ts + 1, arg3 +
12226                                             sizeof(struct target_timespec))) {
12227                     return -TARGET_EFAULT;
12228                 }
12229                 tsp = ts;
12230             }
12231             if (!arg2) {
12232                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12233             } else {
12234                 if (!(p = lock_user_string(arg2))) {
12235                     return -TARGET_EFAULT;
12236                 }
12237                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12238                 unlock_user(p, arg2, 0);
12239             }
12240         }
12241         return ret;
12242 #endif
12243 #ifdef TARGET_NR_utimensat_time64
12244     case TARGET_NR_utimensat_time64:
12245         {
12246             struct timespec *tsp, ts[2];
12247             if (!arg3) {
12248                 tsp = NULL;
12249             } else {
12250                 if (target_to_host_timespec64(ts, arg3)) {
12251                     return -TARGET_EFAULT;
12252                 }
12253                 if (target_to_host_timespec64(ts + 1, arg3 +
12254                                      sizeof(struct target__kernel_timespec))) {
12255                     return -TARGET_EFAULT;
12256                 }
12257                 tsp = ts;
12258             }
12259             if (!arg2) {
12260                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12261             } else {
12262                 p = lock_user_string(arg2);
12263                 if (!p) {
12264                     return -TARGET_EFAULT;
12265                 }
12266                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12267                 unlock_user(p, arg2, 0);
12268             }
12269         }
12270         return ret;
12271 #endif
12272 #ifdef TARGET_NR_futex
12273     case TARGET_NR_futex:
12274         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12275 #endif
12276 #ifdef TARGET_NR_futex_time64
12277     case TARGET_NR_futex_time64:
12278         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12279 #endif
12280 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12281     case TARGET_NR_inotify_init:
12282         ret = get_errno(sys_inotify_init());
12283         if (ret >= 0) {
12284             fd_trans_register(ret, &target_inotify_trans);
12285         }
12286         return ret;
12287 #endif
12288 #ifdef CONFIG_INOTIFY1
12289 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12290     case TARGET_NR_inotify_init1:
12291         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12292                                           fcntl_flags_tbl)));
12293         if (ret >= 0) {
12294             fd_trans_register(ret, &target_inotify_trans);
12295         }
12296         return ret;
12297 #endif
12298 #endif
12299 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12300     case TARGET_NR_inotify_add_watch:
12301         p = lock_user_string(arg2);
12302         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12303         unlock_user(p, arg2, 0);
12304         return ret;
12305 #endif
12306 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12307     case TARGET_NR_inotify_rm_watch:
12308         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12309 #endif
12310 
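          /*
           * POSIX message queue syscalls pass through to the host mq_* API:
           * open flags are remapped with target_to_host_bitmask() and
           * fcntl_flags_tbl, and struct mq_attr is byte-swapped by the
           * copy_{from,to}_user_mq_attr() helpers.  For example, a guest
           * call along the lines of
           *     mq_open("/q", O_CREAT | O_WRONLY, 0600, NULL)
           * reaches the host mq_open() below with the guest's O_* bits
           * translated to the host values.
           */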
12311 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12312     case TARGET_NR_mq_open:
12313         {
12314             struct mq_attr posix_mq_attr;
12315             struct mq_attr *pposix_mq_attr;
12316             int host_flags;
12317 
12318             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12319             pposix_mq_attr = NULL;
12320             if (arg4) {
12321                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12322                     return -TARGET_EFAULT;
12323                 }
12324                 pposix_mq_attr = &posix_mq_attr;
12325             }
12326             p = lock_user_string(arg1 - 1);
12327             if (!p) {
12328                 return -TARGET_EFAULT;
12329             }
12330             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12331             unlock_user(p, arg1, 0);
12332         }
12333         return ret;
12334 
12335     case TARGET_NR_mq_unlink:
12336         p = lock_user_string(arg1 - 1);
12337         if (!p) {
12338             return -TARGET_EFAULT;
12339         }
12340         ret = get_errno(mq_unlink(p));
12341         unlock_user(p, arg1, 0);
12342         return ret;
12343 
12344 #ifdef TARGET_NR_mq_timedsend
12345     case TARGET_NR_mq_timedsend:
12346         {
12347             struct timespec ts;
12348 
12349             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12350             if (arg5 != 0) {
12351                 if (target_to_host_timespec(&ts, arg5)) {
12352                     return -TARGET_EFAULT;
12353                 }
12354                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12355                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12356                     return -TARGET_EFAULT;
12357                 }
12358             } else {
12359                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12360             }
12361             unlock_user(p, arg2, arg3);
12362         }
12363         return ret;
12364 #endif
12365 #ifdef TARGET_NR_mq_timedsend_time64
12366     case TARGET_NR_mq_timedsend_time64:
12367         {
12368             struct timespec ts;
12369 
12370             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12371             if (arg5 != 0) {
12372                 if (target_to_host_timespec64(&ts, arg5)) {
12373                     return -TARGET_EFAULT;
12374                 }
12375                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12376                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12377                     return -TARGET_EFAULT;
12378                 }
12379             } else {
12380                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12381             }
12382             unlock_user(p, arg2, arg3);
12383         }
12384         return ret;
12385 #endif
12386 
12387 #ifdef TARGET_NR_mq_timedreceive
12388     case TARGET_NR_mq_timedreceive:
12389         {
12390             struct timespec ts;
12391             unsigned int prio;
12392 
12393             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12394             if (arg5 != 0) {
12395                 if (target_to_host_timespec(&ts, arg5)) {
12396                     return -TARGET_EFAULT;
12397                 }
12398                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12399                                                      &prio, &ts));
12400                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12401                     return -TARGET_EFAULT;
12402                 }
12403             } else {
12404                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12405                                                      &prio, NULL));
12406             }
12407             unlock_user(p, arg2, arg3);
12408             if (arg4 != 0)
12409                 put_user_u32(prio, arg4);
12410         }
12411         return ret;
12412 #endif
12413 #ifdef TARGET_NR_mq_timedreceive_time64
12414     case TARGET_NR_mq_timedreceive_time64:
12415         {
12416             struct timespec ts;
12417             unsigned int prio;
12418 
12419             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12420             if (arg5 != 0) {
12421                 if (target_to_host_timespec64(&ts, arg5)) {
12422                     return -TARGET_EFAULT;
12423                 }
12424                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12425                                                      &prio, &ts));
12426                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12427                     return -TARGET_EFAULT;
12428                 }
12429             } else {
12430                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12431                                                      &prio, NULL));
12432             }
12433             unlock_user(p, arg2, arg3);
12434             if (arg4 != 0) {
12435                 put_user_u32(prio, arg4);
12436             }
12437         }
12438         return ret;
12439 #endif
12440 
12441     /* Not implemented for now... */
12442 /*     case TARGET_NR_mq_notify: */
12443 /*         break; */
12444 
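          /*
           * mq_getsetattr: a non-zero arg2 points at new attributes to
           * install with mq_setattr(); a non-zero arg3 receives the
           * previous (or current) attributes.
           */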
12445     case TARGET_NR_mq_getsetattr:
12446         {
12447             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12448             ret = 0;
12449             if (arg2 != 0) {
12450                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12451                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12452                                            &posix_mq_attr_out));
12453             } else if (arg3 != 0) {
12454                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12455             }
12456             if (ret == 0 && arg3 != 0) {
12457                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12458             }
12459         }
12460         return ret;
12461 #endif
12462 
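          /*
           * splice/tee/vmsplice operate directly on host file descriptors;
           * for splice the optional 64-bit offsets are read from and written
           * back to guest memory with get_user_u64()/put_user_u64().
           */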
12463 #ifdef CONFIG_SPLICE
12464 #ifdef TARGET_NR_tee
12465     case TARGET_NR_tee:
12466         {
12467             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12468         }
12469         return ret;
12470 #endif
12471 #ifdef TARGET_NR_splice
12472     case TARGET_NR_splice:
12473         {
12474             loff_t loff_in, loff_out;
12475             loff_t *ploff_in = NULL, *ploff_out = NULL;
12476             if (arg2) {
12477                 if (get_user_u64(loff_in, arg2)) {
12478                     return -TARGET_EFAULT;
12479                 }
12480                 ploff_in = &loff_in;
12481             }
12482             if (arg4) {
12483                 if (get_user_u64(loff_out, arg4)) {
12484                     return -TARGET_EFAULT;
12485                 }
12486                 ploff_out = &loff_out;
12487             }
12488             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12489             if (arg2) {
12490                 if (put_user_u64(loff_in, arg2)) {
12491                     return -TARGET_EFAULT;
12492                 }
12493             }
12494             if (arg4) {
12495                 if (put_user_u64(loff_out, arg4)) {
12496                     return -TARGET_EFAULT;
12497                 }
12498             }
12499         }
12500         return ret;
12501 #endif
12502 #ifdef TARGET_NR_vmsplice
12503     case TARGET_NR_vmsplice:
12504         {
12505             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12506             if (vec != NULL) {
12507                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12508                 unlock_iovec(vec, arg2, arg3, 0);
12509             } else {
12510                 ret = -host_to_target_errno(errno);
12511             }
12512         }
12513         return ret;
12514 #endif
12515 #endif /* CONFIG_SPLICE */
12516 #ifdef CONFIG_EVENTFD
12517 #if defined(TARGET_NR_eventfd)
12518     case TARGET_NR_eventfd:
12519         ret = get_errno(eventfd(arg1, 0));
12520         if (ret >= 0) {
12521             fd_trans_register(ret, &target_eventfd_trans);
12522         }
12523         return ret;
12524 #endif
12525 #if defined(TARGET_NR_eventfd2)
12526     case TARGET_NR_eventfd2:
12527     {
12528         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12529         if (arg2 & TARGET_O_NONBLOCK) {
12530             host_flags |= O_NONBLOCK;
12531         }
12532         if (arg2 & TARGET_O_CLOEXEC) {
12533             host_flags |= O_CLOEXEC;
12534         }
12535         ret = get_errno(eventfd(arg1, host_flags));
12536         if (ret >= 0) {
12537             fd_trans_register(ret, &target_eventfd_trans);
12538         }
12539         return ret;
12540     }
12541 #endif
12542 #endif /* CONFIG_EVENTFD  */
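          /*
           * On 32-bit guest ABIs a 64-bit file offset arrives split across
           * two registers and is reassembled with target_offset64(); which
           * half holds the high word depends on the target's endianness.
           */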
12543 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12544     case TARGET_NR_fallocate:
12545 #if TARGET_ABI_BITS == 32
12546         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12547                                   target_offset64(arg5, arg6)));
12548 #else
12549         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12550 #endif
12551         return ret;
12552 #endif
12553 #if defined(CONFIG_SYNC_FILE_RANGE)
12554 #if defined(TARGET_NR_sync_file_range)
12555     case TARGET_NR_sync_file_range:
12556 #if TARGET_ABI_BITS == 32
12557 #if defined(TARGET_MIPS)
12558         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12559                                         target_offset64(arg5, arg6), arg7));
12560 #else
12561         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12562                                         target_offset64(arg4, arg5), arg6));
12563 #endif /* !TARGET_MIPS */
12564 #else
12565         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12566 #endif
12567         return ret;
12568 #endif
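          /*
           * sync_file_range2 (and the ARM-specific arm_sync_file_range)
           * move the flags argument forward so that the two 64-bit values
           * can sit in properly aligned register pairs on 32-bit ABIs.
           */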
12569 #if defined(TARGET_NR_sync_file_range2) || \
12570     defined(TARGET_NR_arm_sync_file_range)
12571 #if defined(TARGET_NR_sync_file_range2)
12572     case TARGET_NR_sync_file_range2:
12573 #endif
12574 #if defined(TARGET_NR_arm_sync_file_range)
12575     case TARGET_NR_arm_sync_file_range:
12576 #endif
12577         /* This is like sync_file_range but the arguments are reordered */
12578 #if TARGET_ABI_BITS == 32
12579         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12580                                         target_offset64(arg5, arg6), arg2));
12581 #else
12582         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12583 #endif
12584         return ret;
12585 #endif
12586 #endif
12587 #if defined(TARGET_NR_signalfd4)
12588     case TARGET_NR_signalfd4:
12589         return do_signalfd4(arg1, arg2, arg4);
12590 #endif
12591 #if defined(TARGET_NR_signalfd)
12592     case TARGET_NR_signalfd:
12593         return do_signalfd4(arg1, arg2, 0);
12594 #endif
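          /*
           * epoll events need byte-swapping on the way through: the 32-bit
           * event mask and the opaque 64-bit data field are converted with
           * tswap32()/tswap64().  epoll_wait is implemented on top of
           * safe_epoll_pwait() with a NULL signal mask.
           */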
12595 #if defined(CONFIG_EPOLL)
12596 #if defined(TARGET_NR_epoll_create)
12597     case TARGET_NR_epoll_create:
12598         return get_errno(epoll_create(arg1));
12599 #endif
12600 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12601     case TARGET_NR_epoll_create1:
12602         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12603 #endif
12604 #if defined(TARGET_NR_epoll_ctl)
12605     case TARGET_NR_epoll_ctl:
12606     {
12607         struct epoll_event ep;
12608         struct epoll_event *epp = 0;
12609         if (arg4) {
12610             if (arg2 != EPOLL_CTL_DEL) {
12611                 struct target_epoll_event *target_ep;
12612                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12613                     return -TARGET_EFAULT;
12614                 }
12615                 ep.events = tswap32(target_ep->events);
12616                 /*
12617                  * The epoll_data_t union is just opaque data to the kernel,
12618                  * so we transfer all 64 bits across and need not worry what
12619                  * actual data type it is.
12620                  */
12621                 ep.data.u64 = tswap64(target_ep->data.u64);
12622                 unlock_user_struct(target_ep, arg4, 0);
12623             }
12624             /*
12625              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12626              * non-NULL pointer even though the argument is ignored, so if
12627              * the guest supplied an event pointer we pass one to the host.
12628              */
12629             epp = &ep;
12630         }
12631         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12632     }
12633 #endif
12634 
12635 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12636 #if defined(TARGET_NR_epoll_wait)
12637     case TARGET_NR_epoll_wait:
12638 #endif
12639 #if defined(TARGET_NR_epoll_pwait)
12640     case TARGET_NR_epoll_pwait:
12641 #endif
12642     {
12643         struct target_epoll_event *target_ep;
12644         struct epoll_event *ep;
12645         int epfd = arg1;
12646         int maxevents = arg3;
12647         int timeout = arg4;
12648 
12649         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12650             return -TARGET_EINVAL;
12651         }
12652 
12653         target_ep = lock_user(VERIFY_WRITE, arg2,
12654                               maxevents * sizeof(struct target_epoll_event), 1);
12655         if (!target_ep) {
12656             return -TARGET_EFAULT;
12657         }
12658 
12659         ep = g_try_new(struct epoll_event, maxevents);
12660         if (!ep) {
12661             unlock_user(target_ep, arg2, 0);
12662             return -TARGET_ENOMEM;
12663         }
12664 
12665         switch (num) {
12666 #if defined(TARGET_NR_epoll_pwait)
12667         case TARGET_NR_epoll_pwait:
12668         {
12669             target_sigset_t *target_set;
12670             sigset_t _set, *set = &_set;
12671 
12672             if (arg5) {
12673                 if (arg6 != sizeof(target_sigset_t)) {
12674                     ret = -TARGET_EINVAL;
12675                     break;
12676                 }
12677 
12678                 target_set = lock_user(VERIFY_READ, arg5,
12679                                        sizeof(target_sigset_t), 1);
12680                 if (!target_set) {
12681                     ret = -TARGET_EFAULT;
12682                     break;
12683                 }
12684                 target_to_host_sigset(set, target_set);
12685                 unlock_user(target_set, arg5, 0);
12686             } else {
12687                 set = NULL;
12688             }
12689 
12690             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12691                                              set, SIGSET_T_SIZE));
12692             break;
12693         }
12694 #endif
12695 #if defined(TARGET_NR_epoll_wait)
12696         case TARGET_NR_epoll_wait:
12697             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12698                                              NULL, 0));
12699             break;
12700 #endif
12701         default:
12702             ret = -TARGET_ENOSYS;
12703         }
12704         if (!is_error(ret)) {
12705             int i;
12706             for (i = 0; i < ret; i++) {
12707                 target_ep[i].events = tswap32(ep[i].events);
12708                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12709             }
12710             unlock_user(target_ep, arg2,
12711                         ret * sizeof(struct target_epoll_event));
12712         } else {
12713             unlock_user(target_ep, arg2, 0);
12714         }
12715         g_free(ep);
12716         return ret;
12717     }
12718 #endif
12719 #endif
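          /*
           * prlimit64 converts struct rlimit64 in both directions with
           * tswap64().  New limits for RLIMIT_AS, RLIMIT_DATA and
           * RLIMIT_STACK are not applied, presumably because they would
           * also constrain QEMU's own host process.
           */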
12720 #ifdef TARGET_NR_prlimit64
12721     case TARGET_NR_prlimit64:
12722     {
12723         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12724         struct target_rlimit64 *target_rnew, *target_rold;
12725         struct host_rlimit64 rnew, rold, *rnewp = 0;
12726         int resource = target_to_host_resource(arg2);
12727 
12728         if (arg3 && (resource != RLIMIT_AS &&
12729                      resource != RLIMIT_DATA &&
12730                      resource != RLIMIT_STACK)) {
12731             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12732                 return -TARGET_EFAULT;
12733             }
12734             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12735             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12736             unlock_user_struct(target_rnew, arg3, 0);
12737             rnewp = &rnew;
12738         }
12739 
12740         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12741         if (!is_error(ret) && arg4) {
12742             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12743                 return -TARGET_EFAULT;
12744             }
12745             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12746             target_rold->rlim_max = tswap64(rold.rlim_max);
12747             unlock_user_struct(target_rold, arg4, 1);
12748         }
12749         return ret;
12750     }
12751 #endif
12752 #ifdef TARGET_NR_gethostname
12753     case TARGET_NR_gethostname:
12754     {
12755         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12756         if (name) {
12757             ret = get_errno(gethostname(name, arg2));
12758             unlock_user(name, arg1, arg2);
12759         } else {
12760             ret = -TARGET_EFAULT;
12761         }
12762         return ret;
12763     }
12764 #endif
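          /*
           * atomic_cmpxchg_32 and atomic_barrier are kernel helpers used by
           * the m68k port.  As the in-code comment notes, the cmpxchg here
           * is not made atomic against other guest CPUs (that would need
           * start_exclusive()).
           */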
12765 #ifdef TARGET_NR_atomic_cmpxchg_32
12766     case TARGET_NR_atomic_cmpxchg_32:
12767     {
12768         /* should use start_exclusive from main.c */
12769         abi_ulong mem_value;
12770         if (get_user_u32(mem_value, arg6)) {
12771             target_siginfo_t info;
12772             info.si_signo = SIGSEGV;
12773             info.si_errno = 0;
12774             info.si_code = TARGET_SEGV_MAPERR;
12775             info._sifields._sigfault._addr = arg6;
12776             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12777                          QEMU_SI_FAULT, &info);
12778             /* don't fall through and use the uninitialized mem_value */
12779             return 0xdeadbeef;
12780         }
12781         if (mem_value == arg2)
12782             put_user_u32(arg1, arg6);
12783         return mem_value;
12784     }
12785 #endif
12786 #ifdef TARGET_NR_atomic_barrier
12787     case TARGET_NR_atomic_barrier:
12788         /* Like the kernel implementation and the QEMU ARM barrier,
12789          * treat this as a no-op. */
12790         return 0;
12791 #endif
12792 
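          /*
           * POSIX timers: host timer_t handles live in the g_posix_timers[]
           * table.  The ID handed back to the guest is TIMER_MAGIC | index,
           * and get_timer_id() checks the magic and recovers the index on
           * every later timer_* call.
           */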
12793 #ifdef TARGET_NR_timer_create
12794     case TARGET_NR_timer_create:
12795     {
12796         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12797 
12798         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12799 
12800         int clkid = arg1;
12801         int timer_index = next_free_host_timer();
12802 
12803         if (timer_index < 0) {
12804             ret = -TARGET_EAGAIN;
12805         } else {
12806             timer_t *phtimer = g_posix_timers + timer_index;
12807 
12808             if (arg2) {
12809                 phost_sevp = &host_sevp;
12810                 ret = target_to_host_sigevent(phost_sevp, arg2);
12811                 if (ret != 0) {
12812                     return ret;
12813                 }
12814             }
12815 
12816             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12817             if (ret) {
12818                 phtimer = NULL;
12819             } else {
12820                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12821                     return -TARGET_EFAULT;
12822                 }
12823             }
12824         }
12825         return ret;
12826     }
12827 #endif
12828 
12829 #ifdef TARGET_NR_timer_settime
12830     case TARGET_NR_timer_settime:
12831     {
12832         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12833          * struct itimerspec * old_value */
12834         target_timer_t timerid = get_timer_id(arg1);
12835 
12836         if (timerid < 0) {
12837             ret = timerid;
12838         } else if (arg3 == 0) {
12839             ret = -TARGET_EINVAL;
12840         } else {
12841             timer_t htimer = g_posix_timers[timerid];
12842             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12843 
12844             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12845                 return -TARGET_EFAULT;
12846             }
12847             ret = get_errno(
12848                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12849             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12850                 return -TARGET_EFAULT;
12851             }
12852         }
12853         return ret;
12854     }
12855 #endif
12856 
12857 #ifdef TARGET_NR_timer_settime64
12858     case TARGET_NR_timer_settime64:
12859     {
12860         target_timer_t timerid = get_timer_id(arg1);
12861 
12862         if (timerid < 0) {
12863             ret = timerid;
12864         } else if (arg3 == 0) {
12865             ret = -TARGET_EINVAL;
12866         } else {
12867             timer_t htimer = g_posix_timers[timerid];
12868             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12869 
12870             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12871                 return -TARGET_EFAULT;
12872             }
12873             ret = get_errno(
12874                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12875             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12876                 return -TARGET_EFAULT;
12877             }
12878         }
12879         return ret;
12880     }
12881 #endif
12882 
12883 #ifdef TARGET_NR_timer_gettime
12884     case TARGET_NR_timer_gettime:
12885     {
12886         /* args: timer_t timerid, struct itimerspec *curr_value */
12887         target_timer_t timerid = get_timer_id(arg1);
12888 
12889         if (timerid < 0) {
12890             ret = timerid;
12891         } else if (!arg2) {
12892             ret = -TARGET_EFAULT;
12893         } else {
12894             timer_t htimer = g_posix_timers[timerid];
12895             struct itimerspec hspec;
12896             ret = get_errno(timer_gettime(htimer, &hspec));
12897 
12898             if (host_to_target_itimerspec(arg2, &hspec)) {
12899                 ret = -TARGET_EFAULT;
12900             }
12901         }
12902         return ret;
12903     }
12904 #endif
12905 
12906 #ifdef TARGET_NR_timer_gettime64
12907     case TARGET_NR_timer_gettime64:
12908     {
12909         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12910         target_timer_t timerid = get_timer_id(arg1);
12911 
12912         if (timerid < 0) {
12913             ret = timerid;
12914         } else if (!arg2) {
12915             ret = -TARGET_EFAULT;
12916         } else {
12917             timer_t htimer = g_posix_timers[timerid];
12918             struct itimerspec hspec;
12919             ret = get_errno(timer_gettime(htimer, &hspec));
12920 
12921             if (host_to_target_itimerspec64(arg2, &hspec)) {
12922                 ret = -TARGET_EFAULT;
12923             }
12924         }
12925         return ret;
12926     }
12927 #endif
12928 
12929 #ifdef TARGET_NR_timer_getoverrun
12930     case TARGET_NR_timer_getoverrun:
12931     {
12932         /* args: timer_t timerid */
12933         target_timer_t timerid = get_timer_id(arg1);
12934 
12935         if (timerid < 0) {
12936             ret = timerid;
12937         } else {
12938             timer_t htimer = g_posix_timers[timerid];
12939             ret = get_errno(timer_getoverrun(htimer));
12940         }
12941         return ret;
12942     }
12943 #endif
12944 
12945 #ifdef TARGET_NR_timer_delete
12946     case TARGET_NR_timer_delete:
12947     {
12948         /* args: timer_t timerid */
12949         target_timer_t timerid = get_timer_id(arg1);
12950 
12951         if (timerid < 0) {
12952             ret = timerid;
12953         } else {
12954             timer_t htimer = g_posix_timers[timerid];
12955             ret = get_errno(timer_delete(htimer));
12956             g_posix_timers[timerid] = 0;
12957         }
12958         return ret;
12959     }
12960 #endif
12961 
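          /*
           * timerfd descriptors are plain host fds, so only the creation
           * flags (translated via fcntl_flags_tbl, since TFD_CLOEXEC and
           * TFD_NONBLOCK alias the O_* values) and the itimerspec structs
           * need converting.
           */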
12962 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12963     case TARGET_NR_timerfd_create:
12964         return get_errno(timerfd_create(arg1,
12965                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12966 #endif
12967 
12968 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12969     case TARGET_NR_timerfd_gettime:
12970         {
12971             struct itimerspec its_curr;
12972 
12973             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12974 
12975             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12976                 return -TARGET_EFAULT;
12977             }
12978         }
12979         return ret;
12980 #endif
12981 
12982 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12983     case TARGET_NR_timerfd_gettime64:
12984         {
12985             struct itimerspec its_curr;
12986 
12987             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12988 
12989             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12990                 return -TARGET_EFAULT;
12991             }
12992         }
12993         return ret;
12994 #endif
12995 
12996 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12997     case TARGET_NR_timerfd_settime:
12998         {
12999             struct itimerspec its_new, its_old, *p_new;
13000 
13001             if (arg3) {
13002                 if (target_to_host_itimerspec(&its_new, arg3)) {
13003                     return -TARGET_EFAULT;
13004                 }
13005                 p_new = &its_new;
13006             } else {
13007                 p_new = NULL;
13008             }
13009 
13010             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13011 
13012             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13013                 return -TARGET_EFAULT;
13014             }
13015         }
13016         return ret;
13017 #endif
13018 
13019 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13020     case TARGET_NR_timerfd_settime64:
13021         {
13022             struct itimerspec its_new, its_old, *p_new;
13023 
13024             if (arg3) {
13025                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13026                     return -TARGET_EFAULT;
13027                 }
13028                 p_new = &its_new;
13029             } else {
13030                 p_new = NULL;
13031             }
13032 
13033             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13034 
13035             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13036                 return -TARGET_EFAULT;
13037             }
13038         }
13039         return ret;
13040 #endif
13041 
13042 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13043     case TARGET_NR_ioprio_get:
13044         return get_errno(ioprio_get(arg1, arg2));
13045 #endif
13046 
13047 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13048     case TARGET_NR_ioprio_set:
13049         return get_errno(ioprio_set(arg1, arg2, arg3));
13050 #endif
13051 
13052 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13053     case TARGET_NR_setns:
13054         return get_errno(setns(arg1, arg2));
13055 #endif
13056 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13057     case TARGET_NR_unshare:
13058         return get_errno(unshare(arg1));
13059 #endif
13060 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13061     case TARGET_NR_kcmp:
13062         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13063 #endif
13064 #ifdef TARGET_NR_swapcontext
13065     case TARGET_NR_swapcontext:
13066         /* PowerPC specific.  */
13067         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13068 #endif
13069 #ifdef TARGET_NR_memfd_create
13070     case TARGET_NR_memfd_create:
13071         p = lock_user_string(arg1);
13072         if (!p) {
13073             return -TARGET_EFAULT;
13074         }
13075         ret = get_errno(memfd_create(p, arg2));
13076         fd_trans_unregister(ret);
13077         unlock_user(p, arg1, 0);
13078         return ret;
13079 #endif
13080 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13081     case TARGET_NR_membarrier:
13082         return get_errno(membarrier(arg1, arg2));
13083 #endif
13084 
13085 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13086     case TARGET_NR_copy_file_range:
13087         {
13088             loff_t inoff, outoff;
13089             loff_t *pinoff = NULL, *poutoff = NULL;
13090 
13091             if (arg2) {
13092                 if (get_user_u64(inoff, arg2)) {
13093                     return -TARGET_EFAULT;
13094                 }
13095                 pinoff = &inoff;
13096             }
13097             if (arg4) {
13098                 if (get_user_u64(outoff, arg4)) {
13099                     return -TARGET_EFAULT;
13100                 }
13101                 poutoff = &outoff;
13102             }
13103             /* Do not sign-extend the count parameter. */
13104             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13105                                                  (abi_ulong)arg5, arg6));
13106             if (!is_error(ret) && ret > 0) {
13107                 if (arg2) {
13108                     if (put_user_u64(inoff, arg2)) {
13109                         return -TARGET_EFAULT;
13110                     }
13111                 }
13112                 if (arg4) {
13113                     if (put_user_u64(outoff, arg4)) {
13114                         return -TARGET_EFAULT;
13115                     }
13116                 }
13117             }
13118         }
13119         return ret;
13120 #endif
13121 
13122 #if defined(TARGET_NR_pivot_root)
13123     case TARGET_NR_pivot_root:
13124         {
13125             void *p2;
13126             p = lock_user_string(arg1); /* new_root */
13127             p2 = lock_user_string(arg2); /* put_old */
13128             if (!p || !p2) {
13129                 ret = -TARGET_EFAULT;
13130             } else {
13131                 ret = get_errno(pivot_root(p, p2));
13132             }
13133             unlock_user(p2, arg2, 0);
13134             unlock_user(p, arg1, 0);
13135         }
13136         return ret;
13137 #endif
13138 
13139     default:
13140         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13141         return -TARGET_ENOSYS;
13142     }
13143     return ret;
13144 }
13145 
13146 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13147                     abi_long arg2, abi_long arg3, abi_long arg4,
13148                     abi_long arg5, abi_long arg6, abi_long arg7,
13149                     abi_long arg8)
13150 {
13151     CPUState *cpu = env_cpu(cpu_env);
13152     abi_long ret;
13153 
13154 #ifdef DEBUG_ERESTARTSYS
13155     /* Debug-only code for exercising the syscall-restart code paths
13156      * in the per-architecture cpu main loops: restart every syscall
13157      * the guest makes once before letting it through.
13158      */
13159     {
13160         static bool flag;
13161         flag = !flag;
13162         if (flag) {
13163             return -TARGET_ERESTARTSYS;
13164         }
13165     }
13166 #endif
13167 
13168     record_syscall_start(cpu, num, arg1,
13169                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13170 
13171     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13172         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13173     }
13174 
13175     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13176                       arg5, arg6, arg7, arg8);
13177 
13178     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13179         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13180                           arg3, arg4, arg5, arg6);
13181     }
13182 
13183     record_syscall_return(cpu, num, ret);
13184     return ret;
13185 }
13186