1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef HAVE_SYS_KCOV_H
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_BTRFS_H
116 #include <linux/btrfs.h>
117 #endif
118 #ifdef HAVE_DRM_H
119 #include <libdrm/drm.h>
120 #include <libdrm/i915_drm.h>
121 #endif
122 #include "linux_loop.h"
123 #include "uname.h"
124 
125 #include "qemu.h"
126 #include "qemu/guest-random.h"
127 #include "qemu/selfmap.h"
128 #include "user/syscall-trace.h"
129 #include "qapi/error.h"
130 #include "fd-trans.h"
131 #include "tcg/tcg.h"
132 
133 #ifndef CLONE_IO
134 #define CLONE_IO                0x80000000      /* Clone io context */
135 #endif
136 
137 /* We can't directly call the host clone syscall, because this will
138  * badly confuse libc (breaking mutexes, for example). So we must
139  * divide clone flags into:
140  *  * flag combinations that look like pthread_create()
141  *  * flag combinations that look like fork()
142  *  * flags we can implement within QEMU itself
143  *  * flags we can't support and will return an error for
144  */
145 /* For thread creation, all these flags must be present; for
146  * fork, none must be present.
147  */
148 #define CLONE_THREAD_FLAGS                              \
149     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
150      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
151 
152 /* These flags are ignored:
153  * CLONE_DETACHED is now ignored by the kernel;
154  * CLONE_IO is just an optimisation hint to the I/O scheduler
155  */
156 #define CLONE_IGNORED_FLAGS                     \
157     (CLONE_DETACHED | CLONE_IO)
158 
159 /* Flags for fork which we can implement within QEMU itself */
160 #define CLONE_OPTIONAL_FORK_FLAGS               \
161     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
162      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
163 
164 /* Flags for thread creation which we can implement within QEMU itself */
165 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
166     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
167      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
168 
169 #define CLONE_INVALID_FORK_FLAGS                                        \
170     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
171 
172 #define CLONE_INVALID_THREAD_FLAGS                                      \
173     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
174        CLONE_IGNORED_FLAGS))
175 
176 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
177  * have almost all been allocated. We cannot support any of
178  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
179  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
180  * The checks against the invalid thread masks above will catch these.
181  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
182  */
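/* For illustration: a guest glibc pthread_create() typically issues clone()
 * with CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * i.e. all of CLONE_THREAD_FLAGS plus only optional thread flags, so it
 * passes the CLONE_INVALID_THREAD_FLAGS check.  A plain fork()-style clone
 * carries little more than SIGCHLD in the CSIGNAL bits (possibly plus the
 * child-tid bookkeeping flags), which likewise passes the fork mask.
 */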
183 
184 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
185  * once. This exercises the codepaths for restart.
186  */
187 //#define DEBUG_ERESTARTSYS
188 
189 //#include <linux/msdos_fs.h>
190 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
191 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
192 
193 #undef _syscall0
194 #undef _syscall1
195 #undef _syscall2
196 #undef _syscall3
197 #undef _syscall4
198 #undef _syscall5
199 #undef _syscall6
200 
201 #define _syscall0(type,name)		\
202 static type name (void)			\
203 {					\
204 	return syscall(__NR_##name);	\
205 }
206 
207 #define _syscall1(type,name,type1,arg1)		\
208 static type name (type1 arg1)			\
209 {						\
210 	return syscall(__NR_##name, arg1);	\
211 }
212 
213 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
214 static type name (type1 arg1,type2 arg2)		\
215 {							\
216 	return syscall(__NR_##name, arg1, arg2);	\
217 }
218 
219 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
220 static type name (type1 arg1,type2 arg2,type3 arg3)		\
221 {								\
222 	return syscall(__NR_##name, arg1, arg2, arg3);		\
223 }
224 
225 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
227 {										\
228 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
229 }
230 
231 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
232 		  type5,arg5)							\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
234 {										\
235 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
236 }
237 
238 
239 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
240 		  type5,arg5,type6,arg6)					\
241 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
242                   type6 arg6)							\
243 {										\
244 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
245 }
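/* For illustration, these wrappers expand mechanically; for example
 *
 *     _syscall0(int, sys_gettid)
 *
 * becomes (roughly)
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * with __NR_sys_gettid aliased to the host __NR_gettid further down.
 */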
246 
247 
248 #define __NR_sys_uname __NR_uname
249 #define __NR_sys_getcwd1 __NR_getcwd
250 #define __NR_sys_getdents __NR_getdents
251 #define __NR_sys_getdents64 __NR_getdents64
252 #define __NR_sys_getpriority __NR_getpriority
253 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
254 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
255 #define __NR_sys_syslog __NR_syslog
256 #if defined(__NR_futex)
257 # define __NR_sys_futex __NR_futex
258 #endif
259 #if defined(__NR_futex_time64)
260 # define __NR_sys_futex_time64 __NR_futex_time64
261 #endif
262 #define __NR_sys_inotify_init __NR_inotify_init
263 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
264 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
265 #define __NR_sys_statx __NR_statx
266 
267 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
268 #define __NR__llseek __NR_lseek
269 #endif
270 
271 /* Newer kernel ports have llseek() instead of _llseek() */
272 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
273 #define TARGET_NR__llseek TARGET_NR_llseek
274 #endif
275 
276 #define __NR_sys_gettid __NR_gettid
277 _syscall0(int, sys_gettid)
278 
279 /* For the 64-bit guest on 32-bit host case we must emulate
280  * getdents using getdents64, because otherwise the host
281  * might hand us back more dirent records than we can fit
282  * into the guest buffer after structure format conversion.
283  * Otherwise we implement guest getdents directly with the host getdents, if the host has it.
284  */
285 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
286 #define EMULATE_GETDENTS_WITH_GETDENTS
287 #endif
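/* For illustration: on a 32-bit host the native struct linux_dirent carries
 * 32-bit d_ino/d_off fields, while a 64-bit guest expects abi-sized (64-bit)
 * fields, so each record can grow during conversion and a full host buffer
 * may no longer fit in the guest one.  The getdents64 record layout uses
 * fixed 64-bit fields on both sides, so records keep their size.
 */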
288 
289 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
290 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
291 #endif
292 #if (defined(TARGET_NR_getdents) && \
293       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
294     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
295 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
296 #endif
297 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
298 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
299           loff_t *, res, uint, wh);
300 #endif
301 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
302 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
303           siginfo_t *, uinfo)
304 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
305 #ifdef __NR_exit_group
306 _syscall1(int,exit_group,int,error_code)
307 #endif
308 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
309 _syscall1(int,set_tid_address,int *,tidptr)
310 #endif
311 #if defined(__NR_futex)
312 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
313           const struct timespec *,timeout,int *,uaddr2,int,val3)
314 #endif
315 #if defined(__NR_futex_time64)
316 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
317           const struct timespec *,timeout,int *,uaddr2,int,val3)
318 #endif
319 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
320 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
321           unsigned long *, user_mask_ptr);
322 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
323 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
324           unsigned long *, user_mask_ptr);
325 #define __NR_sys_getcpu __NR_getcpu
326 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
327 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
328           void *, arg);
329 _syscall2(int, capget, struct __user_cap_header_struct *, header,
330           struct __user_cap_data_struct *, data);
331 _syscall2(int, capset, struct __user_cap_header_struct *, header,
332           struct __user_cap_data_struct *, data);
333 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
334 _syscall2(int, ioprio_get, int, which, int, who)
335 #endif
336 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
337 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
338 #endif
339 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
340 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
341 #endif
342 
343 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
344 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
345           unsigned long, idx1, unsigned long, idx2)
346 #endif
347 
348 /*
349  * It is assumed that struct statx is architecture independent.
350  */
351 #if defined(TARGET_NR_statx) && defined(__NR_statx)
352 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
353           unsigned int, mask, struct target_statx *, statxbuf)
354 #endif
355 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
356 _syscall2(int, membarrier, int, cmd, int, flags)
357 #endif
358 
359 static bitmask_transtbl fcntl_flags_tbl[] = {
360   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
361   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
362   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
363   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
364   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
365   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
366   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
367   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
368   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
369   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
370   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
371   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
372   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
373 #if defined(O_DIRECT)
374   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
375 #endif
376 #if defined(O_NOATIME)
377   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
378 #endif
379 #if defined(O_CLOEXEC)
380   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
381 #endif
382 #if defined(O_PATH)
383   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
384 #endif
385 #if defined(O_TMPFILE)
386   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
387 #endif
388   /* Don't terminate the list prematurely on 64-bit host+guest.  */
389 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
390   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
391 #endif
392   { 0, 0, 0, 0 }
393 };
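/* For illustration: each row above is { target_mask, target_bits, host_mask,
 * host_bits }.  The bitmask translation helpers (e.g. target_to_host_bitmask())
 * walk the table and, for every row whose target_mask selects target_bits in
 * the guest value, OR in the corresponding host_bits, so a guest
 * O_CREAT | O_NONBLOCK is rebuilt from the host's own (possibly numerically
 * different) O_CREAT and O_NONBLOCK definitions.
 */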
394 
395 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
396 
397 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
398 #if defined(__NR_utimensat)
399 #define __NR_sys_utimensat __NR_utimensat
400 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
401           const struct timespec *,tsp,int,flags)
402 #else
403 static int sys_utimensat(int dirfd, const char *pathname,
404                          const struct timespec times[2], int flags)
405 {
406     errno = ENOSYS;
407     return -1;
408 }
409 #endif
410 #endif /* TARGET_NR_utimensat */
411 
412 #ifdef TARGET_NR_renameat2
413 #if defined(__NR_renameat2)
414 #define __NR_sys_renameat2 __NR_renameat2
415 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
416           const char *, new, unsigned int, flags)
417 #else
418 static int sys_renameat2(int oldfd, const char *old,
419                          int newfd, const char *new, int flags)
420 {
421     if (flags == 0) {
422         return renameat(oldfd, old, newfd, new);
423     }
424     errno = ENOSYS;
425     return -1;
426 }
427 #endif
428 #endif /* TARGET_NR_renameat2 */
429 
430 #ifdef CONFIG_INOTIFY
431 #include <sys/inotify.h>
432 
433 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
434 static int sys_inotify_init(void)
435 {
436   return (inotify_init());
437 }
438 #endif
439 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
440 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
441 {
442   return (inotify_add_watch(fd, pathname, mask));
443 }
444 #endif
445 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
446 static int sys_inotify_rm_watch(int fd, int32_t wd)
447 {
448   return (inotify_rm_watch(fd, wd));
449 }
450 #endif
451 #ifdef CONFIG_INOTIFY1
452 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
453 static int sys_inotify_init1(int flags)
454 {
455   return (inotify_init1(flags));
456 }
457 #endif
458 #endif
459 #else
460 /* Userspace can usually survive runtime without inotify */
461 #undef TARGET_NR_inotify_init
462 #undef TARGET_NR_inotify_init1
463 #undef TARGET_NR_inotify_add_watch
464 #undef TARGET_NR_inotify_rm_watch
465 #endif /* CONFIG_INOTIFY  */
466 
467 #if defined(TARGET_NR_prlimit64)
468 #ifndef __NR_prlimit64
469 # define __NR_prlimit64 -1
470 #endif
471 #define __NR_sys_prlimit64 __NR_prlimit64
472 /* The glibc rlimit structure may not be the one used by the underlying syscall */
473 struct host_rlimit64 {
474     uint64_t rlim_cur;
475     uint64_t rlim_max;
476 };
477 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
478           const struct host_rlimit64 *, new_limit,
479           struct host_rlimit64 *, old_limit)
480 #endif
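/* For illustration: on a 32-bit host glibc's struct rlimit may use a 32-bit
 * rlim_t (unless built with 64-bit file offsets), while the raw prlimit64
 * syscall always exchanges the 64-bit layout above; spelling the structure
 * out here avoids depending on how libc was configured.
 */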
481 
482 
483 #if defined(TARGET_NR_timer_create)
484 /* Maximum of 32 active POSIX timers allowed at any one time. */
485 static timer_t g_posix_timers[32] = { 0, };
486 
487 static inline int next_free_host_timer(void)
488 {
489     int k;
490     /* FIXME: Does finding the next free slot require a lock? */
491     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
492         if (g_posix_timers[k] == 0) {
493             g_posix_timers[k] = (timer_t) 1;
494             return k;
495         }
496     }
497     return -1;
498 }
499 #endif
500 
501 #define ERRNO_TABLE_SIZE 1200
502 
503 /* target_to_host_errno_table[] is initialized from
504  * host_to_target_errno_table[] in syscall_init(). */
505 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
506 };
507 
508 /*
509  * This list is the union of errno values overridden in asm-<arch>/errno.h
510  * minus the errnos that are not actually generic to all archs.
511  */
512 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
513     [EAGAIN]		= TARGET_EAGAIN,
514     [EIDRM]		= TARGET_EIDRM,
515     [ECHRNG]		= TARGET_ECHRNG,
516     [EL2NSYNC]		= TARGET_EL2NSYNC,
517     [EL3HLT]		= TARGET_EL3HLT,
518     [EL3RST]		= TARGET_EL3RST,
519     [ELNRNG]		= TARGET_ELNRNG,
520     [EUNATCH]		= TARGET_EUNATCH,
521     [ENOCSI]		= TARGET_ENOCSI,
522     [EL2HLT]		= TARGET_EL2HLT,
523     [EDEADLK]		= TARGET_EDEADLK,
524     [ENOLCK]		= TARGET_ENOLCK,
525     [EBADE]		= TARGET_EBADE,
526     [EBADR]		= TARGET_EBADR,
527     [EXFULL]		= TARGET_EXFULL,
528     [ENOANO]		= TARGET_ENOANO,
529     [EBADRQC]		= TARGET_EBADRQC,
530     [EBADSLT]		= TARGET_EBADSLT,
531     [EBFONT]		= TARGET_EBFONT,
532     [ENOSTR]		= TARGET_ENOSTR,
533     [ENODATA]		= TARGET_ENODATA,
534     [ETIME]		= TARGET_ETIME,
535     [ENOSR]		= TARGET_ENOSR,
536     [ENONET]		= TARGET_ENONET,
537     [ENOPKG]		= TARGET_ENOPKG,
538     [EREMOTE]		= TARGET_EREMOTE,
539     [ENOLINK]		= TARGET_ENOLINK,
540     [EADV]		= TARGET_EADV,
541     [ESRMNT]		= TARGET_ESRMNT,
542     [ECOMM]		= TARGET_ECOMM,
543     [EPROTO]		= TARGET_EPROTO,
544     [EDOTDOT]		= TARGET_EDOTDOT,
545     [EMULTIHOP]		= TARGET_EMULTIHOP,
546     [EBADMSG]		= TARGET_EBADMSG,
547     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
548     [EOVERFLOW]		= TARGET_EOVERFLOW,
549     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
550     [EBADFD]		= TARGET_EBADFD,
551     [EREMCHG]		= TARGET_EREMCHG,
552     [ELIBACC]		= TARGET_ELIBACC,
553     [ELIBBAD]		= TARGET_ELIBBAD,
554     [ELIBSCN]		= TARGET_ELIBSCN,
555     [ELIBMAX]		= TARGET_ELIBMAX,
556     [ELIBEXEC]		= TARGET_ELIBEXEC,
557     [EILSEQ]		= TARGET_EILSEQ,
558     [ENOSYS]		= TARGET_ENOSYS,
559     [ELOOP]		= TARGET_ELOOP,
560     [ERESTART]		= TARGET_ERESTART,
561     [ESTRPIPE]		= TARGET_ESTRPIPE,
562     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
563     [EUSERS]		= TARGET_EUSERS,
564     [ENOTSOCK]		= TARGET_ENOTSOCK,
565     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
566     [EMSGSIZE]		= TARGET_EMSGSIZE,
567     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
568     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
569     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
570     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
571     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
572     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
573     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
574     [EADDRINUSE]	= TARGET_EADDRINUSE,
575     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
576     [ENETDOWN]		= TARGET_ENETDOWN,
577     [ENETUNREACH]	= TARGET_ENETUNREACH,
578     [ENETRESET]		= TARGET_ENETRESET,
579     [ECONNABORTED]	= TARGET_ECONNABORTED,
580     [ECONNRESET]	= TARGET_ECONNRESET,
581     [ENOBUFS]		= TARGET_ENOBUFS,
582     [EISCONN]		= TARGET_EISCONN,
583     [ENOTCONN]		= TARGET_ENOTCONN,
584     [EUCLEAN]		= TARGET_EUCLEAN,
585     [ENOTNAM]		= TARGET_ENOTNAM,
586     [ENAVAIL]		= TARGET_ENAVAIL,
587     [EISNAM]		= TARGET_EISNAM,
588     [EREMOTEIO]		= TARGET_EREMOTEIO,
589     [EDQUOT]            = TARGET_EDQUOT,
590     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
591     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
592     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
593     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
594     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
595     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
596     [EALREADY]		= TARGET_EALREADY,
597     [EINPROGRESS]	= TARGET_EINPROGRESS,
598     [ESTALE]		= TARGET_ESTALE,
599     [ECANCELED]		= TARGET_ECANCELED,
600     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
601     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
602 #ifdef ENOKEY
603     [ENOKEY]		= TARGET_ENOKEY,
604 #endif
605 #ifdef EKEYEXPIRED
606     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
607 #endif
608 #ifdef EKEYREVOKED
609     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
610 #endif
611 #ifdef EKEYREJECTED
612     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
613 #endif
614 #ifdef EOWNERDEAD
615     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
616 #endif
617 #ifdef ENOTRECOVERABLE
618     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
619 #endif
620 #ifdef ENOMSG
621     [ENOMSG]            = TARGET_ENOMSG,
622 #endif
623 #ifdef ERFKILL
624     [ERFKILL]           = TARGET_ERFKILL,
625 #endif
626 #ifdef EHWPOISON
627     [EHWPOISON]         = TARGET_EHWPOISON,
628 #endif
629 };
630 
631 static inline int host_to_target_errno(int err)
632 {
633     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
634         host_to_target_errno_table[err]) {
635         return host_to_target_errno_table[err];
636     }
637     return err;
638 }
639 
640 static inline int target_to_host_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         target_to_host_errno_table[err]) {
644         return target_to_host_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline abi_long get_errno(abi_long ret)
650 {
651     if (ret == -1)
652         return -host_to_target_errno(errno);
653     else
654         return ret;
655 }
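/* For illustration, the pattern used throughout this file is roughly
 * (names hypothetical):
 *
 *     ret = get_errno(safe_openat(dirfd, p, host_flags, mode));
 *     if (is_error(ret)) {
 *         return ret;        // already a negative TARGET_Exxx value
 *     }
 *
 * so host failures are folded into guest errnos right at the call site.
 */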
656 
657 const char *target_strerror(int err)
658 {
659     if (err == TARGET_ERESTARTSYS) {
660         return "To be restarted";
661     }
662     if (err == TARGET_QEMU_ESIGRETURN) {
663         return "Successful exit from sigreturn";
664     }
665 
666     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
667         return NULL;
668     }
669     return strerror(target_to_host_errno(err));
670 }
671 
672 #define safe_syscall0(type, name) \
673 static type safe_##name(void) \
674 { \
675     return safe_syscall(__NR_##name); \
676 }
677 
678 #define safe_syscall1(type, name, type1, arg1) \
679 static type safe_##name(type1 arg1) \
680 { \
681     return safe_syscall(__NR_##name, arg1); \
682 }
683 
684 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
685 static type safe_##name(type1 arg1, type2 arg2) \
686 { \
687     return safe_syscall(__NR_##name, arg1, arg2); \
688 }
689 
690 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
691 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
692 { \
693     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
694 }
695 
696 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
697     type4, arg4) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
701 }
702 
703 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4, type5, arg5) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
706     type5 arg5) \
707 { \
708     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
709 }
710 
711 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
712     type4, arg4, type5, arg5, type6, arg6) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
714     type5 arg5, type6 arg6) \
715 { \
716     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
717 }
718 
719 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
720 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
721 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
722               int, flags, mode_t, mode)
723 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
724 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
725               struct rusage *, rusage)
726 #endif
727 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
728               int, options, struct rusage *, rusage)
729 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
730 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
731     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
732 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
733               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
734 #endif
735 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
736 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
737               struct timespec *, tsp, const sigset_t *, sigmask,
738               size_t, sigsetsize)
739 #endif
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 #if defined(__NR_futex)
744 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
745               const struct timespec *,timeout,int *,uaddr2,int,val3)
746 #endif
747 #if defined(__NR_futex_time64)
748 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
749               const struct timespec *,timeout,int *,uaddr2,int,val3)
750 #endif
751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
752 safe_syscall2(int, kill, pid_t, pid, int, sig)
753 safe_syscall2(int, tkill, int, tid, int, sig)
754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
758               unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
760               unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
762               socklen_t, addrlen)
763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
764               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
766               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
769 safe_syscall2(int, flock, int, fd, int, operation)
770 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
771 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
772               const struct timespec *, uts, size_t, sigsetsize)
773 #endif
774 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
775               int, flags)
776 #if defined(TARGET_NR_nanosleep)
777 safe_syscall2(int, nanosleep, const struct timespec *, req,
778               struct timespec *, rem)
779 #endif
780 #if defined(TARGET_NR_clock_nanosleep) || \
781     defined(TARGET_NR_clock_nanosleep_time64)
782 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
783               const struct timespec *, req, struct timespec *, rem)
784 #endif
785 #ifdef __NR_ipc
786 #ifdef __s390x__
787 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
788               void *, ptr)
789 #else
790 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
791               void *, ptr, long, fifth)
792 #endif
793 #endif
794 #ifdef __NR_msgsnd
795 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
796               int, flags)
797 #endif
798 #ifdef __NR_msgrcv
799 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
800               long, msgtype, int, flags)
801 #endif
802 #ifdef __NR_semtimedop
803 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
804               unsigned, nsops, const struct timespec *, timeout)
805 #endif
806 #if defined(TARGET_NR_mq_timedsend) || \
807     defined(TARGET_NR_mq_timedsend_time64)
808 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
809               size_t, len, unsigned, prio, const struct timespec *, timeout)
810 #endif
811 #if defined(TARGET_NR_mq_timedreceive) || \
812     defined(TARGET_NR_mq_timedreceive_time64)
813 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
814               size_t, len, unsigned *, prio, const struct timespec *, timeout)
815 #endif
816 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
817 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
818               int, outfd, loff_t *, poutoff, size_t, length,
819               unsigned int, flags)
820 #endif
821 
822 /* We do ioctl like this rather than via safe_syscall3 to preserve the
823  * "third argument might be integer or pointer or not present" behaviour of
824  * the libc function.
825  */
826 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
827 /* Similarly for fcntl. Note that callers must always:
828  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
829  *  - use the flock64 struct rather than unsuffixed flock
830  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
831  */
832 #ifdef __NR_fcntl64
833 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
834 #else
835 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
836 #endif
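/* A sketch of a conforming call site (variable names are hypothetical):
 *
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     ret = get_errno(safe_fcntl(fd, F_SETLK64, &fl64));
 *
 * i.e. always the 64-bit command constants and struct flock64, as the
 * comment above requires.
 */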
837 
838 static inline int host_to_target_sock_type(int host_type)
839 {
840     int target_type;
841 
842     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
843     case SOCK_DGRAM:
844         target_type = TARGET_SOCK_DGRAM;
845         break;
846     case SOCK_STREAM:
847         target_type = TARGET_SOCK_STREAM;
848         break;
849     default:
850         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
851         break;
852     }
853 
854 #if defined(SOCK_CLOEXEC)
855     if (host_type & SOCK_CLOEXEC) {
856         target_type |= TARGET_SOCK_CLOEXEC;
857     }
858 #endif
859 
860 #if defined(SOCK_NONBLOCK)
861     if (host_type & SOCK_NONBLOCK) {
862         target_type |= TARGET_SOCK_NONBLOCK;
863     }
864 #endif
865 
866     return target_type;
867 }
868 
869 static abi_ulong target_brk;
870 static abi_ulong target_original_brk;
871 static abi_ulong brk_page;
872 
873 void target_set_brk(abi_ulong new_brk)
874 {
875     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
876     brk_page = HOST_PAGE_ALIGN(target_brk);
877 }
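/* For illustration, a typical guest allocator drives the brk state as
 * (guest-side pseudo-code; delta is a made-up value):
 *
 *     cur = brk(0);           query: do_brk(0) just reports target_brk
 *     brk(cur + delta);       grow: may need a fresh mapping past brk_page
 *     brk(cur);               shrink: handled entirely within brk_page
 *
 * The details are in do_brk() below.
 */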
878 
879 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
880 #define DEBUGF_BRK(message, args...)
881 
882 /* do_brk() must return target values and target errnos. */
883 abi_long do_brk(abi_ulong new_brk)
884 {
885     abi_long mapped_addr;
886     abi_ulong new_alloc_size;
887 
888     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
889 
890     if (!new_brk) {
891         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
892         return target_brk;
893     }
894     if (new_brk < target_original_brk) {
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
896                    target_brk);
897         return target_brk;
898     }
899 
900     /* If the new brk is less than the highest page reserved to the
901      * target heap allocation, set it and we're almost done...  */
902     if (new_brk <= brk_page) {
903         /* Heap contents are initialized to zero, as for anonymous
904          * mapped pages.  */
905         if (new_brk > target_brk) {
906             memset(g2h(target_brk), 0, new_brk - target_brk);
907         }
908         target_brk = new_brk;
909         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
910         return target_brk;
911     }
912 
913     /* We need to allocate more memory after the brk... Note that
914      * we don't use MAP_FIXED because that will map over the top of
915      * any existing mapping (like the one with the host libc or qemu
916      * itself); instead we treat "mapped but at wrong address" as
917      * a failure and unmap again.
918      */
919     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
920     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
921                                         PROT_READ|PROT_WRITE,
922                                         MAP_ANON|MAP_PRIVATE, 0, 0));
923 
924     if (mapped_addr == brk_page) {
925         /* Heap contents are initialized to zero, as for anonymous
926          * mapped pages.  Technically the new pages are already
927          * initialized to zero since they *are* anonymous mapped
928          * pages, however we have to take care with the contents that
929          * come from the remaining part of the previous page: it may
930      * contain garbage data due to a previous heap usage (grown
931      * then shrunk).  */
932         memset(g2h(target_brk), 0, brk_page - target_brk);
933 
934         target_brk = new_brk;
935         brk_page = HOST_PAGE_ALIGN(target_brk);
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
937             target_brk);
938         return target_brk;
939     } else if (mapped_addr != -1) {
940         /* Mapped but at wrong address, meaning there wasn't actually
941          * enough space for this brk.
942          */
943         target_munmap(mapped_addr, new_alloc_size);
944         mapped_addr = -1;
945         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
946     }
947     else {
948         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
949     }
950 
951 #if defined(TARGET_ALPHA)
952     /* We (partially) emulate OSF/1 on Alpha, which requires we
953        return a proper errno, not an unchanged brk value.  */
954     return -TARGET_ENOMEM;
955 #endif
956     /* For everything else, return the previous break. */
957     return target_brk;
958 }
959 
960 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
961     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
962 static inline abi_long copy_from_user_fdset(fd_set *fds,
963                                             abi_ulong target_fds_addr,
964                                             int n)
965 {
966     int i, nw, j, k;
967     abi_ulong b, *target_fds;
968 
969     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
970     if (!(target_fds = lock_user(VERIFY_READ,
971                                  target_fds_addr,
972                                  sizeof(abi_ulong) * nw,
973                                  1)))
974         return -TARGET_EFAULT;
975 
976     FD_ZERO(fds);
977     k = 0;
978     for (i = 0; i < nw; i++) {
979         /* grab the abi_ulong */
980         __get_user(b, &target_fds[i]);
981         for (j = 0; j < TARGET_ABI_BITS; j++) {
982             /* check the bit inside the abi_ulong */
983             if ((b >> j) & 1)
984                 FD_SET(k, fds);
985             k++;
986         }
987     }
988 
989     unlock_user(target_fds, target_fds_addr, 0);
990 
991     return 0;
992 }
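/* Worked example: for a 32-bit guest (TARGET_ABI_BITS == 32), descriptor 35
 * lives in bit 3 of target_fds[1]; the loop above reads that abi_ulong, sees
 * the bit set and replays it as FD_SET(35, fds), whatever word size the host
 * fd_set happens to use.
 */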
993 
994 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
995                                                  abi_ulong target_fds_addr,
996                                                  int n)
997 {
998     if (target_fds_addr) {
999         if (copy_from_user_fdset(fds, target_fds_addr, n))
1000             return -TARGET_EFAULT;
1001         *fds_ptr = fds;
1002     } else {
1003         *fds_ptr = NULL;
1004     }
1005     return 0;
1006 }
1007 
1008 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1009                                           const fd_set *fds,
1010                                           int n)
1011 {
1012     int i, nw, j, k;
1013     abi_long v;
1014     abi_ulong *target_fds;
1015 
1016     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1017     if (!(target_fds = lock_user(VERIFY_WRITE,
1018                                  target_fds_addr,
1019                                  sizeof(abi_ulong) * nw,
1020                                  0)))
1021         return -TARGET_EFAULT;
1022 
1023     k = 0;
1024     for (i = 0; i < nw; i++) {
1025         v = 0;
1026         for (j = 0; j < TARGET_ABI_BITS; j++) {
1027             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1028             k++;
1029         }
1030         __put_user(v, &target_fds[i]);
1031     }
1032 
1033     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1034 
1035     return 0;
1036 }
1037 #endif
1038 
1039 #if defined(__alpha__)
1040 #define HOST_HZ 1024
1041 #else
1042 #define HOST_HZ 100
1043 #endif
1044 
1045 static inline abi_long host_to_target_clock_t(long ticks)
1046 {
1047 #if HOST_HZ == TARGET_HZ
1048     return ticks;
1049 #else
1050     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1051 #endif
1052 }
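/* Worked example: with an Alpha-style 1024 Hz guest clock_t (TARGET_HZ 1024)
 * and HOST_HZ 100, 250 host ticks become 250 * 1024 / 100 = 2560 guest ticks;
 * when both sides use the same HZ the value passes through unchanged.
 */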
1053 
1054 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1055                                              const struct rusage *rusage)
1056 {
1057     struct target_rusage *target_rusage;
1058 
1059     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1060         return -TARGET_EFAULT;
1061     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1062     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1063     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1064     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1065     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1066     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1067     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1068     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1069     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1070     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1071     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1072     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1073     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1074     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1075     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1076     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1077     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1078     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1079     unlock_user_struct(target_rusage, target_addr, 1);
1080 
1081     return 0;
1082 }
1083 
1084 #ifdef TARGET_NR_setrlimit
1085 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1086 {
1087     abi_ulong target_rlim_swap;
1088     rlim_t result;
1089 
1090     target_rlim_swap = tswapal(target_rlim);
1091     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1092         return RLIM_INFINITY;
1093 
1094     result = target_rlim_swap;
1095     if (target_rlim_swap != (rlim_t)result)
1096         return RLIM_INFINITY;
1097 
1098     return result;
1099 }
1100 #endif
1101 
1102 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1103 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1104 {
1105     abi_ulong target_rlim_swap;
1106     abi_ulong result;
1107 
1108     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1109         target_rlim_swap = TARGET_RLIM_INFINITY;
1110     else
1111         target_rlim_swap = rlim;
1112     result = tswapal(target_rlim_swap);
1113 
1114     return result;
1115 }
1116 #endif
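/* For illustration: both converters above saturate rather than truncate, e.g.
 * a guest TARGET_RLIM_INFINITY becomes the host RLIM_INFINITY, and a host
 * limit too large for abi_ulong is reported back as TARGET_RLIM_INFINITY
 * instead of a wrapped-around number.
 */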
1117 
1118 static inline int target_to_host_resource(int code)
1119 {
1120     switch (code) {
1121     case TARGET_RLIMIT_AS:
1122         return RLIMIT_AS;
1123     case TARGET_RLIMIT_CORE:
1124         return RLIMIT_CORE;
1125     case TARGET_RLIMIT_CPU:
1126         return RLIMIT_CPU;
1127     case TARGET_RLIMIT_DATA:
1128         return RLIMIT_DATA;
1129     case TARGET_RLIMIT_FSIZE:
1130         return RLIMIT_FSIZE;
1131     case TARGET_RLIMIT_LOCKS:
1132         return RLIMIT_LOCKS;
1133     case TARGET_RLIMIT_MEMLOCK:
1134         return RLIMIT_MEMLOCK;
1135     case TARGET_RLIMIT_MSGQUEUE:
1136         return RLIMIT_MSGQUEUE;
1137     case TARGET_RLIMIT_NICE:
1138         return RLIMIT_NICE;
1139     case TARGET_RLIMIT_NOFILE:
1140         return RLIMIT_NOFILE;
1141     case TARGET_RLIMIT_NPROC:
1142         return RLIMIT_NPROC;
1143     case TARGET_RLIMIT_RSS:
1144         return RLIMIT_RSS;
1145     case TARGET_RLIMIT_RTPRIO:
1146         return RLIMIT_RTPRIO;
1147     case TARGET_RLIMIT_SIGPENDING:
1148         return RLIMIT_SIGPENDING;
1149     case TARGET_RLIMIT_STACK:
1150         return RLIMIT_STACK;
1151     default:
1152         return code;
1153     }
1154 }
1155 
1156 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1157                                               abi_ulong target_tv_addr)
1158 {
1159     struct target_timeval *target_tv;
1160 
1161     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1162         return -TARGET_EFAULT;
1163     }
1164 
1165     __get_user(tv->tv_sec, &target_tv->tv_sec);
1166     __get_user(tv->tv_usec, &target_tv->tv_usec);
1167 
1168     unlock_user_struct(target_tv, target_tv_addr, 0);
1169 
1170     return 0;
1171 }
1172 
1173 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1174                                             const struct timeval *tv)
1175 {
1176     struct target_timeval *target_tv;
1177 
1178     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1179         return -TARGET_EFAULT;
1180     }
1181 
1182     __put_user(tv->tv_sec, &target_tv->tv_sec);
1183     __put_user(tv->tv_usec, &target_tv->tv_usec);
1184 
1185     unlock_user_struct(target_tv, target_tv_addr, 1);
1186 
1187     return 0;
1188 }
1189 
1190 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1191 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1192                                                 abi_ulong target_tv_addr)
1193 {
1194     struct target__kernel_sock_timeval *target_tv;
1195 
1196     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1197         return -TARGET_EFAULT;
1198     }
1199 
1200     __get_user(tv->tv_sec, &target_tv->tv_sec);
1201     __get_user(tv->tv_usec, &target_tv->tv_usec);
1202 
1203     unlock_user_struct(target_tv, target_tv_addr, 0);
1204 
1205     return 0;
1206 }
1207 #endif
1208 
1209 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1210                                               const struct timeval *tv)
1211 {
1212     struct target__kernel_sock_timeval *target_tv;
1213 
1214     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1215         return -TARGET_EFAULT;
1216     }
1217 
1218     __put_user(tv->tv_sec, &target_tv->tv_sec);
1219     __put_user(tv->tv_usec, &target_tv->tv_usec);
1220 
1221     unlock_user_struct(target_tv, target_tv_addr, 1);
1222 
1223     return 0;
1224 }
1225 
1226 #if defined(TARGET_NR_futex) || \
1227     defined(TARGET_NR_rt_sigtimedwait) || \
1228     defined(TARGET_NR_pselect6) || \
1229     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1230     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1231     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1232     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1233     defined(TARGET_NR_timer_settime) || \
1234     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1235 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1236                                                abi_ulong target_addr)
1237 {
1238     struct target_timespec *target_ts;
1239 
1240     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1241         return -TARGET_EFAULT;
1242     }
1243     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1244     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1245     unlock_user_struct(target_ts, target_addr, 0);
1246     return 0;
1247 }
1248 #endif
1249 
1250 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1251     defined(TARGET_NR_timer_settime64) || \
1252     defined(TARGET_NR_mq_timedsend_time64) || \
1253     defined(TARGET_NR_mq_timedreceive_time64) || \
1254     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1255     defined(TARGET_NR_clock_nanosleep_time64) || \
1256     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1257     defined(TARGET_NR_utimensat) || \
1258     defined(TARGET_NR_utimensat_time64) || \
1259     defined(TARGET_NR_semtimedop_time64) || \
1260     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1261 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1262                                                  abi_ulong target_addr)
1263 {
1264     struct target__kernel_timespec *target_ts;
1265 
1266     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1267         return -TARGET_EFAULT;
1268     }
1269     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1270     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1271     /* in 32-bit mode, this drops the padding */
1272     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1273     unlock_user_struct(target_ts, target_addr, 0);
1274     return 0;
1275 }
1276 #endif
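/* For illustration: a 32-bit guest's __kernel_timespec reserves 64 bits for
 * tv_nsec but only the low 32 bits are meaningful, so the (long)(abi_long)
 * cast above deliberately discards whatever the guest left in the upper half.
 */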
1277 
1278 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1279                                                struct timespec *host_ts)
1280 {
1281     struct target_timespec *target_ts;
1282 
1283     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1284         return -TARGET_EFAULT;
1285     }
1286     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1287     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1288     unlock_user_struct(target_ts, target_addr, 1);
1289     return 0;
1290 }
1291 
1292 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1293                                                  struct timespec *host_ts)
1294 {
1295     struct target__kernel_timespec *target_ts;
1296 
1297     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1298         return -TARGET_EFAULT;
1299     }
1300     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1301     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1302     unlock_user_struct(target_ts, target_addr, 1);
1303     return 0;
1304 }
1305 
1306 #if defined(TARGET_NR_gettimeofday)
1307 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1308                                              struct timezone *tz)
1309 {
1310     struct target_timezone *target_tz;
1311 
1312     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1313         return -TARGET_EFAULT;
1314     }
1315 
1316     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1317     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1318 
1319     unlock_user_struct(target_tz, target_tz_addr, 1);
1320 
1321     return 0;
1322 }
1323 #endif
1324 
1325 #if defined(TARGET_NR_settimeofday)
1326 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1327                                                abi_ulong target_tz_addr)
1328 {
1329     struct target_timezone *target_tz;
1330 
1331     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1332         return -TARGET_EFAULT;
1333     }
1334 
1335     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1336     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1337 
1338     unlock_user_struct(target_tz, target_tz_addr, 0);
1339 
1340     return 0;
1341 }
1342 #endif
1343 
1344 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1345 #include <mqueue.h>
1346 
1347 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1348                                               abi_ulong target_mq_attr_addr)
1349 {
1350     struct target_mq_attr *target_mq_attr;
1351 
1352     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1353                           target_mq_attr_addr, 1))
1354         return -TARGET_EFAULT;
1355 
1356     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1357     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1358     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1359     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1360 
1361     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1362 
1363     return 0;
1364 }
1365 
1366 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1367                                             const struct mq_attr *attr)
1368 {
1369     struct target_mq_attr *target_mq_attr;
1370 
1371     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1372                           target_mq_attr_addr, 0))
1373         return -TARGET_EFAULT;
1374 
1375     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1376     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1377     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1378     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1379 
1380     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1381 
1382     return 0;
1383 }
1384 #endif
1385 
1386 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1387 /* do_select() must return target values and target errnos. */
1388 static abi_long do_select(int n,
1389                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1390                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1391 {
1392     fd_set rfds, wfds, efds;
1393     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1394     struct timeval tv;
1395     struct timespec ts, *ts_ptr;
1396     abi_long ret;
1397 
1398     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1399     if (ret) {
1400         return ret;
1401     }
1402     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1403     if (ret) {
1404         return ret;
1405     }
1406     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1407     if (ret) {
1408         return ret;
1409     }
1410 
1411     if (target_tv_addr) {
1412         if (copy_from_user_timeval(&tv, target_tv_addr))
1413             return -TARGET_EFAULT;
1414         ts.tv_sec = tv.tv_sec;
1415         ts.tv_nsec = tv.tv_usec * 1000;
1416         ts_ptr = &ts;
1417     } else {
1418         ts_ptr = NULL;
1419     }
1420 
1421     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1422                                   ts_ptr, NULL));
1423 
1424     if (!is_error(ret)) {
1425         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1426             return -TARGET_EFAULT;
1427         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1428             return -TARGET_EFAULT;
1429         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1430             return -TARGET_EFAULT;
1431 
1432         if (target_tv_addr) {
1433             tv.tv_sec = ts.tv_sec;
1434             tv.tv_usec = ts.tv_nsec / 1000;
1435             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1436                 return -TARGET_EFAULT;
1437             }
1438         }
1439     }
1440 
1441     return ret;
1442 }
1443 
1444 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1445 static abi_long do_old_select(abi_ulong arg1)
1446 {
1447     struct target_sel_arg_struct *sel;
1448     abi_ulong inp, outp, exp, tvp;
1449     long nsel;
1450 
1451     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1452         return -TARGET_EFAULT;
1453     }
1454 
1455     nsel = tswapal(sel->n);
1456     inp = tswapal(sel->inp);
1457     outp = tswapal(sel->outp);
1458     exp = tswapal(sel->exp);
1459     tvp = tswapal(sel->tvp);
1460 
1461     unlock_user_struct(sel, arg1, 0);
1462 
1463     return do_select(nsel, inp, outp, exp, tvp);
1464 }
1465 #endif
1466 #endif
1467 
1468 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1469 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1470                             abi_long arg4, abi_long arg5, abi_long arg6,
1471                             bool time64)
1472 {
1473     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1474     fd_set rfds, wfds, efds;
1475     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1476     struct timespec ts, *ts_ptr;
1477     abi_long ret;
1478 
1479     /*
1480      * The 6th arg is actually two args smashed together,
1481      * so we cannot use the C library.
1482      */
1483     sigset_t set;
1484     struct {
1485         sigset_t *set;
1486         size_t size;
1487     } sig, *sig_ptr;
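    /* For illustration: this mirrors the kernel ABI, where arg6 of pselect6
     * points at a { const sigset_t *ss; size_t ss_len; } pair rather than at
     * the sigset itself; the two abi_ulongs are unpacked from guest memory
     * below before this host-side structure is filled in.
     */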
1488 
1489     abi_ulong arg_sigset, arg_sigsize, *arg7;
1490     target_sigset_t *target_sigset;
1491 
1492     n = arg1;
1493     rfd_addr = arg2;
1494     wfd_addr = arg3;
1495     efd_addr = arg4;
1496     ts_addr = arg5;
1497 
1498     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1499     if (ret) {
1500         return ret;
1501     }
1502     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1503     if (ret) {
1504         return ret;
1505     }
1506     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1507     if (ret) {
1508         return ret;
1509     }
1510 
1511     /*
1512      * This takes a timespec, and not a timeval, so we cannot
1513      * use the do_select() helper ...
1514      */
1515     if (ts_addr) {
1516         if (time64) {
1517             if (target_to_host_timespec64(&ts, ts_addr)) {
1518                 return -TARGET_EFAULT;
1519             }
1520         } else {
1521             if (target_to_host_timespec(&ts, ts_addr)) {
1522                 return -TARGET_EFAULT;
1523             }
1524         }
1525         ts_ptr = &ts;
1526     } else {
1527         ts_ptr = NULL;
1528     }
1529 
1530     /* Extract the two packed args for the sigset */
1531     if (arg6) {
1532         sig_ptr = &sig;
1533         sig.size = SIGSET_T_SIZE;
1534 
1535         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1536         if (!arg7) {
1537             return -TARGET_EFAULT;
1538         }
1539         arg_sigset = tswapal(arg7[0]);
1540         arg_sigsize = tswapal(arg7[1]);
1541         unlock_user(arg7, arg6, 0);
1542 
1543         if (arg_sigset) {
1544             sig.set = &set;
1545             if (arg_sigsize != sizeof(*target_sigset)) {
1546                 /* Like the kernel, we enforce correct size sigsets */
1547                 return -TARGET_EINVAL;
1548             }
1549             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1550                                       sizeof(*target_sigset), 1);
1551             if (!target_sigset) {
1552                 return -TARGET_EFAULT;
1553             }
1554             target_to_host_sigset(&set, target_sigset);
1555             unlock_user(target_sigset, arg_sigset, 0);
1556         } else {
1557             sig.set = NULL;
1558         }
1559     } else {
1560         sig_ptr = NULL;
1561     }
1562 
1563     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1564                                   ts_ptr, sig_ptr));
1565 
1566     if (!is_error(ret)) {
1567         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1568             return -TARGET_EFAULT;
1569         }
1570         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1571             return -TARGET_EFAULT;
1572         }
1573         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1574             return -TARGET_EFAULT;
1575         }
1576         if (time64) {
1577             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1578                 return -TARGET_EFAULT;
1579             }
1580         } else {
1581             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1582                 return -TARGET_EFAULT;
1583             }
1584         }
1585     }
1586     return ret;
1587 }
1588 #endif
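
/*
 * For reference (illustrative, assuming the usual pselect6 ABI): the sixth
 * argument is a pointer to a two-word block,
 *
 *     struct { sigset_t *set; size_t size; };
 *
 * rather than a plain sigset pointer, which is why do_pselect6() reads two
 * abi_ulongs from arg6, insists that the advertised size matches
 * target_sigset_t, and only then converts the mask with
 * target_to_host_sigset().
 */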
1589 
1590 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1591     defined(TARGET_NR_ppoll_time64)
1592 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1593                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1594 {
1595     struct target_pollfd *target_pfd;
1596     unsigned int nfds = arg2;
1597     struct pollfd *pfd;
1598     unsigned int i;
1599     abi_long ret;
1600 
1601     pfd = NULL;
1602     target_pfd = NULL;
1603     if (nfds) {
1604         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1605             return -TARGET_EINVAL;
1606         }
1607         target_pfd = lock_user(VERIFY_WRITE, arg1,
1608                                sizeof(struct target_pollfd) * nfds, 1);
1609         if (!target_pfd) {
1610             return -TARGET_EFAULT;
1611         }
1612 
1613         pfd = alloca(sizeof(struct pollfd) * nfds);
1614         for (i = 0; i < nfds; i++) {
1615             pfd[i].fd = tswap32(target_pfd[i].fd);
1616             pfd[i].events = tswap16(target_pfd[i].events);
1617         }
1618     }
1619     if (ppoll) {
1620         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1621         target_sigset_t *target_set;
1622         sigset_t _set, *set = &_set;
1623 
1624         if (arg3) {
1625             if (time64) {
1626                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1627                     unlock_user(target_pfd, arg1, 0);
1628                     return -TARGET_EFAULT;
1629                 }
1630             } else {
1631                 if (target_to_host_timespec(timeout_ts, arg3)) {
1632                     unlock_user(target_pfd, arg1, 0);
1633                     return -TARGET_EFAULT;
1634                 }
1635             }
1636         } else {
1637             timeout_ts = NULL;
1638         }
1639 
1640         if (arg4) {
1641             if (arg5 != sizeof(target_sigset_t)) {
1642                 unlock_user(target_pfd, arg1, 0);
1643                 return -TARGET_EINVAL;
1644             }
1645 
1646             target_set = lock_user(VERIFY_READ, arg4,
1647                                    sizeof(target_sigset_t), 1);
1648             if (!target_set) {
1649                 unlock_user(target_pfd, arg1, 0);
1650                 return -TARGET_EFAULT;
1651             }
1652             target_to_host_sigset(set, target_set);
1653         } else {
1654             set = NULL;
1655         }
1656 
1657         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1658                                    set, SIGSET_T_SIZE));
1659 
1660         if (!is_error(ret) && arg3) {
1661             if (time64) {
1662                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1663                     return -TARGET_EFAULT;
1664                 }
1665             } else {
1666                 if (host_to_target_timespec(arg3, timeout_ts)) {
1667                     return -TARGET_EFAULT;
1668                 }
1669             }
1670         }
1671         if (arg4) {
1672             unlock_user(target_set, arg4, 0);
1673         }
1674     } else {
1675         struct timespec ts, *pts;
1676 
1677         if (arg3 >= 0) {
1678             /* Convert ms to secs, ns */
1679             ts.tv_sec = arg3 / 1000;
1680             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1681             pts = &ts;
1682         } else {
1683             /* A negative poll() timeout means "infinite" */
1684             pts = NULL;
1685         }
1686         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1687     }
1688 
1689     if (!is_error(ret)) {
1690         for (i = 0; i < nfds; i++) {
1691             target_pfd[i].revents = tswap16(pfd[i].revents);
1692         }
1693     }
1694     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1695     return ret;
1696 }
1697 #endif
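
/*
 * Worked example (illustrative only): when do_ppoll() is entered for plain
 * poll() with a 2500 ms timeout, the conversion above yields
 *
 *     ts.tv_sec  = 2500 / 1000;              -> 2
 *     ts.tv_nsec = (2500 % 1000) * 1000000;  -> 500000000
 *
 * while a negative timeout becomes a NULL timespec, i.e. block until an fd
 * is ready.
 */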
1698 
1699 static abi_long do_pipe2(int host_pipe[], int flags)
1700 {
1701 #ifdef CONFIG_PIPE2
1702     return pipe2(host_pipe, flags);
1703 #else
1704     return -ENOSYS;
1705 #endif
1706 }
1707 
1708 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1709                         int flags, int is_pipe2)
1710 {
1711     int host_pipe[2];
1712     abi_long ret;
1713     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1714 
1715     if (is_error(ret))
1716         return get_errno(ret);
1717 
1718     /* Several targets have special calling conventions for the original
1719        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1720     if (!is_pipe2) {
1721 #if defined(TARGET_ALPHA)
1722         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1723         return host_pipe[0];
1724 #elif defined(TARGET_MIPS)
1725         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1726         return host_pipe[0];
1727 #elif defined(TARGET_SH4)
1728         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1729         return host_pipe[0];
1730 #elif defined(TARGET_SPARC)
1731         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1732         return host_pipe[0];
1733 #endif
1734     }
1735 
1736     if (put_user_s32(host_pipe[0], pipedes)
1737         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1738         return -TARGET_EFAULT;
1739     return get_errno(ret);
1740 }
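
/*
 * Note: for the legacy pipe() call, Alpha, MIPS, SH4 and SPARC return the
 * read descriptor as the syscall result and the write descriptor in a
 * second register, which is why do_pipe() stores host_pipe[1] into the CPU
 * state and returns host_pipe[0] directly for those targets. Everything
 * else, and pipe2() everywhere, gets both descriptors written to the guest
 * array at 'pipedes'.
 */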
1741 
1742 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1743                                               abi_ulong target_addr,
1744                                               socklen_t len)
1745 {
1746     struct target_ip_mreqn *target_smreqn;
1747 
1748     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1749     if (!target_smreqn)
1750         return -TARGET_EFAULT;
1751     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1752     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1753     if (len == sizeof(struct target_ip_mreqn))
1754         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1755     unlock_user(target_smreqn, target_addr, 0);
1756 
1757     return 0;
1758 }
1759 
1760 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1761                                                abi_ulong target_addr,
1762                                                socklen_t len)
1763 {
1764     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1765     sa_family_t sa_family;
1766     struct target_sockaddr *target_saddr;
1767 
1768     if (fd_trans_target_to_host_addr(fd)) {
1769         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1770     }
1771 
1772     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1773     if (!target_saddr)
1774         return -TARGET_EFAULT;
1775 
1776     sa_family = tswap16(target_saddr->sa_family);
1777 
1778     /* Oops. The caller might send an incomplete sun_path; sun_path
1779      * must be terminated by \0 (see the manual page), but
1780      * unfortunately it is quite common to specify sockaddr_un
1781      * length as "strlen(x->sun_path)" while it should be
1782      * "strlen(...) + 1". We'll fix that here if needed.
1783      * Linux kernel has a similar feature.
1784      */
1785 
1786     if (sa_family == AF_UNIX) {
1787         if (len < unix_maxlen && len > 0) {
1788             char *cp = (char *)target_saddr;
1789 
1790             if (cp[len - 1] && !cp[len])
1791                 len++;
1792         }
1793         if (len > unix_maxlen)
1794             len = unix_maxlen;
1795     }
1796 
1797     memcpy(addr, target_saddr, len);
1798     addr->sa_family = sa_family;
1799     if (sa_family == AF_NETLINK) {
1800         struct sockaddr_nl *nladdr;
1801 
1802         nladdr = (struct sockaddr_nl *)addr;
1803         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1804         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1805     } else if (sa_family == AF_PACKET) {
1806         struct target_sockaddr_ll *lladdr;
1807 
1808         lladdr = (struct target_sockaddr_ll *)addr;
1809         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1810         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1811     }
1812     unlock_user(target_saddr, target_addr, 0);
1813 
1814     return 0;
1815 }
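
/*
 * Illustrative example of the AF_UNIX fix-up above: a guest that binds to
 * "/tmp/sock" but passes len = offsetof(sockaddr_un, sun_path) + 9 (i.e.
 * strlen() without the trailing NUL) still works, because the byte just
 * past the claimed length is the missing terminator and len is bumped by
 * one before the address is handed to the host, assuming the terminating
 * NUL is actually present in the guest's buffer.
 */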
1816 
1817 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1818                                                struct sockaddr *addr,
1819                                                socklen_t len)
1820 {
1821     struct target_sockaddr *target_saddr;
1822 
1823     if (len == 0) {
1824         return 0;
1825     }
1826     assert(addr);
1827 
1828     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1829     if (!target_saddr)
1830         return -TARGET_EFAULT;
1831     memcpy(target_saddr, addr, len);
1832     if (len >= offsetof(struct target_sockaddr, sa_family) +
1833         sizeof(target_saddr->sa_family)) {
1834         target_saddr->sa_family = tswap16(addr->sa_family);
1835     }
1836     if (addr->sa_family == AF_NETLINK &&
1837         len >= sizeof(struct target_sockaddr_nl)) {
1838         struct target_sockaddr_nl *target_nl =
1839                (struct target_sockaddr_nl *)target_saddr;
1840         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1841         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1842     } else if (addr->sa_family == AF_PACKET) {
1843         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1844         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1845         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1846     } else if (addr->sa_family == AF_INET6 &&
1847                len >= sizeof(struct target_sockaddr_in6)) {
1848         struct target_sockaddr_in6 *target_in6 =
1849                (struct target_sockaddr_in6 *)target_saddr;
1850         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1851     }
1852     unlock_user(target_saddr, target_addr, len);
1853 
1854     return 0;
1855 }
1856 
1857 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1858                                            struct target_msghdr *target_msgh)
1859 {
1860     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1861     abi_long msg_controllen;
1862     abi_ulong target_cmsg_addr;
1863     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1864     socklen_t space = 0;
1865 
1866     msg_controllen = tswapal(target_msgh->msg_controllen);
1867     if (msg_controllen < sizeof (struct target_cmsghdr))
1868         goto the_end;
1869     target_cmsg_addr = tswapal(target_msgh->msg_control);
1870     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1871     target_cmsg_start = target_cmsg;
1872     if (!target_cmsg)
1873         return -TARGET_EFAULT;
1874 
1875     while (cmsg && target_cmsg) {
1876         void *data = CMSG_DATA(cmsg);
1877         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1878 
1879         int len = tswapal(target_cmsg->cmsg_len)
1880             - sizeof(struct target_cmsghdr);
1881 
1882         space += CMSG_SPACE(len);
1883         if (space > msgh->msg_controllen) {
1884             space -= CMSG_SPACE(len);
1885             /* This is a QEMU bug, since we allocated the payload
1886              * area ourselves (unlike overflow in host-to-target
1887              * conversion, which is just the guest giving us a buffer
1888              * that's too small). It can't happen for the payload types
1889              * we currently support; if it becomes an issue in future
1890              * we would need to improve our allocation strategy to
1891              * something more intelligent than "twice the size of the
1892              * target buffer we're reading from".
1893              */
1894             qemu_log_mask(LOG_UNIMP,
1895                           ("Unsupported ancillary data %d/%d: "
1896                            "unhandled msg size\n"),
1897                           tswap32(target_cmsg->cmsg_level),
1898                           tswap32(target_cmsg->cmsg_type));
1899             break;
1900         }
1901 
1902         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1903             cmsg->cmsg_level = SOL_SOCKET;
1904         } else {
1905             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1906         }
1907         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1908         cmsg->cmsg_len = CMSG_LEN(len);
1909 
1910         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1911             int *fd = (int *)data;
1912             int *target_fd = (int *)target_data;
1913             int i, numfds = len / sizeof(int);
1914 
1915             for (i = 0; i < numfds; i++) {
1916                 __get_user(fd[i], target_fd + i);
1917             }
1918         } else if (cmsg->cmsg_level == SOL_SOCKET
1919                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1920             struct ucred *cred = (struct ucred *)data;
1921             struct target_ucred *target_cred =
1922                 (struct target_ucred *)target_data;
1923 
1924             __get_user(cred->pid, &target_cred->pid);
1925             __get_user(cred->uid, &target_cred->uid);
1926             __get_user(cred->gid, &target_cred->gid);
1927         } else {
1928             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1929                           cmsg->cmsg_level, cmsg->cmsg_type);
1930             memcpy(data, target_data, len);
1931         }
1932 
1933         cmsg = CMSG_NXTHDR(msgh, cmsg);
1934         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1935                                          target_cmsg_start);
1936     }
1937     unlock_user(target_cmsg, target_cmsg_addr, 0);
1938  the_end:
1939     msgh->msg_controllen = space;
1940     return 0;
1941 }
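
/*
 * Illustrative note: for SCM_RIGHTS the payload is just an array of ints,
 * so the loop above byte-swaps each descriptor individually; no fd
 * translation is needed because linux-user guests share the host's file
 * descriptor table.
 */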
1942 
1943 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1944                                            struct msghdr *msgh)
1945 {
1946     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1947     abi_long msg_controllen;
1948     abi_ulong target_cmsg_addr;
1949     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1950     socklen_t space = 0;
1951 
1952     msg_controllen = tswapal(target_msgh->msg_controllen);
1953     if (msg_controllen < sizeof (struct target_cmsghdr))
1954         goto the_end;
1955     target_cmsg_addr = tswapal(target_msgh->msg_control);
1956     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1957     target_cmsg_start = target_cmsg;
1958     if (!target_cmsg)
1959         return -TARGET_EFAULT;
1960 
1961     while (cmsg && target_cmsg) {
1962         void *data = CMSG_DATA(cmsg);
1963         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1964 
1965         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1966         int tgt_len, tgt_space;
1967 
1968         /* We never copy a half-header but may copy half-data;
1969          * this is Linux's behaviour in put_cmsg(). Note that
1970          * truncation here is a guest problem (which we report
1971          * to the guest via the CTRUNC bit), unlike truncation
1972          * in target_to_host_cmsg, which is a QEMU bug.
1973          */
1974         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1975             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1976             break;
1977         }
1978 
1979         if (cmsg->cmsg_level == SOL_SOCKET) {
1980             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1981         } else {
1982             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1983         }
1984         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1985 
1986         /* Payload types which need a different size of payload on
1987          * the target must adjust tgt_len here.
1988          */
1989         tgt_len = len;
1990         switch (cmsg->cmsg_level) {
1991         case SOL_SOCKET:
1992             switch (cmsg->cmsg_type) {
1993             case SO_TIMESTAMP:
1994                 tgt_len = sizeof(struct target_timeval);
1995                 break;
1996             default:
1997                 break;
1998             }
1999             break;
2000         default:
2001             break;
2002         }
2003 
2004         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2005             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2006             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2007         }
2008 
2009         /* We must now copy-and-convert len bytes of payload
2010          * into tgt_len bytes of destination space. Bear in mind
2011          * that in both source and destination we may be dealing
2012          * with a truncated value!
2013          */
2014         switch (cmsg->cmsg_level) {
2015         case SOL_SOCKET:
2016             switch (cmsg->cmsg_type) {
2017             case SCM_RIGHTS:
2018             {
2019                 int *fd = (int *)data;
2020                 int *target_fd = (int *)target_data;
2021                 int i, numfds = tgt_len / sizeof(int);
2022 
2023                 for (i = 0; i < numfds; i++) {
2024                     __put_user(fd[i], target_fd + i);
2025                 }
2026                 break;
2027             }
2028             case SO_TIMESTAMP:
2029             {
2030                 struct timeval *tv = (struct timeval *)data;
2031                 struct target_timeval *target_tv =
2032                     (struct target_timeval *)target_data;
2033 
2034                 if (len != sizeof(struct timeval) ||
2035                     tgt_len != sizeof(struct target_timeval)) {
2036                     goto unimplemented;
2037                 }
2038 
2039                 /* copy struct timeval to target */
2040                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2041                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2042                 break;
2043             }
2044             case SCM_CREDENTIALS:
2045             {
2046                 struct ucred *cred = (struct ucred *)data;
2047                 struct target_ucred *target_cred =
2048                     (struct target_ucred *)target_data;
2049 
2050                 __put_user(cred->pid, &target_cred->pid);
2051                 __put_user(cred->uid, &target_cred->uid);
2052                 __put_user(cred->gid, &target_cred->gid);
2053                 break;
2054             }
2055             default:
2056                 goto unimplemented;
2057             }
2058             break;
2059 
2060         case SOL_IP:
2061             switch (cmsg->cmsg_type) {
2062             case IP_TTL:
2063             {
2064                 uint32_t *v = (uint32_t *)data;
2065                 uint32_t *t_int = (uint32_t *)target_data;
2066 
2067                 if (len != sizeof(uint32_t) ||
2068                     tgt_len != sizeof(uint32_t)) {
2069                     goto unimplemented;
2070                 }
2071                 __put_user(*v, t_int);
2072                 break;
2073             }
2074             case IP_RECVERR:
2075             {
2076                 struct errhdr_t {
2077                    struct sock_extended_err ee;
2078                    struct sockaddr_in offender;
2079                 };
2080                 struct errhdr_t *errh = (struct errhdr_t *)data;
2081                 struct errhdr_t *target_errh =
2082                     (struct errhdr_t *)target_data;
2083 
2084                 if (len != sizeof(struct errhdr_t) ||
2085                     tgt_len != sizeof(struct errhdr_t)) {
2086                     goto unimplemented;
2087                 }
2088                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2089                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2090                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2091                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2092                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2093                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2094                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2095                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2096                     (void *) &errh->offender, sizeof(errh->offender));
2097                 break;
2098             }
2099             default:
2100                 goto unimplemented;
2101             }
2102             break;
2103 
2104         case SOL_IPV6:
2105             switch (cmsg->cmsg_type) {
2106             case IPV6_HOPLIMIT:
2107             {
2108                 uint32_t *v = (uint32_t *)data;
2109                 uint32_t *t_int = (uint32_t *)target_data;
2110 
2111                 if (len != sizeof(uint32_t) ||
2112                     tgt_len != sizeof(uint32_t)) {
2113                     goto unimplemented;
2114                 }
2115                 __put_user(*v, t_int);
2116                 break;
2117             }
2118             case IPV6_RECVERR:
2119             {
2120                 struct errhdr6_t {
2121                    struct sock_extended_err ee;
2122                    struct sockaddr_in6 offender;
2123                 };
2124                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2125                 struct errhdr6_t *target_errh =
2126                     (struct errhdr6_t *)target_data;
2127 
2128                 if (len != sizeof(struct errhdr6_t) ||
2129                     tgt_len != sizeof(struct errhdr6_t)) {
2130                     goto unimplemented;
2131                 }
2132                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2133                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2134                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2135                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2136                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2137                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2138                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2139                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2140                     (void *) &errh->offender, sizeof(errh->offender));
2141                 break;
2142             }
2143             default:
2144                 goto unimplemented;
2145             }
2146             break;
2147 
2148         default:
2149         unimplemented:
2150             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2151                           cmsg->cmsg_level, cmsg->cmsg_type);
2152             memcpy(target_data, data, MIN(len, tgt_len));
2153             if (tgt_len > len) {
2154                 memset(target_data + len, 0, tgt_len - len);
2155             }
2156         }
2157 
2158         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2159         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2160         if (msg_controllen < tgt_space) {
2161             tgt_space = msg_controllen;
2162         }
2163         msg_controllen -= tgt_space;
2164         space += tgt_space;
2165         cmsg = CMSG_NXTHDR(msgh, cmsg);
2166         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2167                                          target_cmsg_start);
2168     }
2169     unlock_user(target_cmsg, target_cmsg_addr, space);
2170  the_end:
2171     target_msgh->msg_controllen = tswapal(space);
2172     return 0;
2173 }
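
/*
 * Note: truncation in this (host-to-target) direction is the guest's
 * problem, not QEMU's: if the guest control buffer is too small, the
 * payload is clipped to what fits and MSG_CTRUNC is raised in the target
 * msg_flags, mirroring the kernel's put_cmsg() behaviour for native
 * processes.
 */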
2174 
2175 /* do_setsockopt() must return target values and target errnos. */
2176 static abi_long do_setsockopt(int sockfd, int level, int optname,
2177                               abi_ulong optval_addr, socklen_t optlen)
2178 {
2179     abi_long ret;
2180     int val;
2181     struct ip_mreqn *ip_mreq;
2182     struct ip_mreq_source *ip_mreq_source;
2183 
2184     switch(level) {
2185     case SOL_TCP:
2186         /* TCP options all take an 'int' value.  */
2187         if (optlen < sizeof(uint32_t))
2188             return -TARGET_EINVAL;
2189 
2190         if (get_user_u32(val, optval_addr))
2191             return -TARGET_EFAULT;
2192         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2193         break;
2194     case SOL_IP:
2195         switch(optname) {
2196         case IP_TOS:
2197         case IP_TTL:
2198         case IP_HDRINCL:
2199         case IP_ROUTER_ALERT:
2200         case IP_RECVOPTS:
2201         case IP_RETOPTS:
2202         case IP_PKTINFO:
2203         case IP_MTU_DISCOVER:
2204         case IP_RECVERR:
2205         case IP_RECVTTL:
2206         case IP_RECVTOS:
2207 #ifdef IP_FREEBIND
2208         case IP_FREEBIND:
2209 #endif
2210         case IP_MULTICAST_TTL:
2211         case IP_MULTICAST_LOOP:
2212             val = 0;
2213             if (optlen >= sizeof(uint32_t)) {
2214                 if (get_user_u32(val, optval_addr))
2215                     return -TARGET_EFAULT;
2216             } else if (optlen >= 1) {
2217                 if (get_user_u8(val, optval_addr))
2218                     return -TARGET_EFAULT;
2219             }
2220             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2221             break;
2222         case IP_ADD_MEMBERSHIP:
2223         case IP_DROP_MEMBERSHIP:
2224             if (optlen < sizeof (struct target_ip_mreq) ||
2225                 optlen > sizeof (struct target_ip_mreqn))
2226                 return -TARGET_EINVAL;
2227 
2228             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2229             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2230             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2231             break;
2232 
2233         case IP_BLOCK_SOURCE:
2234         case IP_UNBLOCK_SOURCE:
2235         case IP_ADD_SOURCE_MEMBERSHIP:
2236         case IP_DROP_SOURCE_MEMBERSHIP:
2237             if (optlen != sizeof (struct target_ip_mreq_source))
2238                 return -TARGET_EINVAL;
2239 
2240             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2241             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2242             unlock_user(ip_mreq_source, optval_addr, 0);
2243             break;
2244 
2245         default:
2246             goto unimplemented;
2247         }
2248         break;
2249     case SOL_IPV6:
2250         switch (optname) {
2251         case IPV6_MTU_DISCOVER:
2252         case IPV6_MTU:
2253         case IPV6_V6ONLY:
2254         case IPV6_RECVPKTINFO:
2255         case IPV6_UNICAST_HOPS:
2256         case IPV6_MULTICAST_HOPS:
2257         case IPV6_MULTICAST_LOOP:
2258         case IPV6_RECVERR:
2259         case IPV6_RECVHOPLIMIT:
2260         case IPV6_2292HOPLIMIT:
2261         case IPV6_CHECKSUM:
2262         case IPV6_ADDRFORM:
2263         case IPV6_2292PKTINFO:
2264         case IPV6_RECVTCLASS:
2265         case IPV6_RECVRTHDR:
2266         case IPV6_2292RTHDR:
2267         case IPV6_RECVHOPOPTS:
2268         case IPV6_2292HOPOPTS:
2269         case IPV6_RECVDSTOPTS:
2270         case IPV6_2292DSTOPTS:
2271         case IPV6_TCLASS:
2272 #ifdef IPV6_RECVPATHMTU
2273         case IPV6_RECVPATHMTU:
2274 #endif
2275 #ifdef IPV6_TRANSPARENT
2276         case IPV6_TRANSPARENT:
2277 #endif
2278 #ifdef IPV6_FREEBIND
2279         case IPV6_FREEBIND:
2280 #endif
2281 #ifdef IPV6_RECVORIGDSTADDR
2282         case IPV6_RECVORIGDSTADDR:
2283 #endif
2284             val = 0;
2285             if (optlen < sizeof(uint32_t)) {
2286                 return -TARGET_EINVAL;
2287             }
2288             if (get_user_u32(val, optval_addr)) {
2289                 return -TARGET_EFAULT;
2290             }
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        &val, sizeof(val)));
2293             break;
2294         case IPV6_PKTINFO:
2295         {
2296             struct in6_pktinfo pki;
2297 
2298             if (optlen < sizeof(pki)) {
2299                 return -TARGET_EINVAL;
2300             }
2301 
2302             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2303                 return -TARGET_EFAULT;
2304             }
2305 
2306             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2307 
2308             ret = get_errno(setsockopt(sockfd, level, optname,
2309                                        &pki, sizeof(pki)));
2310             break;
2311         }
2312         case IPV6_ADD_MEMBERSHIP:
2313         case IPV6_DROP_MEMBERSHIP:
2314         {
2315             struct ipv6_mreq ipv6mreq;
2316 
2317             if (optlen < sizeof(ipv6mreq)) {
2318                 return -TARGET_EINVAL;
2319             }
2320 
2321             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2322                 return -TARGET_EFAULT;
2323             }
2324 
2325             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2326 
2327             ret = get_errno(setsockopt(sockfd, level, optname,
2328                                        &ipv6mreq, sizeof(ipv6mreq)));
2329             break;
2330         }
2331         default:
2332             goto unimplemented;
2333         }
2334         break;
2335     case SOL_ICMPV6:
2336         switch (optname) {
2337         case ICMPV6_FILTER:
2338         {
2339             struct icmp6_filter icmp6f;
2340 
2341             if (optlen > sizeof(icmp6f)) {
2342                 optlen = sizeof(icmp6f);
2343             }
2344 
2345             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2346                 return -TARGET_EFAULT;
2347             }
2348 
2349             for (val = 0; val < 8; val++) {
2350                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2351             }
2352 
2353             ret = get_errno(setsockopt(sockfd, level, optname,
2354                                        &icmp6f, optlen));
2355             break;
2356         }
2357         default:
2358             goto unimplemented;
2359         }
2360         break;
2361     case SOL_RAW:
2362         switch (optname) {
2363         case ICMP_FILTER:
2364         case IPV6_CHECKSUM:
2365             /* these take a u32 value */
2366             if (optlen < sizeof(uint32_t)) {
2367                 return -TARGET_EINVAL;
2368             }
2369 
2370             if (get_user_u32(val, optval_addr)) {
2371                 return -TARGET_EFAULT;
2372             }
2373             ret = get_errno(setsockopt(sockfd, level, optname,
2374                                        &val, sizeof(val)));
2375             break;
2376 
2377         default:
2378             goto unimplemented;
2379         }
2380         break;
2381 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2382     case SOL_ALG:
2383         switch (optname) {
2384         case ALG_SET_KEY:
2385         {
2386             char *alg_key = g_malloc(optlen);
2387 
2388             if (!alg_key) {
2389                 return -TARGET_ENOMEM;
2390             }
2391             if (copy_from_user(alg_key, optval_addr, optlen)) {
2392                 g_free(alg_key);
2393                 return -TARGET_EFAULT;
2394             }
2395             ret = get_errno(setsockopt(sockfd, level, optname,
2396                                        alg_key, optlen));
2397             g_free(alg_key);
2398             break;
2399         }
2400         case ALG_SET_AEAD_AUTHSIZE:
2401         {
2402             ret = get_errno(setsockopt(sockfd, level, optname,
2403                                        NULL, optlen));
2404             break;
2405         }
2406         default:
2407             goto unimplemented;
2408         }
2409         break;
2410 #endif
2411     case TARGET_SOL_SOCKET:
2412         switch (optname) {
2413         case TARGET_SO_RCVTIMEO:
2414         {
2415                 struct timeval tv;
2416 
2417                 optname = SO_RCVTIMEO;
2418 
2419 set_timeout:
2420                 if (optlen != sizeof(struct target_timeval)) {
2421                     return -TARGET_EINVAL;
2422                 }
2423 
2424                 if (copy_from_user_timeval(&tv, optval_addr)) {
2425                     return -TARGET_EFAULT;
2426                 }
2427 
2428                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2429                                 &tv, sizeof(tv)));
2430                 return ret;
2431         }
2432         case TARGET_SO_SNDTIMEO:
2433                 optname = SO_SNDTIMEO;
2434                 goto set_timeout;
2435         case TARGET_SO_ATTACH_FILTER:
2436         {
2437                 struct target_sock_fprog *tfprog;
2438                 struct target_sock_filter *tfilter;
2439                 struct sock_fprog fprog;
2440                 struct sock_filter *filter;
2441                 int i;
2442 
2443                 if (optlen != sizeof(*tfprog)) {
2444                     return -TARGET_EINVAL;
2445                 }
2446                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2447                     return -TARGET_EFAULT;
2448                 }
2449                 if (!lock_user_struct(VERIFY_READ, tfilter,
2450                                       tswapal(tfprog->filter), 0)) {
2451                     unlock_user_struct(tfprog, optval_addr, 1);
2452                     return -TARGET_EFAULT;
2453                 }
2454 
2455                 fprog.len = tswap16(tfprog->len);
2456                 filter = g_try_new(struct sock_filter, fprog.len);
2457                 if (filter == NULL) {
2458                     unlock_user_struct(tfilter, tfprog->filter, 1);
2459                     unlock_user_struct(tfprog, optval_addr, 1);
2460                     return -TARGET_ENOMEM;
2461                 }
2462                 for (i = 0; i < fprog.len; i++) {
2463                     filter[i].code = tswap16(tfilter[i].code);
2464                     filter[i].jt = tfilter[i].jt;
2465                     filter[i].jf = tfilter[i].jf;
2466                     filter[i].k = tswap32(tfilter[i].k);
2467                 }
2468                 fprog.filter = filter;
2469 
2470                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2471                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2472                 g_free(filter);
2473 
2474                 unlock_user_struct(tfilter, tfprog->filter, 1);
2475                 unlock_user_struct(tfprog, optval_addr, 1);
2476                 return ret;
2477         }
2478         case TARGET_SO_BINDTODEVICE:
2479         {
2480                 char *dev_ifname, *addr_ifname;
2481 
2482                 if (optlen > IFNAMSIZ - 1) {
2483                     optlen = IFNAMSIZ - 1;
2484                 }
2485                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2486                 if (!dev_ifname) {
2487                     return -TARGET_EFAULT;
2488                 }
2489                 optname = SO_BINDTODEVICE;
2490                 addr_ifname = alloca(IFNAMSIZ);
2491                 memcpy(addr_ifname, dev_ifname, optlen);
2492                 addr_ifname[optlen] = 0;
2493                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2494                                            addr_ifname, optlen));
2495                 unlock_user(dev_ifname, optval_addr, 0);
2496                 return ret;
2497         }
2498         case TARGET_SO_LINGER:
2499         {
2500                 struct linger lg;
2501                 struct target_linger *tlg;
2502 
2503                 if (optlen != sizeof(struct target_linger)) {
2504                     return -TARGET_EINVAL;
2505                 }
2506                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2507                     return -TARGET_EFAULT;
2508                 }
2509                 __get_user(lg.l_onoff, &tlg->l_onoff);
2510                 __get_user(lg.l_linger, &tlg->l_linger);
2511                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2512                                 &lg, sizeof(lg)));
2513                 unlock_user_struct(tlg, optval_addr, 0);
2514                 return ret;
2515         }
2516             /* Options with 'int' argument.  */
2517         case TARGET_SO_DEBUG:
2518                 optname = SO_DEBUG;
2519                 break;
2520         case TARGET_SO_REUSEADDR:
2521                 optname = SO_REUSEADDR;
2522                 break;
2523 #ifdef SO_REUSEPORT
2524         case TARGET_SO_REUSEPORT:
2525                 optname = SO_REUSEPORT;
2526                 break;
2527 #endif
2528         case TARGET_SO_TYPE:
2529                 optname = SO_TYPE;
2530                 break;
2531         case TARGET_SO_ERROR:
2532                 optname = SO_ERROR;
2533                 break;
2534         case TARGET_SO_DONTROUTE:
2535                 optname = SO_DONTROUTE;
2536                 break;
2537         case TARGET_SO_BROADCAST:
2538                 optname = SO_BROADCAST;
2539                 break;
2540         case TARGET_SO_SNDBUF:
2541                 optname = SO_SNDBUF;
2542                 break;
2543         case TARGET_SO_SNDBUFFORCE:
2544                 optname = SO_SNDBUFFORCE;
2545                 break;
2546         case TARGET_SO_RCVBUF:
2547                 optname = SO_RCVBUF;
2548                 break;
2549         case TARGET_SO_RCVBUFFORCE:
2550                 optname = SO_RCVBUFFORCE;
2551                 break;
2552         case TARGET_SO_KEEPALIVE:
2553                 optname = SO_KEEPALIVE;
2554                 break;
2555         case TARGET_SO_OOBINLINE:
2556                 optname = SO_OOBINLINE;
2557                 break;
2558         case TARGET_SO_NO_CHECK:
2559                 optname = SO_NO_CHECK;
2560                 break;
2561         case TARGET_SO_PRIORITY:
2562                 optname = SO_PRIORITY;
2563                 break;
2564 #ifdef SO_BSDCOMPAT
2565         case TARGET_SO_BSDCOMPAT:
2566                 optname = SO_BSDCOMPAT;
2567                 break;
2568 #endif
2569         case TARGET_SO_PASSCRED:
2570                 optname = SO_PASSCRED;
2571                 break;
2572         case TARGET_SO_PASSSEC:
2573                 optname = SO_PASSSEC;
2574                 break;
2575         case TARGET_SO_TIMESTAMP:
2576                 optname = SO_TIMESTAMP;
2577                 break;
2578         case TARGET_SO_RCVLOWAT:
2579                 optname = SO_RCVLOWAT;
2580                 break;
2581         default:
2582             goto unimplemented;
2583         }
2584         if (optlen < sizeof(uint32_t))
2585             return -TARGET_EINVAL;
2586 
2587         if (get_user_u32(val, optval_addr))
2588             return -TARGET_EFAULT;
2589         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2590         break;
2591 #ifdef SOL_NETLINK
2592     case SOL_NETLINK:
2593         switch (optname) {
2594         case NETLINK_PKTINFO:
2595         case NETLINK_ADD_MEMBERSHIP:
2596         case NETLINK_DROP_MEMBERSHIP:
2597         case NETLINK_BROADCAST_ERROR:
2598         case NETLINK_NO_ENOBUFS:
2599 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2600         case NETLINK_LISTEN_ALL_NSID:
2601         case NETLINK_CAP_ACK:
2602 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2603 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2604         case NETLINK_EXT_ACK:
2605 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2606 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2607         case NETLINK_GET_STRICT_CHK:
2608 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2609             break;
2610         default:
2611             goto unimplemented;
2612         }
2613         val = 0;
2614         if (optlen < sizeof(uint32_t)) {
2615             return -TARGET_EINVAL;
2616         }
2617         if (get_user_u32(val, optval_addr)) {
2618             return -TARGET_EFAULT;
2619         }
2620         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2621                                    sizeof(val)));
2622         break;
2623 #endif /* SOL_NETLINK */
2624     default:
2625     unimplemented:
2626         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2627                       level, optname);
2628         ret = -TARGET_ENOPROTOOPT;
2629     }
2630     return ret;
2631 }
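
/*
 * Note: only the SOL_SOCKET option names need translating above
 * (TARGET_SO_* -> SO_*); most payloads are plain ints, with the structured
 * cases (timevals for SO_RCVTIMEO/SO_SNDTIMEO, struct linger, the BPF
 * program for SO_ATTACH_FILTER, the interface name for SO_BINDTODEVICE)
 * converted explicitly.
 */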
2632 
2633 /* do_getsockopt() must return target values and target errnos. */
2634 static abi_long do_getsockopt(int sockfd, int level, int optname,
2635                               abi_ulong optval_addr, abi_ulong optlen)
2636 {
2637     abi_long ret;
2638     int len, val;
2639     socklen_t lv;
2640 
2641     switch(level) {
2642     case TARGET_SOL_SOCKET:
2643         level = SOL_SOCKET;
2644         switch (optname) {
2645         /* These don't just return a single integer */
2646         case TARGET_SO_PEERNAME:
2647             goto unimplemented;
2648         case TARGET_SO_RCVTIMEO: {
2649             struct timeval tv;
2650             socklen_t tvlen;
2651 
2652             optname = SO_RCVTIMEO;
2653 
2654 get_timeout:
2655             if (get_user_u32(len, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             if (len < 0) {
2659                 return -TARGET_EINVAL;
2660             }
2661 
2662             tvlen = sizeof(tv);
2663             ret = get_errno(getsockopt(sockfd, level, optname,
2664                                        &tv, &tvlen));
2665             if (ret < 0) {
2666                 return ret;
2667             }
2668             if (len > sizeof(struct target_timeval)) {
2669                 len = sizeof(struct target_timeval);
2670             }
2671             if (copy_to_user_timeval(optval_addr, &tv)) {
2672                 return -TARGET_EFAULT;
2673             }
2674             if (put_user_u32(len, optlen)) {
2675                 return -TARGET_EFAULT;
2676             }
2677             break;
2678         }
2679         case TARGET_SO_SNDTIMEO:
2680             optname = SO_SNDTIMEO;
2681             goto get_timeout;
2682         case TARGET_SO_PEERCRED: {
2683             struct ucred cr;
2684             socklen_t crlen;
2685             struct target_ucred *tcr;
2686 
2687             if (get_user_u32(len, optlen)) {
2688                 return -TARGET_EFAULT;
2689             }
2690             if (len < 0) {
2691                 return -TARGET_EINVAL;
2692             }
2693 
2694             crlen = sizeof(cr);
2695             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2696                                        &cr, &crlen));
2697             if (ret < 0) {
2698                 return ret;
2699             }
2700             if (len > crlen) {
2701                 len = crlen;
2702             }
2703             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2704                 return -TARGET_EFAULT;
2705             }
2706             __put_user(cr.pid, &tcr->pid);
2707             __put_user(cr.uid, &tcr->uid);
2708             __put_user(cr.gid, &tcr->gid);
2709             unlock_user_struct(tcr, optval_addr, 1);
2710             if (put_user_u32(len, optlen)) {
2711                 return -TARGET_EFAULT;
2712             }
2713             break;
2714         }
2715         case TARGET_SO_PEERSEC: {
2716             char *name;
2717 
2718             if (get_user_u32(len, optlen)) {
2719                 return -TARGET_EFAULT;
2720             }
2721             if (len < 0) {
2722                 return -TARGET_EINVAL;
2723             }
2724             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2725             if (!name) {
2726                 return -TARGET_EFAULT;
2727             }
2728             lv = len;
2729             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2730                                        name, &lv));
2731             if (put_user_u32(lv, optlen)) {
2732                 ret = -TARGET_EFAULT;
2733             }
2734             unlock_user(name, optval_addr, lv);
2735             break;
2736         }
2737         case TARGET_SO_LINGER:
2738         {
2739             struct linger lg;
2740             socklen_t lglen;
2741             struct target_linger *tlg;
2742 
2743             if (get_user_u32(len, optlen)) {
2744                 return -TARGET_EFAULT;
2745             }
2746             if (len < 0) {
2747                 return -TARGET_EINVAL;
2748             }
2749 
2750             lglen = sizeof(lg);
2751             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2752                                        &lg, &lglen));
2753             if (ret < 0) {
2754                 return ret;
2755             }
2756             if (len > lglen) {
2757                 len = lglen;
2758             }
2759             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2760                 return -TARGET_EFAULT;
2761             }
2762             __put_user(lg.l_onoff, &tlg->l_onoff);
2763             __put_user(lg.l_linger, &tlg->l_linger);
2764             unlock_user_struct(tlg, optval_addr, 1);
2765             if (put_user_u32(len, optlen)) {
2766                 return -TARGET_EFAULT;
2767             }
2768             break;
2769         }
2770         /* Options with 'int' argument.  */
2771         case TARGET_SO_DEBUG:
2772             optname = SO_DEBUG;
2773             goto int_case;
2774         case TARGET_SO_REUSEADDR:
2775             optname = SO_REUSEADDR;
2776             goto int_case;
2777 #ifdef SO_REUSEPORT
2778         case TARGET_SO_REUSEPORT:
2779             optname = SO_REUSEPORT;
2780             goto int_case;
2781 #endif
2782         case TARGET_SO_TYPE:
2783             optname = SO_TYPE;
2784             goto int_case;
2785         case TARGET_SO_ERROR:
2786             optname = SO_ERROR;
2787             goto int_case;
2788         case TARGET_SO_DONTROUTE:
2789             optname = SO_DONTROUTE;
2790             goto int_case;
2791         case TARGET_SO_BROADCAST:
2792             optname = SO_BROADCAST;
2793             goto int_case;
2794         case TARGET_SO_SNDBUF:
2795             optname = SO_SNDBUF;
2796             goto int_case;
2797         case TARGET_SO_RCVBUF:
2798             optname = SO_RCVBUF;
2799             goto int_case;
2800         case TARGET_SO_KEEPALIVE:
2801             optname = SO_KEEPALIVE;
2802             goto int_case;
2803         case TARGET_SO_OOBINLINE:
2804             optname = SO_OOBINLINE;
2805             goto int_case;
2806         case TARGET_SO_NO_CHECK:
2807             optname = SO_NO_CHECK;
2808             goto int_case;
2809         case TARGET_SO_PRIORITY:
2810             optname = SO_PRIORITY;
2811             goto int_case;
2812 #ifdef SO_BSDCOMPAT
2813         case TARGET_SO_BSDCOMPAT:
2814             optname = SO_BSDCOMPAT;
2815             goto int_case;
2816 #endif
2817         case TARGET_SO_PASSCRED:
2818             optname = SO_PASSCRED;
2819             goto int_case;
2820         case TARGET_SO_TIMESTAMP:
2821             optname = SO_TIMESTAMP;
2822             goto int_case;
2823         case TARGET_SO_RCVLOWAT:
2824             optname = SO_RCVLOWAT;
2825             goto int_case;
2826         case TARGET_SO_ACCEPTCONN:
2827             optname = SO_ACCEPTCONN;
2828             goto int_case;
2829         default:
2830             goto int_case;
2831         }
2832         break;
2833     case SOL_TCP:
2834         /* TCP options all take an 'int' value.  */
2835     int_case:
2836         if (get_user_u32(len, optlen))
2837             return -TARGET_EFAULT;
2838         if (len < 0)
2839             return -TARGET_EINVAL;
2840         lv = sizeof(lv);
2841         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2842         if (ret < 0)
2843             return ret;
2844         if (optname == SO_TYPE) {
2845             val = host_to_target_sock_type(val);
2846         }
2847         if (len > lv)
2848             len = lv;
2849         if (len == 4) {
2850             if (put_user_u32(val, optval_addr))
2851                 return -TARGET_EFAULT;
2852         } else {
2853             if (put_user_u8(val, optval_addr))
2854                 return -TARGET_EFAULT;
2855         }
2856         if (put_user_u32(len, optlen))
2857             return -TARGET_EFAULT;
2858         break;
2859     case SOL_IP:
2860         switch(optname) {
2861         case IP_TOS:
2862         case IP_TTL:
2863         case IP_HDRINCL:
2864         case IP_ROUTER_ALERT:
2865         case IP_RECVOPTS:
2866         case IP_RETOPTS:
2867         case IP_PKTINFO:
2868         case IP_MTU_DISCOVER:
2869         case IP_RECVERR:
2870         case IP_RECVTOS:
2871 #ifdef IP_FREEBIND
2872         case IP_FREEBIND:
2873 #endif
2874         case IP_MULTICAST_TTL:
2875         case IP_MULTICAST_LOOP:
2876             if (get_user_u32(len, optlen))
2877                 return -TARGET_EFAULT;
2878             if (len < 0)
2879                 return -TARGET_EINVAL;
2880             lv = sizeof(lv);
2881             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2882             if (ret < 0)
2883                 return ret;
2884             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2885                 len = 1;
2886                 if (put_user_u32(len, optlen)
2887                     || put_user_u8(val, optval_addr))
2888                     return -TARGET_EFAULT;
2889             } else {
2890                 if (len > sizeof(int))
2891                     len = sizeof(int);
2892                 if (put_user_u32(len, optlen)
2893                     || put_user_u32(val, optval_addr))
2894                     return -TARGET_EFAULT;
2895             }
2896             break;
2897         default:
2898             ret = -TARGET_ENOPROTOOPT;
2899             break;
2900         }
2901         break;
2902     case SOL_IPV6:
2903         switch (optname) {
2904         case IPV6_MTU_DISCOVER:
2905         case IPV6_MTU:
2906         case IPV6_V6ONLY:
2907         case IPV6_RECVPKTINFO:
2908         case IPV6_UNICAST_HOPS:
2909         case IPV6_MULTICAST_HOPS:
2910         case IPV6_MULTICAST_LOOP:
2911         case IPV6_RECVERR:
2912         case IPV6_RECVHOPLIMIT:
2913         case IPV6_2292HOPLIMIT:
2914         case IPV6_CHECKSUM:
2915         case IPV6_ADDRFORM:
2916         case IPV6_2292PKTINFO:
2917         case IPV6_RECVTCLASS:
2918         case IPV6_RECVRTHDR:
2919         case IPV6_2292RTHDR:
2920         case IPV6_RECVHOPOPTS:
2921         case IPV6_2292HOPOPTS:
2922         case IPV6_RECVDSTOPTS:
2923         case IPV6_2292DSTOPTS:
2924         case IPV6_TCLASS:
2925 #ifdef IPV6_RECVPATHMTU
2926         case IPV6_RECVPATHMTU:
2927 #endif
2928 #ifdef IPV6_TRANSPARENT
2929         case IPV6_TRANSPARENT:
2930 #endif
2931 #ifdef IPV6_FREEBIND
2932         case IPV6_FREEBIND:
2933 #endif
2934 #ifdef IPV6_RECVORIGDSTADDR
2935         case IPV6_RECVORIGDSTADDR:
2936 #endif
2937             if (get_user_u32(len, optlen))
2938                 return -TARGET_EFAULT;
2939             if (len < 0)
2940                 return -TARGET_EINVAL;
2941             lv = sizeof(lv);
2942             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2943             if (ret < 0)
2944                 return ret;
2945             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2946                 len = 1;
2947                 if (put_user_u32(len, optlen)
2948                     || put_user_u8(val, optval_addr))
2949                     return -TARGET_EFAULT;
2950             } else {
2951                 if (len > sizeof(int))
2952                     len = sizeof(int);
2953                 if (put_user_u32(len, optlen)
2954                     || put_user_u32(val, optval_addr))
2955                     return -TARGET_EFAULT;
2956             }
2957             break;
2958         default:
2959             ret = -TARGET_ENOPROTOOPT;
2960             break;
2961         }
2962         break;
2963 #ifdef SOL_NETLINK
2964     case SOL_NETLINK:
2965         switch (optname) {
2966         case NETLINK_PKTINFO:
2967         case NETLINK_BROADCAST_ERROR:
2968         case NETLINK_NO_ENOBUFS:
2969 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2970         case NETLINK_LISTEN_ALL_NSID:
2971         case NETLINK_CAP_ACK:
2972 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2973 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2974         case NETLINK_EXT_ACK:
2975 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2976 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2977         case NETLINK_GET_STRICT_CHK:
2978 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2979             if (get_user_u32(len, optlen)) {
2980                 return -TARGET_EFAULT;
2981             }
2982             if (len != sizeof(val)) {
2983                 return -TARGET_EINVAL;
2984             }
2985             lv = len;
2986             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2987             if (ret < 0) {
2988                 return ret;
2989             }
2990             if (put_user_u32(lv, optlen)
2991                 || put_user_u32(val, optval_addr)) {
2992                 return -TARGET_EFAULT;
2993             }
2994             break;
2995 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2996         case NETLINK_LIST_MEMBERSHIPS:
2997         {
2998             uint32_t *results;
2999             int i;
3000             if (get_user_u32(len, optlen)) {
3001                 return -TARGET_EFAULT;
3002             }
3003             if (len < 0) {
3004                 return -TARGET_EINVAL;
3005             }
3006             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3007             if (!results) {
3008                 return -TARGET_EFAULT;
3009             }
3010             lv = len;
3011             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3012             if (ret < 0) {
3013                 unlock_user(results, optval_addr, 0);
3014                 return ret;
3015             }
3016             /* swap host endianness to target endianness. */
3017             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3018                 results[i] = tswap32(results[i]);
3019             }
3020             if (put_user_u32(lv, optlen)) {
3021                 return -TARGET_EFAULT;
3022             }
3023             unlock_user(results, optval_addr, 0);
3024             break;
3025         }
3026 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3027         default:
3028             goto unimplemented;
3029         }
3030         break;
3031 #endif /* SOL_NETLINK */
3032     default:
3033     unimplemented:
3034         qemu_log_mask(LOG_UNIMP,
3035                       "getsockopt level=%d optname=%d not yet supported\n",
3036                       level, optname);
3037         ret = -TARGET_EOPNOTSUPP;
3038         break;
3039     }
3040     return ret;
3041 }
3042 
3043 /* Convert target low/high pair representing file offset into the host
3044  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3045  * as the kernel doesn't handle them either.
3046  */
3047 static void target_to_host_low_high(abi_ulong tlow,
3048                                     abi_ulong thigh,
3049                                     unsigned long *hlow,
3050                                     unsigned long *hhigh)
3051 {
3052     uint64_t off = tlow |
3053         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3054         TARGET_LONG_BITS / 2;
3055 
3056     *hlow = off;
3057     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3058 }
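/* Worked example (illustrative): on a 32-bit target, a 64-bit offset such
 * as 0x0123456789abcdef arrives as tlow = 0x89abcdef, thigh = 0x01234567,
 * and off is reassembled as 0x0123456789abcdef.  A 64-bit host then gets
 * *hlow = 0x0123456789abcdef, *hhigh = 0; a 32-bit host gets
 * *hlow = 0x89abcdef, *hhigh = 0x01234567.  The shifts are split in two so
 * that shifting by a full word width (e.g. 64 when TARGET_LONG_BITS == 64)
 * remains well defined.
 */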
3059 
3060 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3061                                 abi_ulong count, int copy)
3062 {
3063     struct target_iovec *target_vec;
3064     struct iovec *vec;
3065     abi_ulong total_len, max_len;
3066     int i;
3067     int err = 0;
3068     bool bad_address = false;
3069 
3070     if (count == 0) {
3071         errno = 0;
3072         return NULL;
3073     }
3074     if (count > IOV_MAX) {
3075         errno = EINVAL;
3076         return NULL;
3077     }
3078 
3079     vec = g_try_new0(struct iovec, count);
3080     if (vec == NULL) {
3081         errno = ENOMEM;
3082         return NULL;
3083     }
3084 
3085     target_vec = lock_user(VERIFY_READ, target_addr,
3086                            count * sizeof(struct target_iovec), 1);
3087     if (target_vec == NULL) {
3088         err = EFAULT;
3089         goto fail2;
3090     }
3091 
3092     /* ??? If host page size > target page size, this will result in a
3093        value larger than what we can actually support.  */
3094     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3095     total_len = 0;
3096 
3097     for (i = 0; i < count; i++) {
3098         abi_ulong base = tswapal(target_vec[i].iov_base);
3099         abi_long len = tswapal(target_vec[i].iov_len);
3100 
3101         if (len < 0) {
3102             err = EINVAL;
3103             goto fail;
3104         } else if (len == 0) {
3105             /* Zero length pointer is ignored.  */
3106             vec[i].iov_base = 0;
3107         } else {
3108             vec[i].iov_base = lock_user(type, base, len, copy);
3109             /* If the first buffer pointer is bad, this is a fault.  But
3110              * subsequent bad buffers will result in a partial write; this
3111              * is realized by filling the vector with null pointers and
3112              * zero lengths. */
3113             if (!vec[i].iov_base) {
3114                 if (i == 0) {
3115                     err = EFAULT;
3116                     goto fail;
3117                 } else {
3118                     bad_address = true;
3119                 }
3120             }
3121             if (bad_address) {
3122                 len = 0;
3123             }
3124             if (len > max_len - total_len) {
3125                 len = max_len - total_len;
3126             }
3127         }
3128         vec[i].iov_len = len;
3129         total_len += len;
3130     }
3131 
3132     unlock_user(target_vec, target_addr, 0);
3133     return vec;
3134 
3135  fail:
3136     while (--i >= 0) {
3137         if (tswapal(target_vec[i].iov_len) > 0) {
3138             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3139         }
3140     }
3141     unlock_user(target_vec, target_addr, 0);
3142  fail2:
3143     g_free(vec);
3144     errno = err;
3145     return NULL;
3146 }
3147 
3148 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3149                          abi_ulong count, int copy)
3150 {
3151     struct target_iovec *target_vec;
3152     int i;
3153 
3154     target_vec = lock_user(VERIFY_READ, target_addr,
3155                            count * sizeof(struct target_iovec), 1);
3156     if (target_vec) {
3157         for (i = 0; i < count; i++) {
3158             abi_ulong base = tswapal(target_vec[i].iov_base);
3159             abi_long len = tswapal(target_vec[i].iov_len);
3160             if (len < 0) {
3161                 break;
3162             }
3163             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3164         }
3165         unlock_user(target_vec, target_addr, 0);
3166     }
3167 
3168     g_free(vec);
3169 }
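/* Typical pairing of the two helpers above (an illustrative sketch, not a
 * definitive contract; safe_writev() here stands for the host writev
 * wrapper used by the writev-family handlers in this file):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_addr, count, 1);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(safe_writev(fd, vec, count));
 *     unlock_iovec(vec, target_addr, count, 0);
 *
 * The final argument of unlock_iovec() mirrors the data direction: 0 after
 * a write-style call (nothing to copy back), non-zero after a read-style
 * call so buffers locked with VERIFY_WRITE are copied back to the guest.
 */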
3170 
3171 static inline int target_to_host_sock_type(int *type)
3172 {
3173     int host_type = 0;
3174     int target_type = *type;
3175 
3176     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3177     case TARGET_SOCK_DGRAM:
3178         host_type = SOCK_DGRAM;
3179         break;
3180     case TARGET_SOCK_STREAM:
3181         host_type = SOCK_STREAM;
3182         break;
3183     default:
3184         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3185         break;
3186     }
3187     if (target_type & TARGET_SOCK_CLOEXEC) {
3188 #if defined(SOCK_CLOEXEC)
3189         host_type |= SOCK_CLOEXEC;
3190 #else
3191         return -TARGET_EINVAL;
3192 #endif
3193     }
3194     if (target_type & TARGET_SOCK_NONBLOCK) {
3195 #if defined(SOCK_NONBLOCK)
3196         host_type |= SOCK_NONBLOCK;
3197 #elif !defined(O_NONBLOCK)
3198         return -TARGET_EINVAL;
3199 #endif
3200     }
3201     *type = host_type;
3202     return 0;
3203 }
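/* Illustrative mapping performed by target_to_host_sock_type(), assuming
 * the host defines SOCK_CLOEXEC: a target type of
 * (TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC) is rewritten in place to
 * (SOCK_STREAM | SOCK_CLOEXEC).  A TARGET_SOCK_NONBLOCK request on a host
 * without SOCK_NONBLOCK is left for sock_flags_fixup() below, or rejected
 * with -TARGET_EINVAL if O_NONBLOCK is unavailable as well.
 */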
3204 
3205 /* Try to emulate socket type flags after socket creation.  */
3206 static int sock_flags_fixup(int fd, int target_type)
3207 {
3208 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3209     if (target_type & TARGET_SOCK_NONBLOCK) {
3210         int flags = fcntl(fd, F_GETFL);
3211         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3212             close(fd);
3213             return -TARGET_EINVAL;
3214         }
3215     }
3216 #endif
3217     return fd;
3218 }
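/* Note that when SOCK_NONBLOCK has to be emulated with fcntl() as above,
 * the socket briefly exists in blocking mode between socket() and fcntl();
 * this is a best-effort emulation rather than an atomic equivalent of the
 * host flag.
 */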
3219 
3220 /* do_socket() Must return target values and target errnos. */
3221 static abi_long do_socket(int domain, int type, int protocol)
3222 {
3223     int target_type = type;
3224     int ret;
3225 
3226     ret = target_to_host_sock_type(&type);
3227     if (ret) {
3228         return ret;
3229     }
3230 
3231     if (domain == PF_NETLINK && !(
3232 #ifdef CONFIG_RTNETLINK
3233          protocol == NETLINK_ROUTE ||
3234 #endif
3235          protocol == NETLINK_KOBJECT_UEVENT ||
3236          protocol == NETLINK_AUDIT)) {
3237         return -TARGET_EPROTONOSUPPORT;
3238     }
3239 
3240     if (domain == AF_PACKET ||
3241         (domain == AF_INET && type == SOCK_PACKET)) {
3242         protocol = tswap16(protocol);
3243     }
3244 
3245     ret = get_errno(socket(domain, type, protocol));
3246     if (ret >= 0) {
3247         ret = sock_flags_fixup(ret, target_type);
3248         if (type == SOCK_PACKET) {
3249             /* Handle an obsolete case:
3250              * if socket type is SOCK_PACKET, bind by name.
3251              */
3252             fd_trans_register(ret, &target_packet_trans);
3253         } else if (domain == PF_NETLINK) {
3254             switch (protocol) {
3255 #ifdef CONFIG_RTNETLINK
3256             case NETLINK_ROUTE:
3257                 fd_trans_register(ret, &target_netlink_route_trans);
3258                 break;
3259 #endif
3260             case NETLINK_KOBJECT_UEVENT:
3261                 /* nothing to do: messages are strings */
3262                 break;
3263             case NETLINK_AUDIT:
3264                 fd_trans_register(ret, &target_netlink_audit_trans);
3265                 break;
3266             default:
3267                 g_assert_not_reached();
3268             }
3269         }
3270     }
3271     return ret;
3272 }
3273 
3274 /* do_bind() Must return target values and target errnos. */
3275 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3276                         socklen_t addrlen)
3277 {
3278     void *addr;
3279     abi_long ret;
3280 
3281     if ((int)addrlen < 0) {
3282         return -TARGET_EINVAL;
3283     }
3284 
3285     addr = alloca(addrlen+1);
3286 
3287     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3288     if (ret)
3289         return ret;
3290 
3291     return get_errno(bind(sockfd, addr, addrlen));
3292 }
3293 
3294 /* do_connect() Must return target values and target errnos. */
3295 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3296                            socklen_t addrlen)
3297 {
3298     void *addr;
3299     abi_long ret;
3300 
3301     if ((int)addrlen < 0) {
3302         return -TARGET_EINVAL;
3303     }
3304 
3305     addr = alloca(addrlen+1);
3306 
3307     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3308     if (ret)
3309         return ret;
3310 
3311     return get_errno(safe_connect(sockfd, addr, addrlen));
3312 }
3313 
3314 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3315 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3316                                       int flags, int send)
3317 {
3318     abi_long ret, len;
3319     struct msghdr msg;
3320     abi_ulong count;
3321     struct iovec *vec;
3322     abi_ulong target_vec;
3323 
3324     if (msgp->msg_name) {
3325         msg.msg_namelen = tswap32(msgp->msg_namelen);
3326         msg.msg_name = alloca(msg.msg_namelen+1);
3327         ret = target_to_host_sockaddr(fd, msg.msg_name,
3328                                       tswapal(msgp->msg_name),
3329                                       msg.msg_namelen);
3330         if (ret == -TARGET_EFAULT) {
3331             /* For connected sockets msg_name and msg_namelen must
3332              * be ignored, so returning EFAULT immediately is wrong.
3333              * Instead, pass a bad msg_name to the host kernel, and
3334              * let it decide whether to return EFAULT or not.
3335              */
3336             msg.msg_name = (void *)-1;
3337         } else if (ret) {
3338             goto out2;
3339         }
3340     } else {
3341         msg.msg_name = NULL;
3342         msg.msg_namelen = 0;
3343     }
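    /*
     * Doubling the target's msg_controllen leaves headroom for the
     * host/target cmsg conversion: host control-message headers and
     * alignment can be larger than the target's (assumed rationale; the
     * factor itself comes from the original code).
     */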
3344     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3345     msg.msg_control = alloca(msg.msg_controllen);
3346     memset(msg.msg_control, 0, msg.msg_controllen);
3347 
3348     msg.msg_flags = tswap32(msgp->msg_flags);
3349 
3350     count = tswapal(msgp->msg_iovlen);
3351     target_vec = tswapal(msgp->msg_iov);
3352 
3353     if (count > IOV_MAX) {
3354         /* sendmsg/recvmsg return a different errno for this condition than
3355          * readv/writev, so we must catch it here before lock_iovec() does.
3356          */
3357         ret = -TARGET_EMSGSIZE;
3358         goto out2;
3359     }
3360 
3361     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3362                      target_vec, count, send);
3363     if (vec == NULL) {
3364         ret = -host_to_target_errno(errno);
3365         goto out2;
3366     }
3367     msg.msg_iovlen = count;
3368     msg.msg_iov = vec;
3369 
3370     if (send) {
3371         if (fd_trans_target_to_host_data(fd)) {
3372             void *host_msg;
3373 
3374             host_msg = g_malloc(msg.msg_iov->iov_len);
3375             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3376             ret = fd_trans_target_to_host_data(fd)(host_msg,
3377                                                    msg.msg_iov->iov_len);
3378             if (ret >= 0) {
3379                 msg.msg_iov->iov_base = host_msg;
3380                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3381             }
3382             g_free(host_msg);
3383         } else {
3384             ret = target_to_host_cmsg(&msg, msgp);
3385             if (ret == 0) {
3386                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3387             }
3388         }
3389     } else {
3390         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3391         if (!is_error(ret)) {
3392             len = ret;
3393             if (fd_trans_host_to_target_data(fd)) {
3394                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3395                                                MIN(msg.msg_iov->iov_len, len));
3396             } else {
3397                 ret = host_to_target_cmsg(msgp, &msg);
3398             }
3399             if (!is_error(ret)) {
3400                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3401                 msgp->msg_flags = tswap32(msg.msg_flags);
3402                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3403                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3404                                     msg.msg_name, msg.msg_namelen);
3405                     if (ret) {
3406                         goto out;
3407                     }
3408                 }
3409 
3410                 ret = len;
3411             }
3412         }
3413     }
3414 
3415 out:
3416     unlock_iovec(vec, target_vec, count, !send);
3417 out2:
3418     return ret;
3419 }
3420 
3421 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3422                                int flags, int send)
3423 {
3424     abi_long ret;
3425     struct target_msghdr *msgp;
3426 
3427     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3428                           msgp,
3429                           target_msg,
3430                           send ? 1 : 0)) {
3431         return -TARGET_EFAULT;
3432     }
3433     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3434     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3435     return ret;
3436 }
3437 
3438 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3439  * so it might not have this *mmsg-specific flag either.
3440  */
3441 #ifndef MSG_WAITFORONE
3442 #define MSG_WAITFORONE 0x10000
3443 #endif
3444 
3445 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3446                                 unsigned int vlen, unsigned int flags,
3447                                 int send)
3448 {
3449     struct target_mmsghdr *mmsgp;
3450     abi_long ret = 0;
3451     int i;
3452 
3453     if (vlen > UIO_MAXIOV) {
3454         vlen = UIO_MAXIOV;
3455     }
3456 
3457     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3458     if (!mmsgp) {
3459         return -TARGET_EFAULT;
3460     }
3461 
3462     for (i = 0; i < vlen; i++) {
3463         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3464         if (is_error(ret)) {
3465             break;
3466         }
3467         mmsgp[i].msg_len = tswap32(ret);
3468         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3469         if (flags & MSG_WAITFORONE) {
3470             flags |= MSG_DONTWAIT;
3471         }
3472     }
3473 
3474     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3475 
3476     /* Return number of datagrams sent if we sent any at all;
3477      * otherwise return the error.
3478      */
3479     if (i) {
3480         return i;
3481     }
3482     return ret;
3483 }
3484 
3485 /* do_accept4() Must return target values and target errnos. */
3486 static abi_long do_accept4(int fd, abi_ulong target_addr,
3487                            abi_ulong target_addrlen_addr, int flags)
3488 {
3489     socklen_t addrlen, ret_addrlen;
3490     void *addr;
3491     abi_long ret;
3492     int host_flags;
3493 
3494     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3495 
3496     if (target_addr == 0) {
3497         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3498     }
3499 
3500     /* Linux returns EFAULT if the addrlen pointer is invalid */
3501     if (get_user_u32(addrlen, target_addrlen_addr))
3502         return -TARGET_EFAULT;
3503 
3504     if ((int)addrlen < 0) {
3505         return -TARGET_EINVAL;
3506     }
3507 
3508     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3509         return -TARGET_EFAULT;
3510 
3511     addr = alloca(addrlen);
3512 
3513     ret_addrlen = addrlen;
3514     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3515     if (!is_error(ret)) {
3516         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3517         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3518             ret = -TARGET_EFAULT;
3519         }
3520     }
3521     return ret;
3522 }
3523 
3524 /* do_getpeername() Must return target values and target errnos. */
3525 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3526                                abi_ulong target_addrlen_addr)
3527 {
3528     socklen_t addrlen, ret_addrlen;
3529     void *addr;
3530     abi_long ret;
3531 
3532     if (get_user_u32(addrlen, target_addrlen_addr))
3533         return -TARGET_EFAULT;
3534 
3535     if ((int)addrlen < 0) {
3536         return -TARGET_EINVAL;
3537     }
3538 
3539     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3540         return -TARGET_EFAULT;
3541 
3542     addr = alloca(addrlen);
3543 
3544     ret_addrlen = addrlen;
3545     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3546     if (!is_error(ret)) {
3547         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3548         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3549             ret = -TARGET_EFAULT;
3550         }
3551     }
3552     return ret;
3553 }
3554 
3555 /* do_getsockname() Must return target values and target errnos. */
3556 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3557                                abi_ulong target_addrlen_addr)
3558 {
3559     socklen_t addrlen, ret_addrlen;
3560     void *addr;
3561     abi_long ret;
3562 
3563     if (get_user_u32(addrlen, target_addrlen_addr))
3564         return -TARGET_EFAULT;
3565 
3566     if ((int)addrlen < 0) {
3567         return -TARGET_EINVAL;
3568     }
3569 
3570     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3571         return -TARGET_EFAULT;
3572 
3573     addr = alloca(addrlen);
3574 
3575     ret_addrlen = addrlen;
3576     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3577     if (!is_error(ret)) {
3578         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3579         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3580             ret = -TARGET_EFAULT;
3581         }
3582     }
3583     return ret;
3584 }
3585 
3586 /* do_socketpair() Must return target values and target errnos. */
3587 static abi_long do_socketpair(int domain, int type, int protocol,
3588                               abi_ulong target_tab_addr)
3589 {
3590     int tab[2];
3591     abi_long ret;
3592 
3593     target_to_host_sock_type(&type);
3594 
3595     ret = get_errno(socketpair(domain, type, protocol, tab));
3596     if (!is_error(ret)) {
3597         if (put_user_s32(tab[0], target_tab_addr)
3598             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3599             ret = -TARGET_EFAULT;
3600     }
3601     return ret;
3602 }
3603 
3604 /* do_sendto() Must return target values and target errnos. */
3605 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3606                           abi_ulong target_addr, socklen_t addrlen)
3607 {
3608     void *addr;
3609     void *host_msg;
3610     void *copy_msg = NULL;
3611     abi_long ret;
3612 
3613     if ((int)addrlen < 0) {
3614         return -TARGET_EINVAL;
3615     }
3616 
3617     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3618     if (!host_msg)
3619         return -TARGET_EFAULT;
3620     if (fd_trans_target_to_host_data(fd)) {
3621         copy_msg = host_msg;
3622         host_msg = g_malloc(len);
3623         memcpy(host_msg, copy_msg, len);
3624         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3625         if (ret < 0) {
3626             goto fail;
3627         }
3628     }
3629     if (target_addr) {
3630         addr = alloca(addrlen+1);
3631         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3632         if (ret) {
3633             goto fail;
3634         }
3635         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3636     } else {
3637         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3638     }
3639 fail:
3640     if (copy_msg) {
3641         g_free(host_msg);
3642         host_msg = copy_msg;
3643     }
3644     unlock_user(host_msg, msg, 0);
3645     return ret;
3646 }
3647 
3648 /* do_recvfrom() Must return target values and target errnos. */
3649 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3650                             abi_ulong target_addr,
3651                             abi_ulong target_addrlen)
3652 {
3653     socklen_t addrlen, ret_addrlen;
3654     void *addr;
3655     void *host_msg;
3656     abi_long ret;
3657 
3658     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3659     if (!host_msg)
3660         return -TARGET_EFAULT;
3661     if (target_addr) {
3662         if (get_user_u32(addrlen, target_addrlen)) {
3663             ret = -TARGET_EFAULT;
3664             goto fail;
3665         }
3666         if ((int)addrlen < 0) {
3667             ret = -TARGET_EINVAL;
3668             goto fail;
3669         }
3670         addr = alloca(addrlen);
3671         ret_addrlen = addrlen;
3672         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3673                                       addr, &ret_addrlen));
3674     } else {
3675         addr = NULL; /* To keep compiler quiet.  */
3676         addrlen = 0; /* To keep compiler quiet.  */
3677         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3678     }
3679     if (!is_error(ret)) {
3680         if (fd_trans_host_to_target_data(fd)) {
3681             abi_long trans;
3682             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3683             if (is_error(trans)) {
3684                 ret = trans;
3685                 goto fail;
3686             }
3687         }
3688         if (target_addr) {
3689             host_to_target_sockaddr(target_addr, addr,
3690                                     MIN(addrlen, ret_addrlen));
3691             if (put_user_u32(ret_addrlen, target_addrlen)) {
3692                 ret = -TARGET_EFAULT;
3693                 goto fail;
3694             }
3695         }
3696         unlock_user(host_msg, msg, len);
3697     } else {
3698 fail:
3699         unlock_user(host_msg, msg, 0);
3700     }
3701     return ret;
3702 }
3703 
3704 #ifdef TARGET_NR_socketcall
3705 /* do_socketcall() must return target values and target errnos. */
3706 static abi_long do_socketcall(int num, abi_ulong vptr)
3707 {
3708     static const unsigned nargs[] = { /* number of arguments per operation */
3709         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3710         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3711         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3712         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3713         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3714         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3715         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3716         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3717         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3718         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3719         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3720         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3721         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3722         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3723         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3724         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3725         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3726         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3727         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3728         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3729     };
3730     abi_long a[6]; /* max 6 args */
3731     unsigned i;
3732 
3733     /* check the range of the first argument num */
3734     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3735     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3736         return -TARGET_EINVAL;
3737     }
3738     /* ensure we have space for args */
3739     if (nargs[num] > ARRAY_SIZE(a)) {
3740         return -TARGET_EINVAL;
3741     }
3742     /* collect the arguments in a[] according to nargs[] */
3743     for (i = 0; i < nargs[num]; ++i) {
3744         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3745             return -TARGET_EFAULT;
3746         }
3747     }
3748     /* now that we have the args, invoke the appropriate underlying function */
3749     switch (num) {
3750     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3751         return do_socket(a[0], a[1], a[2]);
3752     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3753         return do_bind(a[0], a[1], a[2]);
3754     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3755         return do_connect(a[0], a[1], a[2]);
3756     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3757         return get_errno(listen(a[0], a[1]));
3758     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3759         return do_accept4(a[0], a[1], a[2], 0);
3760     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3761         return do_getsockname(a[0], a[1], a[2]);
3762     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3763         return do_getpeername(a[0], a[1], a[2]);
3764     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3765         return do_socketpair(a[0], a[1], a[2], a[3]);
3766     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3767         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3768     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3769         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3770     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3771         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3772     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3773         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3774     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3775         return get_errno(shutdown(a[0], a[1]));
3776     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3777         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3778     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3779         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3780     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3781         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3782     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3783         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3784     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3785         return do_accept4(a[0], a[1], a[2], a[3]);
3786     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3787         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3788     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3789         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3790     default:
3791         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3792         return -TARGET_EINVAL;
3793     }
3794 }
3795 #endif
3796 
3797 #define N_SHM_REGIONS	32
3798 
3799 static struct shm_region {
3800     abi_ulong start;
3801     abi_ulong size;
3802     bool in_use;
3803 } shm_regions[N_SHM_REGIONS];
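/* shm_regions[] is a small fixed-size table of guest shmat() attachments:
 * do_shmat() below records the guest start address and segment size of
 * each mapping, and do_shmdt() looks the address up again so the page
 * flags can be cleared for exactly the attached range.
 */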
3804 
3805 #ifndef TARGET_SEMID64_DS
3806 /* asm-generic version of this struct */
3807 struct target_semid64_ds
3808 {
3809   struct target_ipc_perm sem_perm;
3810   abi_ulong sem_otime;
3811 #if TARGET_ABI_BITS == 32
3812   abi_ulong __unused1;
3813 #endif
3814   abi_ulong sem_ctime;
3815 #if TARGET_ABI_BITS == 32
3816   abi_ulong __unused2;
3817 #endif
3818   abi_ulong sem_nsems;
3819   abi_ulong __unused3;
3820   abi_ulong __unused4;
3821 };
3822 #endif
3823 
3824 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3825                                                abi_ulong target_addr)
3826 {
3827     struct target_ipc_perm *target_ip;
3828     struct target_semid64_ds *target_sd;
3829 
3830     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3831         return -TARGET_EFAULT;
3832     target_ip = &(target_sd->sem_perm);
3833     host_ip->__key = tswap32(target_ip->__key);
3834     host_ip->uid = tswap32(target_ip->uid);
3835     host_ip->gid = tswap32(target_ip->gid);
3836     host_ip->cuid = tswap32(target_ip->cuid);
3837     host_ip->cgid = tswap32(target_ip->cgid);
3838 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3839     host_ip->mode = tswap32(target_ip->mode);
3840 #else
3841     host_ip->mode = tswap16(target_ip->mode);
3842 #endif
3843 #if defined(TARGET_PPC)
3844     host_ip->__seq = tswap32(target_ip->__seq);
3845 #else
3846     host_ip->__seq = tswap16(target_ip->__seq);
3847 #endif
3848     unlock_user_struct(target_sd, target_addr, 0);
3849     return 0;
3850 }
3851 
3852 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3853                                                struct ipc_perm *host_ip)
3854 {
3855     struct target_ipc_perm *target_ip;
3856     struct target_semid64_ds *target_sd;
3857 
3858     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3859         return -TARGET_EFAULT;
3860     target_ip = &(target_sd->sem_perm);
3861     target_ip->__key = tswap32(host_ip->__key);
3862     target_ip->uid = tswap32(host_ip->uid);
3863     target_ip->gid = tswap32(host_ip->gid);
3864     target_ip->cuid = tswap32(host_ip->cuid);
3865     target_ip->cgid = tswap32(host_ip->cgid);
3866 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3867     target_ip->mode = tswap32(host_ip->mode);
3868 #else
3869     target_ip->mode = tswap16(host_ip->mode);
3870 #endif
3871 #if defined(TARGET_PPC)
3872     target_ip->__seq = tswap32(host_ip->__seq);
3873 #else
3874     target_ip->__seq = tswap16(host_ip->__seq);
3875 #endif
3876     unlock_user_struct(target_sd, target_addr, 1);
3877     return 0;
3878 }
3879 
3880 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3881                                                abi_ulong target_addr)
3882 {
3883     struct target_semid64_ds *target_sd;
3884 
3885     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3886         return -TARGET_EFAULT;
3887     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3888         return -TARGET_EFAULT;
3889     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3890     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3891     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3892     unlock_user_struct(target_sd, target_addr, 0);
3893     return 0;
3894 }
3895 
3896 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3897                                                struct semid_ds *host_sd)
3898 {
3899     struct target_semid64_ds *target_sd;
3900 
3901     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3902         return -TARGET_EFAULT;
3903     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3904         return -TARGET_EFAULT;
3905     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3906     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3907     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3908     unlock_user_struct(target_sd, target_addr, 1);
3909     return 0;
3910 }
3911 
3912 struct target_seminfo {
3913     int semmap;
3914     int semmni;
3915     int semmns;
3916     int semmnu;
3917     int semmsl;
3918     int semopm;
3919     int semume;
3920     int semusz;
3921     int semvmx;
3922     int semaem;
3923 };
3924 
3925 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3926                                               struct seminfo *host_seminfo)
3927 {
3928     struct target_seminfo *target_seminfo;
3929     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3930         return -TARGET_EFAULT;
3931     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3932     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3933     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3934     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3935     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3936     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3937     __put_user(host_seminfo->semume, &target_seminfo->semume);
3938     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3939     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3940     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3941     unlock_user_struct(target_seminfo, target_addr, 1);
3942     return 0;
3943 }
3944 
3945 union semun {
3946 	int val;
3947 	struct semid_ds *buf;
3948 	unsigned short *array;
3949 	struct seminfo *__buf;
3950 };
3951 
3952 union target_semun {
3953 	int val;
3954 	abi_ulong buf;
3955 	abi_ulong array;
3956 	abi_ulong __buf;
3957 };
3958 
3959 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3960                                                abi_ulong target_addr)
3961 {
3962     int nsems;
3963     unsigned short *array;
3964     union semun semun;
3965     struct semid_ds semid_ds;
3966     int i, ret;
3967 
3968     semun.buf = &semid_ds;
3969 
3970     ret = semctl(semid, 0, IPC_STAT, semun);
3971     if (ret == -1)
3972         return get_errno(ret);
3973 
3974     nsems = semid_ds.sem_nsems;
3975 
3976     *host_array = g_try_new(unsigned short, nsems);
3977     if (!*host_array) {
3978         return -TARGET_ENOMEM;
3979     }
3980     array = lock_user(VERIFY_READ, target_addr,
3981                       nsems*sizeof(unsigned short), 1);
3982     if (!array) {
3983         g_free(*host_array);
3984         return -TARGET_EFAULT;
3985     }
3986 
3987     for(i=0; i<nsems; i++) {
3988         __get_user((*host_array)[i], &array[i]);
3989     }
3990     unlock_user(array, target_addr, 0);
3991 
3992     return 0;
3993 }
3994 
3995 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3996                                                unsigned short **host_array)
3997 {
3998     int nsems;
3999     unsigned short *array;
4000     union semun semun;
4001     struct semid_ds semid_ds;
4002     int i, ret;
4003 
4004     semun.buf = &semid_ds;
4005 
4006     ret = semctl(semid, 0, IPC_STAT, semun);
4007     if (ret == -1)
4008         return get_errno(ret);
4009 
4010     nsems = semid_ds.sem_nsems;
4011 
4012     array = lock_user(VERIFY_WRITE, target_addr,
4013                       nsems*sizeof(unsigned short), 0);
4014     if (!array)
4015         return -TARGET_EFAULT;
4016 
4017     for(i=0; i<nsems; i++) {
4018         __put_user((*host_array)[i], &array[i]);
4019     }
4020     g_free(*host_array);
4021     unlock_user(array, target_addr, 1);
4022 
4023     return 0;
4024 }
4025 
4026 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4027                                  abi_ulong target_arg)
4028 {
4029     union target_semun target_su = { .buf = target_arg };
4030     union semun arg;
4031     struct semid_ds dsarg;
4032     unsigned short *array = NULL;
4033     struct seminfo seminfo;
4034     abi_long ret = -TARGET_EINVAL;
4035     abi_long err;
4036     cmd &= 0xff;
4037 
4038     switch( cmd ) {
4039 	case GETVAL:
4040 	case SETVAL:
4041             /* In 64-bit cross-endian situations, we will erroneously pick up
4042              * the wrong half of the union for the "val" element.  To rectify
4043              * this, the entire 8-byte structure is byteswapped, followed by
4044              * a swap of the 4-byte val field. In other cases, the data is
4045              * already in proper host byte order. */
4046 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4047 		target_su.buf = tswapal(target_su.buf);
4048 		arg.val = tswap32(target_su.val);
4049 	    } else {
4050 		arg.val = target_su.val;
4051 	    }
4052             ret = get_errno(semctl(semid, semnum, cmd, arg));
4053             break;
4054 	case GETALL:
4055 	case SETALL:
4056             err = target_to_host_semarray(semid, &array, target_su.array);
4057             if (err)
4058                 return err;
4059             arg.array = array;
4060             ret = get_errno(semctl(semid, semnum, cmd, arg));
4061             err = host_to_target_semarray(semid, target_su.array, &array);
4062             if (err)
4063                 return err;
4064             break;
4065 	case IPC_STAT:
4066 	case IPC_SET:
4067 	case SEM_STAT:
4068             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4069             if (err)
4070                 return err;
4071             arg.buf = &dsarg;
4072             ret = get_errno(semctl(semid, semnum, cmd, arg));
4073             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4074             if (err)
4075                 return err;
4076             break;
4077 	case IPC_INFO:
4078 	case SEM_INFO:
4079             arg.__buf = &seminfo;
4080             ret = get_errno(semctl(semid, semnum, cmd, arg));
4081             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4082             if (err)
4083                 return err;
4084             break;
4085 	case IPC_RMID:
4086 	case GETPID:
4087 	case GETNCNT:
4088 	case GETZCNT:
4089             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4090             break;
4091     }
4092 
4093     return ret;
4094 }
4095 
4096 struct target_sembuf {
4097     unsigned short sem_num;
4098     short sem_op;
4099     short sem_flg;
4100 };
4101 
4102 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4103                                              abi_ulong target_addr,
4104                                              unsigned nsops)
4105 {
4106     struct target_sembuf *target_sembuf;
4107     int i;
4108 
4109     target_sembuf = lock_user(VERIFY_READ, target_addr,
4110                               nsops*sizeof(struct target_sembuf), 1);
4111     if (!target_sembuf)
4112         return -TARGET_EFAULT;
4113 
4114     for(i=0; i<nsops; i++) {
4115         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4116         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4117         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4118     }
4119 
4120     unlock_user(target_sembuf, target_addr, 0);
4121 
4122     return 0;
4123 }
4124 
4125 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4126     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4127 
4128 /*
4129  * This macro is required to handle the s390 variants, which pass the
4130  * arguments in a different order than the default.
4131  */
4132 #ifdef __s390x__
4133 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4134   (__nsops), (__timeout), (__sops)
4135 #else
4136 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4137   (__nsops), 0, (__sops), (__timeout)
4138 #endif
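/* For reference, a mechanical expansion of the macro in the safe_ipc()
 * fallback below:
 *
 *   default: safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 *   s390x:   safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 */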
4139 
4140 static inline abi_long do_semtimedop(int semid,
4141                                      abi_long ptr,
4142                                      unsigned nsops,
4143                                      abi_long timeout, bool time64)
4144 {
4145     struct sembuf *sops;
4146     struct timespec ts, *pts = NULL;
4147     abi_long ret;
4148 
4149     if (timeout) {
4150         pts = &ts;
4151         if (time64) {
4152             if (target_to_host_timespec64(pts, timeout)) {
4153                 return -TARGET_EFAULT;
4154             }
4155         } else {
4156             if (target_to_host_timespec(pts, timeout)) {
4157                 return -TARGET_EFAULT;
4158             }
4159         }
4160     }
4161 
4162     if (nsops > TARGET_SEMOPM) {
4163         return -TARGET_E2BIG;
4164     }
4165 
4166     sops = g_new(struct sembuf, nsops);
4167 
4168     if (target_to_host_sembuf(sops, ptr, nsops)) {
4169         g_free(sops);
4170         return -TARGET_EFAULT;
4171     }
4172 
4173     ret = -TARGET_ENOSYS;
4174 #ifdef __NR_semtimedop
4175     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4176 #endif
4177 #ifdef __NR_ipc
4178     if (ret == -TARGET_ENOSYS) {
4179         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4180                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4181     }
4182 #endif
4183     g_free(sops);
4184     return ret;
4185 }
4186 #endif
4187 
4188 struct target_msqid_ds
4189 {
4190     struct target_ipc_perm msg_perm;
4191     abi_ulong msg_stime;
4192 #if TARGET_ABI_BITS == 32
4193     abi_ulong __unused1;
4194 #endif
4195     abi_ulong msg_rtime;
4196 #if TARGET_ABI_BITS == 32
4197     abi_ulong __unused2;
4198 #endif
4199     abi_ulong msg_ctime;
4200 #if TARGET_ABI_BITS == 32
4201     abi_ulong __unused3;
4202 #endif
4203     abi_ulong __msg_cbytes;
4204     abi_ulong msg_qnum;
4205     abi_ulong msg_qbytes;
4206     abi_ulong msg_lspid;
4207     abi_ulong msg_lrpid;
4208     abi_ulong __unused4;
4209     abi_ulong __unused5;
4210 };
4211 
4212 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4213                                                abi_ulong target_addr)
4214 {
4215     struct target_msqid_ds *target_md;
4216 
4217     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4218         return -TARGET_EFAULT;
4219     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4220         return -TARGET_EFAULT;
4221     host_md->msg_stime = tswapal(target_md->msg_stime);
4222     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4223     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4224     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4225     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4226     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4227     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4228     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4229     unlock_user_struct(target_md, target_addr, 0);
4230     return 0;
4231 }
4232 
4233 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4234                                                struct msqid_ds *host_md)
4235 {
4236     struct target_msqid_ds *target_md;
4237 
4238     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4239         return -TARGET_EFAULT;
4240     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4241         return -TARGET_EFAULT;
4242     target_md->msg_stime = tswapal(host_md->msg_stime);
4243     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4244     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4245     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4246     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4247     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4248     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4249     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4250     unlock_user_struct(target_md, target_addr, 1);
4251     return 0;
4252 }
4253 
4254 struct target_msginfo {
4255     int msgpool;
4256     int msgmap;
4257     int msgmax;
4258     int msgmnb;
4259     int msgmni;
4260     int msgssz;
4261     int msgtql;
4262     unsigned short int msgseg;
4263 };
4264 
4265 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4266                                               struct msginfo *host_msginfo)
4267 {
4268     struct target_msginfo *target_msginfo;
4269     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4270         return -TARGET_EFAULT;
4271     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4272     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4273     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4274     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4275     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4276     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4277     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4278     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4279     unlock_user_struct(target_msginfo, target_addr, 1);
4280     return 0;
4281 }
4282 
4283 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4284 {
4285     struct msqid_ds dsarg;
4286     struct msginfo msginfo;
4287     abi_long ret = -TARGET_EINVAL;
4288 
4289     cmd &= 0xff;
4290 
4291     switch (cmd) {
4292     case IPC_STAT:
4293     case IPC_SET:
4294     case MSG_STAT:
4295         if (target_to_host_msqid_ds(&dsarg,ptr))
4296             return -TARGET_EFAULT;
4297         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4298         if (host_to_target_msqid_ds(ptr,&dsarg))
4299             return -TARGET_EFAULT;
4300         break;
4301     case IPC_RMID:
4302         ret = get_errno(msgctl(msgid, cmd, NULL));
4303         break;
4304     case IPC_INFO:
4305     case MSG_INFO:
4306         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4307         if (host_to_target_msginfo(ptr, &msginfo))
4308             return -TARGET_EFAULT;
4309         break;
4310     }
4311 
4312     return ret;
4313 }
4314 
4315 struct target_msgbuf {
4316     abi_long mtype;
4317     char	mtext[1];
4318 };
4319 
4320 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4321                                  ssize_t msgsz, int msgflg)
4322 {
4323     struct target_msgbuf *target_mb;
4324     struct msgbuf *host_mb;
4325     abi_long ret = 0;
4326 
4327     if (msgsz < 0) {
4328         return -TARGET_EINVAL;
4329     }
4330 
4331     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4332         return -TARGET_EFAULT;
4333     host_mb = g_try_malloc(msgsz + sizeof(long));
4334     if (!host_mb) {
4335         unlock_user_struct(target_mb, msgp, 0);
4336         return -TARGET_ENOMEM;
4337     }
4338     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4339     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4340     ret = -TARGET_ENOSYS;
4341 #ifdef __NR_msgsnd
4342     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4343 #endif
4344 #ifdef __NR_ipc
4345     if (ret == -TARGET_ENOSYS) {
4346 #ifdef __s390x__
4347         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4348                                  host_mb));
4349 #else
4350         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4351                                  host_mb, 0));
4352 #endif
4353     }
4354 #endif
4355     g_free(host_mb);
4356     unlock_user_struct(target_mb, msgp, 0);
4357 
4358     return ret;
4359 }
4360 
4361 #ifdef __NR_ipc
4362 #if defined(__sparc__)
4363 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4364 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4365 #elif defined(__s390x__)
4366 /* The s390 sys_ipc variant has only five parameters.  */
4367 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4368     ((long int[]){(long int)__msgp, __msgtyp})
4369 #else
4370 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4371     ((long int[]){(long int)__msgp, __msgtyp}), 0
4372 #endif
4373 #endif
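/* For reference, a mechanical expansion of MSGRCV_ARGS() in the safe_ipc()
 * fallback inside do_msgrcv() below:
 *
 *   sparc:   ..., msgsz, msgflg, host_mb, msgtyp
 *   s390x:   ..., msgsz, msgflg, (long int[]){(long int)host_mb, msgtyp}
 *   default: ..., msgsz, msgflg, (long int[]){(long int)host_mb, msgtyp}, 0
 */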
4374 
4375 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4376                                  ssize_t msgsz, abi_long msgtyp,
4377                                  int msgflg)
4378 {
4379     struct target_msgbuf *target_mb;
4380     char *target_mtext;
4381     struct msgbuf *host_mb;
4382     abi_long ret = 0;
4383 
4384     if (msgsz < 0) {
4385         return -TARGET_EINVAL;
4386     }
4387 
4388     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4389         return -TARGET_EFAULT;
4390 
4391     host_mb = g_try_malloc(msgsz + sizeof(long));
4392     if (!host_mb) {
4393         ret = -TARGET_ENOMEM;
4394         goto end;
4395     }
4396     ret = -TARGET_ENOSYS;
4397 #ifdef __NR_msgrcv
4398     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4399 #endif
4400 #ifdef __NR_ipc
4401     if (ret == -TARGET_ENOSYS) {
4402         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4403                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4404     }
4405 #endif
4406 
4407     if (ret > 0) {
4408         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4409         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4410         if (!target_mtext) {
4411             ret = -TARGET_EFAULT;
4412             goto end;
4413         }
4414         memcpy(target_mb->mtext, host_mb->mtext, ret);
4415         unlock_user(target_mtext, target_mtext_addr, ret);
4416     }
4417 
4418     target_mb->mtype = tswapal(host_mb->mtype);
4419 
4420 end:
4421     if (target_mb)
4422         unlock_user_struct(target_mb, msgp, 1);
4423     g_free(host_mb);
4424     return ret;
4425 }
4426 
4427 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4428                                                abi_ulong target_addr)
4429 {
4430     struct target_shmid_ds *target_sd;
4431 
4432     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4433         return -TARGET_EFAULT;
4434     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4435         return -TARGET_EFAULT;
4436     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4437     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4438     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4439     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4440     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4441     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4442     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4443     unlock_user_struct(target_sd, target_addr, 0);
4444     return 0;
4445 }
4446 
4447 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4448                                                struct shmid_ds *host_sd)
4449 {
4450     struct target_shmid_ds *target_sd;
4451 
4452     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4453         return -TARGET_EFAULT;
4454     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4455         return -TARGET_EFAULT;
4456     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4457     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4458     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4459     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4460     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4461     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4462     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4463     unlock_user_struct(target_sd, target_addr, 1);
4464     return 0;
4465 }
4466 
4467 struct  target_shminfo {
4468     abi_ulong shmmax;
4469     abi_ulong shmmin;
4470     abi_ulong shmmni;
4471     abi_ulong shmseg;
4472     abi_ulong shmall;
4473 };
4474 
4475 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4476                                               struct shminfo *host_shminfo)
4477 {
4478     struct target_shminfo *target_shminfo;
4479     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4480         return -TARGET_EFAULT;
4481     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4482     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4483     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4484     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4485     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4486     unlock_user_struct(target_shminfo, target_addr, 1);
4487     return 0;
4488 }
4489 
4490 struct target_shm_info {
4491     int used_ids;
4492     abi_ulong shm_tot;
4493     abi_ulong shm_rss;
4494     abi_ulong shm_swp;
4495     abi_ulong swap_attempts;
4496     abi_ulong swap_successes;
4497 };
4498 
4499 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4500                                                struct shm_info *host_shm_info)
4501 {
4502     struct target_shm_info *target_shm_info;
4503     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4504         return -TARGET_EFAULT;
4505     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4506     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4507     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4508     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4509     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4510     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4511     unlock_user_struct(target_shm_info, target_addr, 1);
4512     return 0;
4513 }
4514 
4515 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4516 {
4517     struct shmid_ds dsarg;
4518     struct shminfo shminfo;
4519     struct shm_info shm_info;
4520     abi_long ret = -TARGET_EINVAL;
4521 
4522     cmd &= 0xff;
4523 
4524     switch(cmd) {
4525     case IPC_STAT:
4526     case IPC_SET:
4527     case SHM_STAT:
4528         if (target_to_host_shmid_ds(&dsarg, buf))
4529             return -TARGET_EFAULT;
4530         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4531         if (host_to_target_shmid_ds(buf, &dsarg))
4532             return -TARGET_EFAULT;
4533         break;
4534     case IPC_INFO:
4535         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4536         if (host_to_target_shminfo(buf, &shminfo))
4537             return -TARGET_EFAULT;
4538         break;
4539     case SHM_INFO:
4540         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4541         if (host_to_target_shm_info(buf, &shm_info))
4542             return -TARGET_EFAULT;
4543         break;
4544     case IPC_RMID:
4545     case SHM_LOCK:
4546     case SHM_UNLOCK:
4547         ret = get_errno(shmctl(shmid, cmd, NULL));
4548         break;
4549     }
4550 
4551     return ret;
4552 }
4553 
4554 #ifndef TARGET_FORCE_SHMLBA
4555 /* For most architectures, SHMLBA is the same as the page size;
4556  * some architectures have larger values, in which case they should
4557  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4558  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4559  * and defining its own value for SHMLBA.
4560  *
4561  * The kernel also permits SHMLBA to be set by the architecture to a
4562  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4563  * this means that addresses are rounded to the large size if
4564  * SHM_RND is set but addresses not aligned to that size are not rejected
4565  * as long as they are at least page-aligned. Since the only architecture
4566  * which uses this is ia64, this code doesn't provide for that oddity.
4567  */
4568 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4569 {
4570     return TARGET_PAGE_SIZE;
4571 }
4572 #endif
4573 
4574 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4575                                  int shmid, abi_ulong shmaddr, int shmflg)
4576 {
4577     abi_long raddr;
4578     void *host_raddr;
4579     struct shmid_ds shm_info;
4580     int i,ret;
4581     abi_ulong shmlba;
4582 
4583     /* find out the length of the shared memory segment */
4584     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4585     if (is_error(ret)) {
4586         /* can't get length, bail out */
4587         return ret;
4588     }
4589 
4590     shmlba = target_shmlba(cpu_env);
4591 
4592     if (shmaddr & (shmlba - 1)) {
4593         if (shmflg & SHM_RND) {
4594             shmaddr &= ~(shmlba - 1);
4595         } else {
4596             return -TARGET_EINVAL;
4597         }
4598     }
4599     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4600         return -TARGET_EINVAL;
4601     }
4602 
4603     mmap_lock();
4604 
4605     if (shmaddr)
4606         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4607     else {
4608         abi_ulong mmap_start;
4609 
4610         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4611         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4612 
4613         if (mmap_start == -1) {
4614             errno = ENOMEM;
4615             host_raddr = (void *)-1;
4616         } else
4617             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4618     }
4619 
4620     if (host_raddr == (void *)-1) {
4621         mmap_unlock();
4622         return get_errno((long)host_raddr);
4623     }
4624     raddr=h2g((unsigned long)host_raddr);
4625 
4626     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4627                    PAGE_VALID | PAGE_READ |
4628                    ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
4629 
4630     for (i = 0; i < N_SHM_REGIONS; i++) {
4631         if (!shm_regions[i].in_use) {
4632             shm_regions[i].in_use = true;
4633             shm_regions[i].start = raddr;
4634             shm_regions[i].size = shm_info.shm_segsz;
4635             break;
4636         }
4637     }
4638 
4639     mmap_unlock();
4640     return raddr;
4641 
4642 }
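/*
 * Illustrative guest-side sketch of what reaches do_shmat() above:
 *
 *     int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *     char *p = shmat(id, NULL, 0);
 *
 * Here shmaddr == 0, so we choose the guest address ourselves via
 * mmap_find_vma(), using the larger of the host SHMLBA and the target
 * shmlba so that both the host shmat() and the guest's alignment
 * expectations are met; the attached range is recorded in shm_regions[]
 * so that a later shmdt() can clear the page flags again.
 */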
4643 
4644 static inline abi_long do_shmdt(abi_ulong shmaddr)
4645 {
4646     int i;
4647     abi_long rv;
4648 
4649     mmap_lock();
4650 
4651     for (i = 0; i < N_SHM_REGIONS; ++i) {
4652         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4653             shm_regions[i].in_use = false;
4654             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4655             break;
4656         }
4657     }
4658     rv = get_errno(shmdt(g2h(shmaddr)));
4659 
4660     mmap_unlock();
4661 
4662     return rv;
4663 }
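/*
 * do_shmdt() relies on the bookkeeping done in do_shmat(): only
 * addresses handed out by do_shmat() appear in shm_regions[], so
 * detaching an address the guest never attached simply falls through to
 * the host shmdt(), which reports the error.  Illustrative guest
 * sequence exercising both paths:
 *
 *     char *p = shmat(id, NULL, 0);
 *     memcpy(p, data, len);
 *     shmdt(p);
 */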
4664 
4665 #ifdef TARGET_NR_ipc
4666 /* ??? This only works with linear mappings.  */
4667 /* do_ipc() must return target values and target errnos. */
4668 static abi_long do_ipc(CPUArchState *cpu_env,
4669                        unsigned int call, abi_long first,
4670                        abi_long second, abi_long third,
4671                        abi_long ptr, abi_long fifth)
4672 {
4673     int version;
4674     abi_long ret = 0;
4675 
4676     version = call >> 16;
4677     call &= 0xffff;
4678 
4679     switch (call) {
4680     case IPCOP_semop:
4681         ret = do_semtimedop(first, ptr, second, 0, false);
4682         break;
4683     case IPCOP_semtimedop:
4684     /*
4685      * The s390 sys_ipc variant has only five parameters instead of six
4686      * (as for the default variant) and the only difference is the handling of
4687      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer to a
4688      * struct timespec, whereas the generic variant uses the fifth parameter.
4689      */
4690 #if defined(TARGET_S390X)
4691         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4692 #else
4693         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4694 #endif
4695         break;
4696 
4697     case IPCOP_semget:
4698         ret = get_errno(semget(first, second, third));
4699         break;
4700 
4701     case IPCOP_semctl: {
4702         /* The semun argument to semctl is passed by value, so dereference the
4703          * ptr argument. */
4704         abi_ulong atptr;
4705         get_user_ual(atptr, ptr);
4706         ret = do_semctl(first, second, third, atptr);
4707         break;
4708     }
4709 
4710     case IPCOP_msgget:
4711         ret = get_errno(msgget(first, second));
4712         break;
4713 
4714     case IPCOP_msgsnd:
4715         ret = do_msgsnd(first, ptr, second, third);
4716         break;
4717 
4718     case IPCOP_msgctl:
4719         ret = do_msgctl(first, second, ptr);
4720         break;
4721 
4722     case IPCOP_msgrcv:
4723         switch (version) {
4724         case 0:
4725             {
4726                 struct target_ipc_kludge {
4727                     abi_long msgp;
4728                     abi_long msgtyp;
4729                 } *tmp;
4730 
4731                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4732                     ret = -TARGET_EFAULT;
4733                     break;
4734                 }
4735 
4736                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4737 
4738                 unlock_user_struct(tmp, ptr, 0);
4739                 break;
4740             }
4741         default:
4742             ret = do_msgrcv(first, ptr, second, fifth, third);
4743         }
4744         break;
4745 
4746     case IPCOP_shmat:
4747         switch (version) {
4748         default:
4749         {
4750             abi_ulong raddr;
4751             raddr = do_shmat(cpu_env, first, ptr, second);
4752             if (is_error(raddr))
4753                 return get_errno(raddr);
4754             if (put_user_ual(raddr, third))
4755                 return -TARGET_EFAULT;
4756             break;
4757         }
4758         case 1:
4759             ret = -TARGET_EINVAL;
4760             break;
4761         }
4762         break;
4763     case IPCOP_shmdt:
4764         ret = do_shmdt(ptr);
4765         break;
4766 
4767     case IPCOP_shmget:
4768         /* IPC_* flag values are the same on all Linux platforms */
4769         ret = get_errno(shmget(first, second, third));
4770         break;
4771 
4772     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4773     case IPCOP_shmctl:
4774         ret = do_shmctl(first, second, ptr);
4775         break;
4776     default:
4777         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4778                       call, version);
4779         ret = -TARGET_ENOSYS;
4780         break;
4781     }
4782     return ret;
4783 }
4784 #endif
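/*
 * Illustrative note on the multiplexer above: on targets that only
 * provide TARGET_NR_ipc, the guest C library builds the individual IPC
 * calls on top of it, roughly as
 *
 *     shmget(key, size, flg)  ->  ipc(IPCOP_shmget, key, size, flg, 0, 0)
 *     semop(id, sops, nsops)  ->  ipc(IPCOP_semop, id, nsops, 0, sops, 0)
 *
 * so do_ipc() only decodes the call number (low 16 bits) and version
 * (high 16 bits) and forwards to the same helpers that back the
 * standalone TARGET_NR_* syscalls.
 */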
4785 
4786 /* kernel structure types definitions */
4787 
4788 #define STRUCT(name, ...) STRUCT_ ## name,
4789 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4790 enum {
4791 #include "syscall_types.h"
4792 STRUCT_MAX
4793 };
4794 #undef STRUCT
4795 #undef STRUCT_SPECIAL
4796 
4797 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4798 #define STRUCT_SPECIAL(name)
4799 #include "syscall_types.h"
4800 #undef STRUCT
4801 #undef STRUCT_SPECIAL
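/*
 * The two passes over "syscall_types.h" above form an X-macro: the
 * first pass turns each STRUCT(name, ...) line into an enum constant
 * STRUCT_name, the second into a struct_name_def[] argtype array that
 * the thunk code walks when converting between guest and host layouts.
 * As an illustrative (simplified) example, a line along the lines of
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * yields STRUCT_winsize plus
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * STRUCT_SPECIAL() entries get only the enum constant; their conversion
 * is done by hand-written code (see struct_termios_def below).
 */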
4802 
4803 #define MAX_STRUCT_SIZE 4096
4804 
4805 #ifdef CONFIG_FIEMAP
4806 /* So fiemap access checks don't overflow on 32 bit systems.
4807  * This is very slightly smaller than the limit imposed by
4808  * the underlying kernel.
4809  */
4810 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4811                             / sizeof(struct fiemap_extent))
4812 
4813 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4814                                        int fd, int cmd, abi_long arg)
4815 {
4816     /* The parameter for this ioctl is a struct fiemap followed
4817      * by an array of struct fiemap_extent whose size is set
4818      * in fiemap->fm_extent_count. The array is filled in by the
4819      * ioctl.
4820      */
4821     int target_size_in, target_size_out;
4822     struct fiemap *fm;
4823     const argtype *arg_type = ie->arg_type;
4824     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4825     void *argptr, *p;
4826     abi_long ret;
4827     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4828     uint32_t outbufsz;
4829     int free_fm = 0;
4830 
4831     assert(arg_type[0] == TYPE_PTR);
4832     assert(ie->access == IOC_RW);
4833     arg_type++;
4834     target_size_in = thunk_type_size(arg_type, 0);
4835     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4836     if (!argptr) {
4837         return -TARGET_EFAULT;
4838     }
4839     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4840     unlock_user(argptr, arg, 0);
4841     fm = (struct fiemap *)buf_temp;
4842     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4843         return -TARGET_EINVAL;
4844     }
4845 
4846     outbufsz = sizeof (*fm) +
4847         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4848 
4849     if (outbufsz > MAX_STRUCT_SIZE) {
4850         /* We can't fit all the extents into the fixed size buffer.
4851          * Allocate one that is large enough and use it instead.
4852          */
4853         fm = g_try_malloc(outbufsz);
4854         if (!fm) {
4855             return -TARGET_ENOMEM;
4856         }
4857         memcpy(fm, buf_temp, sizeof(struct fiemap));
4858         free_fm = 1;
4859     }
4860     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4861     if (!is_error(ret)) {
4862         target_size_out = target_size_in;
4863         /* An extent_count of 0 means we were only counting the extents
4864          * so there are no structs to copy
4865          */
4866         if (fm->fm_extent_count != 0) {
4867             target_size_out += fm->fm_mapped_extents * extent_size;
4868         }
4869         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4870         if (!argptr) {
4871             ret = -TARGET_EFAULT;
4872         } else {
4873             /* Convert the struct fiemap */
4874             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4875             if (fm->fm_extent_count != 0) {
4876                 p = argptr + target_size_in;
4877                 /* ...and then all the struct fiemap_extents */
4878                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4879                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4880                                   THUNK_TARGET);
4881                     p += extent_size;
4882                 }
4883             }
4884             unlock_user(argptr, arg, target_size_out);
4885         }
4886     }
4887     if (free_fm) {
4888         g_free(fm);
4889     }
4890     return ret;
4891 }
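/*
 * Illustrative guest-side use of the path above:
 *
 *     struct fiemap *fm;
 *     fm = calloc(1, sizeof(*fm) + 8 * sizeof(struct fiemap_extent));
 *     fm->fm_length = ~0ULL;
 *     fm->fm_extent_count = 8;
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * The handler converts the fixed header, sizes a host buffer big enough
 * for the requested extent array (falling back to g_try_malloc() when
 * it does not fit in buf_temp), issues the host ioctl, and converts the
 * header plus fm_mapped_extents extents back to the target layout.
 */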
4892 #endif
4893 
4894 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4895                                 int fd, int cmd, abi_long arg)
4896 {
4897     const argtype *arg_type = ie->arg_type;
4898     int target_size;
4899     void *argptr;
4900     int ret;
4901     struct ifconf *host_ifconf;
4902     uint32_t outbufsz;
4903     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4904     int target_ifreq_size;
4905     int nb_ifreq;
4906     int free_buf = 0;
4907     int i;
4908     int target_ifc_len;
4909     abi_long target_ifc_buf;
4910     int host_ifc_len;
4911     char *host_ifc_buf;
4912 
4913     assert(arg_type[0] == TYPE_PTR);
4914     assert(ie->access == IOC_RW);
4915 
4916     arg_type++;
4917     target_size = thunk_type_size(arg_type, 0);
4918 
4919     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4920     if (!argptr)
4921         return -TARGET_EFAULT;
4922     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4923     unlock_user(argptr, arg, 0);
4924 
4925     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4926     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4927     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4928 
4929     if (target_ifc_buf != 0) {
4930         target_ifc_len = host_ifconf->ifc_len;
4931         nb_ifreq = target_ifc_len / target_ifreq_size;
4932         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4933 
4934         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4935         if (outbufsz > MAX_STRUCT_SIZE) {
4936             /*
4937              * We can't fit all the ifreq entries into the fixed size buffer.
4938              * Allocate one that is large enough and use it instead.
4939              */
4940             host_ifconf = malloc(outbufsz);
4941             if (!host_ifconf) {
4942                 return -TARGET_ENOMEM;
4943             }
4944             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4945             free_buf = 1;
4946         }
4947         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4948 
4949         host_ifconf->ifc_len = host_ifc_len;
4950     } else {
4951         host_ifc_buf = NULL;
4952     }
4953     host_ifconf->ifc_buf = host_ifc_buf;
4954 
4955     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4956     if (!is_error(ret)) {
4957         /* convert host ifc_len to target ifc_len */
4958 
4959         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4960         target_ifc_len = nb_ifreq * target_ifreq_size;
4961         host_ifconf->ifc_len = target_ifc_len;
4962 
4963         /* restore target ifc_buf */
4964 
4965         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4966 
4967         /* copy struct ifconf to target user */
4968 
4969         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4970         if (!argptr)
4971             return -TARGET_EFAULT;
4972         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4973         unlock_user(argptr, arg, target_size);
4974 
4975         if (target_ifc_buf != 0) {
4976             /* copy ifreq[] to target user */
4977             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4978             for (i = 0; i < nb_ifreq ; i++) {
4979                 thunk_convert(argptr + i * target_ifreq_size,
4980                               host_ifc_buf + i * sizeof(struct ifreq),
4981                               ifreq_arg_type, THUNK_TARGET);
4982             }
4983             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4984         }
4985     }
4986 
4987     if (free_buf) {
4988         free(host_ifconf);
4989     }
4990 
4991     return ret;
4992 }
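/*
 * Illustrative guest-side use of the SIOCGIFCONF path handled above:
 *
 *     struct ifreq reqs[8];
 *     struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
 *     ioctl(sock, SIOCGIFCONF, &ifc);
 *
 * Because target and host struct ifreq may differ in size, ifc_len is
 * recomputed in units of the host structure before the host ioctl and
 * converted back to target units afterwards.  A NULL ifc_buf (the
 * "query required length" form) is handled by issuing the host ioctl
 * with a NULL buffer and converting only ifc_len.
 */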
4993 
4994 #if defined(CONFIG_USBFS)
4995 #if HOST_LONG_BITS > 64
4996 #error USBDEVFS thunks do not support >64 bit hosts yet.
4997 #endif
4998 struct live_urb {
4999     uint64_t target_urb_adr;
5000     uint64_t target_buf_adr;
5001     char *target_buf_ptr;
5002     struct usbdevfs_urb host_urb;
5003 };
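/*
 * Each guest USBDEVFS_SUBMITURB allocates one live_urb: host_urb is
 * what the host kernel sees, while the surrounding fields remember
 * where the guest urb and its data buffer live.  USBDEVFS_REAPURB only
 * returns the host urb pointer, so the reap path recovers the
 * bookkeeping with a container-of step, conceptually
 *
 *     lurb = (struct live_urb *)((uintptr_t)host_urb_ptr
 *                                - offsetof(struct live_urb, host_urb));
 *
 * while USBDEVFS_DISCARDURB looks the urb up by its guest address in
 * the hash table below.
 */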
5004 
5005 static GHashTable *usbdevfs_urb_hashtable(void)
5006 {
5007     static GHashTable *urb_hashtable;
5008 
5009     if (!urb_hashtable) {
5010         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5011     }
5012     return urb_hashtable;
5013 }
5014 
5015 static void urb_hashtable_insert(struct live_urb *urb)
5016 {
5017     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5018     g_hash_table_insert(urb_hashtable, urb, urb);
5019 }
5020 
5021 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5022 {
5023     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5024     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5025 }
5026 
5027 static void urb_hashtable_remove(struct live_urb *urb)
5028 {
5029     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5030     g_hash_table_remove(urb_hashtable, urb);
5031 }
5032 
5033 static abi_long
5034 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5035                           int fd, int cmd, abi_long arg)
5036 {
5037     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5038     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5039     struct live_urb *lurb;
5040     void *argptr;
5041     uint64_t hurb;
5042     int target_size;
5043     uintptr_t target_urb_adr;
5044     abi_long ret;
5045 
5046     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5047 
5048     memset(buf_temp, 0, sizeof(uint64_t));
5049     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5050     if (is_error(ret)) {
5051         return ret;
5052     }
5053 
5054     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5055     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5056     if (!lurb->target_urb_adr) {
5057         return -TARGET_EFAULT;
5058     }
5059     urb_hashtable_remove(lurb);
5060     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5061         lurb->host_urb.buffer_length);
5062     lurb->target_buf_ptr = NULL;
5063 
5064     /* restore the guest buffer pointer */
5065     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5066 
5067     /* update the guest urb struct */
5068     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5069     if (!argptr) {
5070         g_free(lurb);
5071         return -TARGET_EFAULT;
5072     }
5073     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5074     unlock_user(argptr, lurb->target_urb_adr, target_size);
5075 
5076     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5077     /* write back the urb handle */
5078     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5079     if (!argptr) {
5080         g_free(lurb);
5081         return -TARGET_EFAULT;
5082     }
5083 
5084     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5085     target_urb_adr = lurb->target_urb_adr;
5086     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5087     unlock_user(argptr, arg, target_size);
5088 
5089     g_free(lurb);
5090     return ret;
5091 }
5092 
5093 static abi_long
5094 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5095                              uint8_t *buf_temp __attribute__((unused)),
5096                              int fd, int cmd, abi_long arg)
5097 {
5098     struct live_urb *lurb;
5099 
5100     /* map target address back to host URB with metadata. */
5101     lurb = urb_hashtable_lookup(arg);
5102     if (!lurb) {
5103         return -TARGET_EFAULT;
5104     }
5105     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5106 }
5107 
5108 static abi_long
5109 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5110                             int fd, int cmd, abi_long arg)
5111 {
5112     const argtype *arg_type = ie->arg_type;
5113     int target_size;
5114     abi_long ret;
5115     void *argptr;
5116     int rw_dir;
5117     struct live_urb *lurb;
5118 
5119     /*
5120      * each submitted URB needs to map to a unique ID for the
5121      * kernel, and that unique ID needs to be a pointer to
5122      * host memory.  Hence, we need to malloc for each URB.
5123      * Isochronous transfers have a variable-length struct.
5124      */
5125     arg_type++;
5126     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5127 
5128     /* construct host copy of urb and metadata */
5129     lurb = g_try_malloc0(sizeof(struct live_urb));
5130     if (!lurb) {
5131         return -TARGET_ENOMEM;
5132     }
5133 
5134     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5135     if (!argptr) {
5136         g_free(lurb);
5137         return -TARGET_EFAULT;
5138     }
5139     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5140     unlock_user(argptr, arg, 0);
5141 
5142     lurb->target_urb_adr = arg;
5143     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5144 
5145     /* buffer space used depends on endpoint type so lock the entire buffer */
5146     /* control type urbs should check the buffer contents for true direction */
5147     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5148     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5149         lurb->host_urb.buffer_length, 1);
5150     if (lurb->target_buf_ptr == NULL) {
5151         g_free(lurb);
5152         return -TARGET_EFAULT;
5153     }
5154 
5155     /* update buffer pointer in host copy */
5156     lurb->host_urb.buffer = lurb->target_buf_ptr;
5157 
5158     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5159     if (is_error(ret)) {
5160         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5161         g_free(lurb);
5162     } else {
5163         urb_hashtable_insert(lurb);
5164     }
5165 
5166     return ret;
5167 }
5168 #endif /* CONFIG_USBFS */
5169 
5170 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5171                             int cmd, abi_long arg)
5172 {
5173     void *argptr;
5174     struct dm_ioctl *host_dm;
5175     abi_long guest_data;
5176     uint32_t guest_data_size;
5177     int target_size;
5178     const argtype *arg_type = ie->arg_type;
5179     abi_long ret;
5180     void *big_buf = NULL;
5181     char *host_data;
5182 
5183     arg_type++;
5184     target_size = thunk_type_size(arg_type, 0);
5185     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5186     if (!argptr) {
5187         ret = -TARGET_EFAULT;
5188         goto out;
5189     }
5190     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5191     unlock_user(argptr, arg, 0);
5192 
5193     /* buf_temp is too small, so fetch things into a bigger buffer */
5194     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5195     memcpy(big_buf, buf_temp, target_size);
5196     buf_temp = big_buf;
5197     host_dm = big_buf;
5198 
5199     guest_data = arg + host_dm->data_start;
5200     if ((guest_data - arg) < 0) {
5201         ret = -TARGET_EINVAL;
5202         goto out;
5203     }
5204     guest_data_size = host_dm->data_size - host_dm->data_start;
5205     host_data = (char*)host_dm + host_dm->data_start;
5206 
5207     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5208     if (!argptr) {
5209         ret = -TARGET_EFAULT;
5210         goto out;
5211     }
5212 
5213     switch (ie->host_cmd) {
5214     case DM_REMOVE_ALL:
5215     case DM_LIST_DEVICES:
5216     case DM_DEV_CREATE:
5217     case DM_DEV_REMOVE:
5218     case DM_DEV_SUSPEND:
5219     case DM_DEV_STATUS:
5220     case DM_DEV_WAIT:
5221     case DM_TABLE_STATUS:
5222     case DM_TABLE_CLEAR:
5223     case DM_TABLE_DEPS:
5224     case DM_LIST_VERSIONS:
5225         /* no input data */
5226         break;
5227     case DM_DEV_RENAME:
5228     case DM_DEV_SET_GEOMETRY:
5229         /* data contains only strings */
5230         memcpy(host_data, argptr, guest_data_size);
5231         break;
5232     case DM_TARGET_MSG:
5233         memcpy(host_data, argptr, guest_data_size);
5234         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5235         break;
5236     case DM_TABLE_LOAD:
5237     {
5238         void *gspec = argptr;
5239         void *cur_data = host_data;
5240         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5241         int spec_size = thunk_type_size(arg_type, 0);
5242         int i;
5243 
5244         for (i = 0; i < host_dm->target_count; i++) {
5245             struct dm_target_spec *spec = cur_data;
5246             uint32_t next;
5247             int slen;
5248 
5249             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5250             slen = strlen((char*)gspec + spec_size) + 1;
5251             next = spec->next;
5252             spec->next = sizeof(*spec) + slen;
5253             strcpy((char*)&spec[1], gspec + spec_size);
5254             gspec += next;
5255             cur_data += spec->next;
5256         }
5257         break;
5258     }
5259     default:
5260         ret = -TARGET_EINVAL;
5261         unlock_user(argptr, guest_data, 0);
5262         goto out;
5263     }
5264     unlock_user(argptr, guest_data, 0);
5265 
5266     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5267     if (!is_error(ret)) {
5268         guest_data = arg + host_dm->data_start;
5269         guest_data_size = host_dm->data_size - host_dm->data_start;
5270         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5271         switch (ie->host_cmd) {
5272         case DM_REMOVE_ALL:
5273         case DM_DEV_CREATE:
5274         case DM_DEV_REMOVE:
5275         case DM_DEV_RENAME:
5276         case DM_DEV_SUSPEND:
5277         case DM_DEV_STATUS:
5278         case DM_TABLE_LOAD:
5279         case DM_TABLE_CLEAR:
5280         case DM_TARGET_MSG:
5281         case DM_DEV_SET_GEOMETRY:
5282             /* no return data */
5283             break;
5284         case DM_LIST_DEVICES:
5285         {
5286             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5287             uint32_t remaining_data = guest_data_size;
5288             void *cur_data = argptr;
5289             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5290             int nl_size = 12; /* can't use thunk_size due to alignment */
5291 
5292             while (1) {
5293                 uint32_t next = nl->next;
5294                 if (next) {
5295                     nl->next = nl_size + (strlen(nl->name) + 1);
5296                 }
5297                 if (remaining_data < nl->next) {
5298                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5299                     break;
5300                 }
5301                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5302                 strcpy(cur_data + nl_size, nl->name);
5303                 cur_data += nl->next;
5304                 remaining_data -= nl->next;
5305                 if (!next) {
5306                     break;
5307                 }
5308                 nl = (void*)nl + next;
5309             }
5310             break;
5311         }
5312         case DM_DEV_WAIT:
5313         case DM_TABLE_STATUS:
5314         {
5315             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5316             void *cur_data = argptr;
5317             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5318             int spec_size = thunk_type_size(arg_type, 0);
5319             int i;
5320 
5321             for (i = 0; i < host_dm->target_count; i++) {
5322                 uint32_t next = spec->next;
5323                 int slen = strlen((char*)&spec[1]) + 1;
5324                 spec->next = (cur_data - argptr) + spec_size + slen;
5325                 if (guest_data_size < spec->next) {
5326                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5327                     break;
5328                 }
5329                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5330                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5331                 cur_data = argptr + spec->next;
5332                 spec = (void*)host_dm + host_dm->data_start + next;
5333             }
5334             break;
5335         }
5336         case DM_TABLE_DEPS:
5337         {
5338             void *hdata = (void*)host_dm + host_dm->data_start;
5339             int count = *(uint32_t*)hdata;
5340             uint64_t *hdev = hdata + 8;
5341             uint64_t *gdev = argptr + 8;
5342             int i;
5343 
5344             *(uint32_t*)argptr = tswap32(count);
5345             for (i = 0; i < count; i++) {
5346                 *gdev = tswap64(*hdev);
5347                 gdev++;
5348                 hdev++;
5349             }
5350             break;
5351         }
5352         case DM_LIST_VERSIONS:
5353         {
5354             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5355             uint32_t remaining_data = guest_data_size;
5356             void *cur_data = argptr;
5357             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5358             int vers_size = thunk_type_size(arg_type, 0);
5359 
5360             while (1) {
5361                 uint32_t next = vers->next;
5362                 if (next) {
5363                     vers->next = vers_size + (strlen(vers->name) + 1);
5364                 }
5365                 if (remaining_data < vers->next) {
5366                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5367                     break;
5368                 }
5369                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5370                 strcpy(cur_data + vers_size, vers->name);
5371                 cur_data += vers->next;
5372                 remaining_data -= vers->next;
5373                 if (!next) {
5374                     break;
5375                 }
5376                 vers = (void*)vers + next;
5377             }
5378             break;
5379         }
5380         default:
5381             unlock_user(argptr, guest_data, 0);
5382             ret = -TARGET_EINVAL;
5383             goto out;
5384         }
5385         unlock_user(argptr, guest_data, guest_data_size);
5386 
5387         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5388         if (!argptr) {
5389             ret = -TARGET_EFAULT;
5390             goto out;
5391         }
5392         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5393         unlock_user(argptr, arg, target_size);
5394     }
5395 out:
5396     g_free(big_buf);
5397     return ret;
5398 }
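/*
 * All the device-mapper ioctls above share one framing: a struct
 * dm_ioctl header whose data_start/data_size fields describe a trailing
 * variable-sized payload, which is why the handler copies the header
 * into a buffer sized from data_size and converts the payload per
 * command.  Illustrative guest call for DM_LIST_DEVICES:
 *
 *     struct dm_ioctl *dmi = calloc(1, 16384);
 *     dmi->data_size = 16384;
 *     dmi->data_start = sizeof(*dmi);
 *     ... fill in dmi->version ...
 *     ioctl(fd, DM_LIST_DEVICES, dmi);
 *
 * The returned dm_name_list entries are rewritten here because their
 * "next" offsets depend on the size and alignment of the target
 * structure.
 */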
5399 
5400 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5401                                int cmd, abi_long arg)
5402 {
5403     void *argptr;
5404     int target_size;
5405     const argtype *arg_type = ie->arg_type;
5406     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5407     abi_long ret;
5408 
5409     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5410     struct blkpg_partition host_part;
5411 
5412     /* Read and convert blkpg */
5413     arg_type++;
5414     target_size = thunk_type_size(arg_type, 0);
5415     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5416     if (!argptr) {
5417         ret = -TARGET_EFAULT;
5418         goto out;
5419     }
5420     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5421     unlock_user(argptr, arg, 0);
5422 
5423     switch (host_blkpg->op) {
5424     case BLKPG_ADD_PARTITION:
5425     case BLKPG_DEL_PARTITION:
5426         /* payload is struct blkpg_partition */
5427         break;
5428     default:
5429         /* Unknown opcode */
5430         ret = -TARGET_EINVAL;
5431         goto out;
5432     }
5433 
5434     /* Read and convert blkpg->data */
5435     arg = (abi_long)(uintptr_t)host_blkpg->data;
5436     target_size = thunk_type_size(part_arg_type, 0);
5437     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5438     if (!argptr) {
5439         ret = -TARGET_EFAULT;
5440         goto out;
5441     }
5442     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5443     unlock_user(argptr, arg, 0);
5444 
5445     /* Swizzle the data pointer to our local copy and call! */
5446     host_blkpg->data = &host_part;
5447     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5448 
5449 out:
5450     return ret;
5451 }
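/*
 * BLKPG is a two-level structure: struct blkpg_ioctl_arg carries an op
 * code plus a data pointer to a struct blkpg_partition, so a flat thunk
 * conversion is not enough.  Illustrative guest call that lands here:
 *
 *     struct blkpg_partition part = { .pno = 1, .start = ..., .length = ... };
 *     struct blkpg_ioctl_arg a = { .op = BLKPG_ADD_PARTITION,
 *                                  .datalen = sizeof(part),
 *                                  .data = &part };
 *     ioctl(fd, BLKPG, &a);
 *
 * The handler converts the outer structure, then dereferences and
 * converts the inner partition description into host_part before
 * swizzling the pointer and issuing the host ioctl.
 */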
5452 
5453 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5454                                 int fd, int cmd, abi_long arg)
5455 {
5456     const argtype *arg_type = ie->arg_type;
5457     const StructEntry *se;
5458     const argtype *field_types;
5459     const int *dst_offsets, *src_offsets;
5460     int target_size;
5461     void *argptr;
5462     abi_ulong *target_rt_dev_ptr = NULL;
5463     unsigned long *host_rt_dev_ptr = NULL;
5464     abi_long ret;
5465     int i;
5466 
5467     assert(ie->access == IOC_W);
5468     assert(*arg_type == TYPE_PTR);
5469     arg_type++;
5470     assert(*arg_type == TYPE_STRUCT);
5471     target_size = thunk_type_size(arg_type, 0);
5472     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5473     if (!argptr) {
5474         return -TARGET_EFAULT;
5475     }
5476     arg_type++;
5477     assert(*arg_type == (int)STRUCT_rtentry);
5478     se = struct_entries + *arg_type++;
5479     assert(se->convert[0] == NULL);
5480     /* convert struct here to be able to catch rt_dev string */
5481     field_types = se->field_types;
5482     dst_offsets = se->field_offsets[THUNK_HOST];
5483     src_offsets = se->field_offsets[THUNK_TARGET];
5484     for (i = 0; i < se->nb_fields; i++) {
5485         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5486             assert(*field_types == TYPE_PTRVOID);
5487             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5488             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5489             if (*target_rt_dev_ptr != 0) {
5490                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5491                                                   tswapal(*target_rt_dev_ptr));
5492                 if (!*host_rt_dev_ptr) {
5493                     unlock_user(argptr, arg, 0);
5494                     return -TARGET_EFAULT;
5495                 }
5496             } else {
5497                 *host_rt_dev_ptr = 0;
5498             }
5499             field_types++;
5500             continue;
5501         }
5502         field_types = thunk_convert(buf_temp + dst_offsets[i],
5503                                     argptr + src_offsets[i],
5504                                     field_types, THUNK_HOST);
5505     }
5506     unlock_user(argptr, arg, 0);
5507 
5508     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5509 
5510     assert(host_rt_dev_ptr != NULL);
5511     assert(target_rt_dev_ptr != NULL);
5512     if (*host_rt_dev_ptr != 0) {
5513         unlock_user((void *)*host_rt_dev_ptr,
5514                     *target_rt_dev_ptr, 0);
5515     }
5516     return ret;
5517 }
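/*
 * SIOCADDRT/SIOCDELRT need this hand-rolled conversion because struct
 * rtentry embeds rt_dev, a pointer to a device-name string, which the
 * generic field-by-field thunk cannot follow.  Illustrative guest code
 * adding a route:
 *
 *     struct rtentry rt = { 0 };
 *     rt.rt_flags = RTF_UP;
 *     rt.rt_dev = "eth0";
 *     ... fill in rt_dst / rt_gateway / rt_genmask ...
 *     ioctl(sock, SIOCADDRT, &rt);
 *
 * The loop above converts every ordinary field with thunk_convert() but
 * replaces rt_dev with a locked host copy of the guest string, which is
 * unlocked again once the host ioctl has returned.
 */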
5518 
5519 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5520                                      int fd, int cmd, abi_long arg)
5521 {
5522     int sig = target_to_host_signal(arg);
5523     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5524 }
5525 
5526 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5527                                     int fd, int cmd, abi_long arg)
5528 {
5529     struct timeval tv;
5530     abi_long ret;
5531 
5532     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5533     if (is_error(ret)) {
5534         return ret;
5535     }
5536 
5537     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5538         if (copy_to_user_timeval(arg, &tv)) {
5539             return -TARGET_EFAULT;
5540         }
5541     } else {
5542         if (copy_to_user_timeval64(arg, &tv)) {
5543             return -TARGET_EFAULT;
5544         }
5545     }
5546 
5547     return ret;
5548 }
5549 
5550 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5551                                       int fd, int cmd, abi_long arg)
5552 {
5553     struct timespec ts;
5554     abi_long ret;
5555 
5556     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5557     if (is_error(ret)) {
5558         return ret;
5559     }
5560 
5561     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5562         if (host_to_target_timespec(arg, &ts)) {
5563             return -TARGET_EFAULT;
5564         }
5565     } else {
5566         if (host_to_target_timespec64(arg, &ts)) {
5567             return -TARGET_EFAULT;
5568         }
5569     }
5570 
5571     return ret;
5572 }
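/*
 * Both handlers above exist because the guest may use either the legacy
 * SIOCGSTAMP/SIOCGSTAMPNS encodings (whose layout follows the target's
 * native time_t) or the _NEW variants that always carry 64-bit seconds.
 * The host ioctl is the same in both cases; only the copy-out format
 * differs, which is why the target cmd value is inspected instead of
 * ie->host_cmd.  Illustrative guest use of the legacy form:
 *
 *     struct timeval tv;
 *     ioctl(sock, SIOCGSTAMP, &tv);
 */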
5573 
5574 #ifdef TIOCGPTPEER
5575 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5576                                      int fd, int cmd, abi_long arg)
5577 {
5578     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5579     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5580 }
5581 #endif
5582 
5583 #ifdef HAVE_DRM_H
5584 
5585 static void unlock_drm_version(struct drm_version *host_ver,
5586                                struct target_drm_version *target_ver,
5587                                bool copy)
5588 {
5589     unlock_user(host_ver->name, target_ver->name,
5590                                 copy ? host_ver->name_len : 0);
5591     unlock_user(host_ver->date, target_ver->date,
5592                                 copy ? host_ver->date_len : 0);
5593     unlock_user(host_ver->desc, target_ver->desc,
5594                                 copy ? host_ver->desc_len : 0);
5595 }
5596 
5597 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5598                                           struct target_drm_version *target_ver)
5599 {
5600     memset(host_ver, 0, sizeof(*host_ver));
5601 
5602     __get_user(host_ver->name_len, &target_ver->name_len);
5603     if (host_ver->name_len) {
5604         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5605                                    target_ver->name_len, 0);
5606         if (!host_ver->name) {
5607             return -EFAULT;
5608         }
5609     }
5610 
5611     __get_user(host_ver->date_len, &target_ver->date_len);
5612     if (host_ver->date_len) {
5613         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5614                                    target_ver->date_len, 0);
5615         if (!host_ver->date) {
5616             goto err;
5617         }
5618     }
5619 
5620     __get_user(host_ver->desc_len, &target_ver->desc_len);
5621     if (host_ver->desc_len) {
5622         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5623                                    target_ver->desc_len, 0);
5624         if (!host_ver->desc) {
5625             goto err;
5626         }
5627     }
5628 
5629     return 0;
5630 err:
5631     unlock_drm_version(host_ver, target_ver, false);
5632     return -EFAULT;
5633 }
5634 
5635 static inline void host_to_target_drmversion(
5636                                           struct target_drm_version *target_ver,
5637                                           struct drm_version *host_ver)
5638 {
5639     __put_user(host_ver->version_major, &target_ver->version_major);
5640     __put_user(host_ver->version_minor, &target_ver->version_minor);
5641     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5642     __put_user(host_ver->name_len, &target_ver->name_len);
5643     __put_user(host_ver->date_len, &target_ver->date_len);
5644     __put_user(host_ver->desc_len, &target_ver->desc_len);
5645     unlock_drm_version(host_ver, target_ver, true);
5646 }
5647 
5648 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5649                              int fd, int cmd, abi_long arg)
5650 {
5651     struct drm_version *ver;
5652     struct target_drm_version *target_ver;
5653     abi_long ret;
5654 
5655     switch (ie->host_cmd) {
5656     case DRM_IOCTL_VERSION:
5657         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5658             return -TARGET_EFAULT;
5659         }
5660         ver = (struct drm_version *)buf_temp;
5661         ret = target_to_host_drmversion(ver, target_ver);
5662         if (!is_error(ret)) {
5663             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5664             if (is_error(ret)) {
5665                 unlock_drm_version(ver, target_ver, false);
5666             } else {
5667                 host_to_target_drmversion(target_ver, ver);
5668             }
5669         }
5670         unlock_user_struct(target_ver, arg, 0);
5671         return ret;
5672     }
5673     return -TARGET_ENOSYS;
5674 }
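/*
 * DRM_IOCTL_VERSION is the usual call-twice interface: the guest first
 * passes zero name/date/desc lengths to learn the sizes, then calls
 * again with suitably sized buffers, for example
 *
 *     struct drm_version v = { 0 };
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);
 *     v.name = malloc(v.name_len);
 *     ... likewise for date and desc ...
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);
 *
 * target_to_host_drmversion() therefore only locks guest buffers whose
 * length fields are non-zero, and unlock_drm_version() copies the data
 * back only on success.
 */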
5675 
5676 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5677                                            struct drm_i915_getparam *gparam,
5678                                            int fd, abi_long arg)
5679 {
5680     abi_long ret;
5681     int value;
5682     struct target_drm_i915_getparam *target_gparam;
5683 
5684     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5685         return -TARGET_EFAULT;
5686     }
5687 
5688     __get_user(gparam->param, &target_gparam->param);
5689     gparam->value = &value;
5690     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5691     put_user_s32(value, target_gparam->value);
5692 
5693     unlock_user_struct(target_gparam, arg, 0);
5694     return ret;
5695 }
5696 
5697 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5698                                   int fd, int cmd, abi_long arg)
5699 {
5700     switch (ie->host_cmd) {
5701     case DRM_IOCTL_I915_GETPARAM:
5702         return do_ioctl_drm_i915_getparam(ie,
5703                                           (struct drm_i915_getparam *)buf_temp,
5704                                           fd, arg);
5705     default:
5706         return -TARGET_ENOSYS;
5707     }
5708 }
5709 
5710 #endif
5711 
5712 IOCTLEntry ioctl_entries[] = {
5713 #define IOCTL(cmd, access, ...) \
5714     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5715 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5716     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5717 #define IOCTL_IGNORE(cmd) \
5718     { TARGET_ ## cmd, 0, #cmd },
5719 #include "ioctls.h"
5720     { 0, 0, },
5721 };
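/*
 * Illustrative expansion: a plain line in "ioctls.h" along the lines of
 *
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * becomes { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 * { MK_PTR(TYPE_ULONG) } } and is handled by the generic code in
 * do_ioctl() below.  IOCTL_SPECIAL() entries additionally carry a
 * do_ioctl callback (as used by the fiemap/ifconf/dm/... helpers
 * above), and IOCTL_IGNORE() entries have host_cmd == 0 so they fail
 * with -TARGET_ENOSYS.
 */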
5722 
5723 /* ??? Implement proper locking for ioctls.  */
5724 /* do_ioctl() must return target values and target errnos. */
5725 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5726 {
5727     const IOCTLEntry *ie;
5728     const argtype *arg_type;
5729     abi_long ret;
5730     uint8_t buf_temp[MAX_STRUCT_SIZE];
5731     int target_size;
5732     void *argptr;
5733 
5734     ie = ioctl_entries;
5735     for (;;) {
5736         if (ie->target_cmd == 0) {
5737             qemu_log_mask(
5738                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5739             return -TARGET_ENOSYS;
5740         }
5741         if (ie->target_cmd == cmd)
5742             break;
5743         ie++;
5744     }
5745     arg_type = ie->arg_type;
5746     if (ie->do_ioctl) {
5747         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5748     } else if (!ie->host_cmd) {
5749         /* Some architectures define BSD ioctls in their headers
5750            that are not implemented in Linux.  */
5751         return -TARGET_ENOSYS;
5752     }
5753 
5754     switch (arg_type[0]) {
5755     case TYPE_NULL:
5756         /* no argument */
5757         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5758         break;
5759     case TYPE_PTRVOID:
5760     case TYPE_INT:
5761     case TYPE_LONG:
5762     case TYPE_ULONG:
5763         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5764         break;
5765     case TYPE_PTR:
5766         arg_type++;
5767         target_size = thunk_type_size(arg_type, 0);
5768         switch (ie->access) {
5769         case IOC_R:
5770             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5771             if (!is_error(ret)) {
5772                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5773                 if (!argptr)
5774                     return -TARGET_EFAULT;
5775                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5776                 unlock_user(argptr, arg, target_size);
5777             }
5778             break;
5779         case IOC_W:
5780             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5781             if (!argptr)
5782                 return -TARGET_EFAULT;
5783             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5784             unlock_user(argptr, arg, 0);
5785             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5786             break;
5787         default:
5788         case IOC_RW:
5789             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5790             if (!argptr)
5791                 return -TARGET_EFAULT;
5792             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5793             unlock_user(argptr, arg, 0);
5794             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5795             if (!is_error(ret)) {
5796                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5797                 if (!argptr)
5798                     return -TARGET_EFAULT;
5799                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5800                 unlock_user(argptr, arg, target_size);
5801             }
5802             break;
5803         }
5804         break;
5805     default:
5806         qemu_log_mask(LOG_UNIMP,
5807                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5808                       (long)cmd, arg_type[0]);
5809         ret = -TARGET_ENOSYS;
5810         break;
5811     }
5812     return ret;
5813 }
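/*
 * Sketch of the generic IOC_RW path above, assuming a hypothetical
 * entry described as MK_PTR(MK_STRUCT(STRUCT_foo)):
 *
 *     lock_user(VERIFY_READ, ...)  + thunk_convert(..., THUNK_HOST)
 *     safe_ioctl(fd, ie->host_cmd, buf_temp)
 *     lock_user(VERIFY_WRITE, ...) + thunk_convert(..., THUNK_TARGET)
 *
 * Adding a new structure-based ioctl therefore normally needs only an
 * entry in "ioctls.h" and, if required, a STRUCT() description in
 * "syscall_types.h", not a special handler.
 */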
5814 
5815 static const bitmask_transtbl iflag_tbl[] = {
5816         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5817         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5818         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5819         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5820         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5821         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5822         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5823         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5824         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5825         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5826         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5827         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5828         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5829         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5830         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5831         { 0, 0, 0, 0 }
5832 };
5833 
5834 static const bitmask_transtbl oflag_tbl[] = {
5835 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5836 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5837 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5838 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5839 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5840 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5841 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5842 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5843 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5844 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5845 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5846 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5847 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5848 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5849 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5850 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5851 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5852 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5853 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5854 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5855 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5856 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5857 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5858 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5859 	{ 0, 0, 0, 0 }
5860 };
5861 
5862 static const bitmask_transtbl cflag_tbl[] = {
5863 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5864 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5865 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5866 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5867 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5868 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5869 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5870 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5871 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5872 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5873 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5874 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5875 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5876 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5877 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5878 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5879 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5880 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5881 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5882 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5883 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5884 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5885 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5886 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5887 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5888 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5889 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5890 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5891 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5892 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5893 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5894 	{ 0, 0, 0, 0 }
5895 };
5896 
5897 static const bitmask_transtbl lflag_tbl[] = {
5898   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5899   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5900   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5901   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5902   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5903   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5904   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5905   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5906   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5907   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5908   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5909   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5910   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5911   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5912   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5913   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5914   { 0, 0, 0, 0 }
5915 };
5916 
5917 static void target_to_host_termios (void *dst, const void *src)
5918 {
5919     struct host_termios *host = dst;
5920     const struct target_termios *target = src;
5921 
5922     host->c_iflag =
5923         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5924     host->c_oflag =
5925         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5926     host->c_cflag =
5927         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5928     host->c_lflag =
5929         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5930     host->c_line = target->c_line;
5931 
5932     memset(host->c_cc, 0, sizeof(host->c_cc));
5933     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5934     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5935     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5936     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5937     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5938     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5939     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5940     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5941     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5942     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5943     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5944     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5945     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5946     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5947     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5948     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5949     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5950 }
5951 
5952 static void host_to_target_termios (void *dst, const void *src)
5953 {
5954     struct target_termios *target = dst;
5955     const struct host_termios *host = src;
5956 
5957     target->c_iflag =
5958         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5959     target->c_oflag =
5960         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5961     target->c_cflag =
5962         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5963     target->c_lflag =
5964         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5965     target->c_line = host->c_line;
5966 
5967     memset(target->c_cc, 0, sizeof(target->c_cc));
5968     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5969     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5970     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5971     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5972     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5973     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5974     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5975     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5976     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5977     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5978     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5979     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5980     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5981     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5982     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5983     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5984     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5985 }
5986 
5987 static const StructEntry struct_termios_def = {
5988     .convert = { host_to_target_termios, target_to_host_termios },
5989     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5990     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5991     .print = print_termios,
5992 };
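/*
 * The bitmask_transtbl tables above are quadruples of
 * { target_mask, target_bits, host_mask, host_bits }; a conversion ORs
 * host_bits into the result whenever (flags & target_mask) equals
 * target_bits (and symmetrically for the other direction).  As an
 * illustrative example, a target c_cflag of TARGET_B9600 | TARGET_CS8
 * comes out as B9600 | CS8 on the host even when the numeric encodings
 * of the baud-rate and character-size fields differ between the two
 * ABIs.  struct_termios_def registers the two converters above so that
 * the termios-carrying ioctls (TCGETS and friends) are translated as a
 * whole structure rather than field by field.
 */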
5993 
5994 static bitmask_transtbl mmap_flags_tbl[] = {
5995     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5996     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5997     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5998     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5999       MAP_ANONYMOUS, MAP_ANONYMOUS },
6000     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6001       MAP_GROWSDOWN, MAP_GROWSDOWN },
6002     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6003       MAP_DENYWRITE, MAP_DENYWRITE },
6004     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6005       MAP_EXECUTABLE, MAP_EXECUTABLE },
6006     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6007     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6008       MAP_NORESERVE, MAP_NORESERVE },
6009     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6010     /* MAP_STACK had been ignored by the kernel for quite some time.
6011        Recognize it for the target insofar as we do not want to pass
6012        it through to the host.  */
6013     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6014     { 0, 0, 0, 0 }
6015 };
6016 
6017 /*
6018  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6019  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6020  */
6021 #if defined(TARGET_I386)
6022 
6023 /* NOTE: there is really only one LDT shared by all the threads */
6024 static uint8_t *ldt_table;
6025 
6026 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6027 {
6028     int size;
6029     void *p;
6030 
6031     if (!ldt_table)
6032         return 0;
6033     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6034     if (size > bytecount)
6035         size = bytecount;
6036     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6037     if (!p)
6038         return -TARGET_EFAULT;
6039     /* ??? Should this be byteswapped?  */
6040     memcpy(p, ldt_table, size);
6041     unlock_user(p, ptr, size);
6042     return size;
6043 }
6044 
6045 /* XXX: add locking support */
6046 static abi_long write_ldt(CPUX86State *env,
6047                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6048 {
6049     struct target_modify_ldt_ldt_s ldt_info;
6050     struct target_modify_ldt_ldt_s *target_ldt_info;
6051     int seg_32bit, contents, read_exec_only, limit_in_pages;
6052     int seg_not_present, useable, lm;
6053     uint32_t *lp, entry_1, entry_2;
6054 
6055     if (bytecount != sizeof(ldt_info))
6056         return -TARGET_EINVAL;
6057     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6058         return -TARGET_EFAULT;
6059     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6060     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6061     ldt_info.limit = tswap32(target_ldt_info->limit);
6062     ldt_info.flags = tswap32(target_ldt_info->flags);
6063     unlock_user_struct(target_ldt_info, ptr, 0);
6064 
6065     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6066         return -TARGET_EINVAL;
6067     seg_32bit = ldt_info.flags & 1;
6068     contents = (ldt_info.flags >> 1) & 3;
6069     read_exec_only = (ldt_info.flags >> 3) & 1;
6070     limit_in_pages = (ldt_info.flags >> 4) & 1;
6071     seg_not_present = (ldt_info.flags >> 5) & 1;
6072     useable = (ldt_info.flags >> 6) & 1;
6073 #ifdef TARGET_ABI32
6074     lm = 0;
6075 #else
6076     lm = (ldt_info.flags >> 7) & 1;
6077 #endif
6078     if (contents == 3) {
6079         if (oldmode)
6080             return -TARGET_EINVAL;
6081         if (seg_not_present == 0)
6082             return -TARGET_EINVAL;
6083     }
6084     /* allocate the LDT */
6085     if (!ldt_table) {
6086         env->ldt.base = target_mmap(0,
6087                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6088                                     PROT_READ|PROT_WRITE,
6089                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6090         if (env->ldt.base == -1)
6091             return -TARGET_ENOMEM;
6092         memset(g2h(env->ldt.base), 0,
6093                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6094         env->ldt.limit = 0xffff;
6095         ldt_table = g2h(env->ldt.base);
6096     }
6097 
6098     /* NOTE: same code as Linux kernel */
6099     /* Allow LDTs to be cleared by the user. */
6100     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6101         if (oldmode ||
6102             (contents == 0		&&
6103              read_exec_only == 1	&&
6104              seg_32bit == 0		&&
6105              limit_in_pages == 0	&&
6106              seg_not_present == 1	&&
6107              useable == 0 )) {
6108             entry_1 = 0;
6109             entry_2 = 0;
6110             goto install;
6111         }
6112     }
6113 
6114     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6115         (ldt_info.limit & 0x0ffff);
6116     entry_2 = (ldt_info.base_addr & 0xff000000) |
6117         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6118         (ldt_info.limit & 0xf0000) |
6119         ((read_exec_only ^ 1) << 9) |
6120         (contents << 10) |
6121         ((seg_not_present ^ 1) << 15) |
6122         (seg_32bit << 22) |
6123         (limit_in_pages << 23) |
6124         (lm << 21) |
6125         0x7000;
6126     if (!oldmode)
6127         entry_2 |= (useable << 20);
6128 
6129     /* Install the new entry ...  */
6130 install:
6131     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6132     lp[0] = tswap32(entry_1);
6133     lp[1] = tswap32(entry_2);
6134     return 0;
6135 }
6136 
6137 /* specific and weird i386 syscalls */
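/*
 * do_modify_ldt(): func == 0 reads the LDT back, func == 1 writes an
 * entry using the legacy format, func == 0x11 writes an entry using the
 * new format; anything else returns -TARGET_ENOSYS, as the kernel does.
 *
 * Illustrative guest-side use (hypothetical, not taken from this file):
 *     struct user_desc ud = { .entry_number = 0, ... };
 *     syscall(SYS_modify_ldt, 0x11, &ud, sizeof(ud));
 */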
6138 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6139                               unsigned long bytecount)
6140 {
6141     abi_long ret;
6142 
6143     switch (func) {
6144     case 0:
6145         ret = read_ldt(ptr, bytecount);
6146         break;
6147     case 1:
6148         ret = write_ldt(env, ptr, bytecount, 1);
6149         break;
6150     case 0x11:
6151         ret = write_ldt(env, ptr, bytecount, 0);
6152         break;
6153     default:
6154         ret = -TARGET_ENOSYS;
6155         break;
6156     }
6157     return ret;
6158 }
6159 
6160 #if defined(TARGET_ABI32)
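/*
 * do_set_thread_area(): TARGET_ABI32 implementation of set_thread_area.
 * If entry_number is -1, the first free GDT slot in the TLS range is
 * allocated and written back to the guest, mirroring the kernel's
 * behaviour; otherwise the requested slot is validated against
 * TARGET_GDT_ENTRY_TLS_MIN/MAX before the descriptor is installed.
 */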
6161 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6162 {
6163     uint64_t *gdt_table = g2h(env->gdt.base);
6164     struct target_modify_ldt_ldt_s ldt_info;
6165     struct target_modify_ldt_ldt_s *target_ldt_info;
6166     int seg_32bit, contents, read_exec_only, limit_in_pages;
6167     int seg_not_present, useable, lm;
6168     uint32_t *lp, entry_1, entry_2;
6169     int i;
6170 
6171     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6172     if (!target_ldt_info)
6173         return -TARGET_EFAULT;
6174     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6175     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6176     ldt_info.limit = tswap32(target_ldt_info->limit);
6177     ldt_info.flags = tswap32(target_ldt_info->flags);
6178     if (ldt_info.entry_number == -1) {
6179         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6180             if (gdt_table[i] == 0) {
6181                 ldt_info.entry_number = i;
6182                 target_ldt_info->entry_number = tswap32(i);
6183                 break;
6184             }
6185         }
6186     }
6187     unlock_user_struct(target_ldt_info, ptr, 1);
6188 
6189     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6190         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6191            return -TARGET_EINVAL;
6192     seg_32bit = ldt_info.flags & 1;
6193     contents = (ldt_info.flags >> 1) & 3;
6194     read_exec_only = (ldt_info.flags >> 3) & 1;
6195     limit_in_pages = (ldt_info.flags >> 4) & 1;
6196     seg_not_present = (ldt_info.flags >> 5) & 1;
6197     useable = (ldt_info.flags >> 6) & 1;
6198 #ifdef TARGET_ABI32
6199     lm = 0;
6200 #else
6201     lm = (ldt_info.flags >> 7) & 1;
6202 #endif
6203 
6204     if (contents == 3) {
6205         if (seg_not_present == 0)
6206             return -TARGET_EINVAL;
6207     }
6208 
6209     /* NOTE: same code as Linux kernel */
6210     /* Allow LDTs to be cleared by the user. */
6211     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6212         if ((contents == 0             &&
6213              read_exec_only == 1       &&
6214              seg_32bit == 0            &&
6215              limit_in_pages == 0       &&
6216              seg_not_present == 1      &&
6217              useable == 0 )) {
6218             entry_1 = 0;
6219             entry_2 = 0;
6220             goto install;
6221         }
6222     }
6223 
6224     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6225         (ldt_info.limit & 0x0ffff);
6226     entry_2 = (ldt_info.base_addr & 0xff000000) |
6227         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6228         (ldt_info.limit & 0xf0000) |
6229         ((read_exec_only ^ 1) << 9) |
6230         (contents << 10) |
6231         ((seg_not_present ^ 1) << 15) |
6232         (seg_32bit << 22) |
6233         (limit_in_pages << 23) |
6234         (useable << 20) |
6235         (lm << 21) |
6236         0x7000;
6237 
6238     /* Install the new entry ...  */
6239 install:
6240     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6241     lp[0] = tswap32(entry_1);
6242     lp[1] = tswap32(entry_2);
6243     return 0;
6244 }
6245 
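/*
 * do_get_thread_area(): inverse of do_set_thread_area().  Reads the two
 * descriptor words for the requested TLS GDT slot and unpacks them back
 * into the guest's target_modify_ldt_ldt_s layout.
 */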
6246 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6247 {
6248     struct target_modify_ldt_ldt_s *target_ldt_info;
6249     uint64_t *gdt_table = g2h(env->gdt.base);
6250     uint32_t base_addr, limit, flags;
6251     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6252     int seg_not_present, useable, lm;
6253     uint32_t *lp, entry_1, entry_2;
6254 
6255     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6256     if (!target_ldt_info)
6257         return -TARGET_EFAULT;
6258     idx = tswap32(target_ldt_info->entry_number);
6259     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6260         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6261         unlock_user_struct(target_ldt_info, ptr, 1);
6262         return -TARGET_EINVAL;
6263     }
6264     lp = (uint32_t *)(gdt_table + idx);
6265     entry_1 = tswap32(lp[0]);
6266     entry_2 = tswap32(lp[1]);
6267 
6268     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6269     contents = (entry_2 >> 10) & 3;
6270     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6271     seg_32bit = (entry_2 >> 22) & 1;
6272     limit_in_pages = (entry_2 >> 23) & 1;
6273     useable = (entry_2 >> 20) & 1;
6274 #ifdef TARGET_ABI32
6275     lm = 0;
6276 #else
6277     lm = (entry_2 >> 21) & 1;
6278 #endif
6279     flags = (seg_32bit << 0) | (contents << 1) |
6280         (read_exec_only << 3) | (limit_in_pages << 4) |
6281         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6282     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6283     base_addr = (entry_1 >> 16) |
6284         (entry_2 & 0xff000000) |
6285         ((entry_2 & 0xff) << 16);
6286     target_ldt_info->base_addr = tswapal(base_addr);
6287     target_ldt_info->limit = tswap32(limit);
6288     target_ldt_info->flags = tswap32(flags);
6289     unlock_user_struct(target_ldt_info, ptr, 1);
6290     return 0;
6291 }
6292 
6293 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6294 {
6295     return -TARGET_ENOSYS;
6296 }
6297 #else
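/*
 * do_arch_prctl(): 64-bit only.  TARGET_ARCH_SET_FS/GS load a null
 * selector and set the segment base directly; TARGET_ARCH_GET_FS/GS
 * write the current base back to guest memory.
 */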
6298 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6299 {
6300     abi_long ret = 0;
6301     abi_ulong val;
6302     int idx;
6303 
6304     switch(code) {
6305     case TARGET_ARCH_SET_GS:
6306     case TARGET_ARCH_SET_FS:
6307         if (code == TARGET_ARCH_SET_GS)
6308             idx = R_GS;
6309         else
6310             idx = R_FS;
6311         cpu_x86_load_seg(env, idx, 0);
6312         env->segs[idx].base = addr;
6313         break;
6314     case TARGET_ARCH_GET_GS:
6315     case TARGET_ARCH_GET_FS:
6316         if (code == TARGET_ARCH_GET_GS)
6317             idx = R_GS;
6318         else
6319             idx = R_FS;
6320         val = env->segs[idx].base;
6321         if (put_user(val, addr, abi_ulong))
6322             ret = -TARGET_EFAULT;
6323         break;
6324     default:
6325         ret = -TARGET_EINVAL;
6326         break;
6327     }
6328     return ret;
6329 }
6330 #endif /* defined(TARGET_ABI32) */
6331 
6332 #endif /* defined(TARGET_I386) */
6333 
6334 #define NEW_STACK_SIZE 0x40000
6335 
6336 
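/*
 * clone_lock serialises thread creation: clone_func() briefly takes it
 * after signalling readiness, so the child cannot start running until
 * do_fork() has finished setting up its state.  new_thread_info is the
 * argument block handed to clone_func(); the parent blocks on 'cond'
 * until the child has published its TID, and 'sigmask' is the signal
 * mask to restore in the child once setup is complete.
 */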
6337 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6338 typedef struct {
6339     CPUArchState *env;
6340     pthread_mutex_t mutex;
6341     pthread_cond_t cond;
6342     pthread_t thread;
6343     uint32_t tid;
6344     abi_ulong child_tidptr;
6345     abi_ulong parent_tidptr;
6346     sigset_t sigmask;
6347 } new_thread_info;
6348 
6349 static void *clone_func(void *arg)
6350 {
6351     new_thread_info *info = arg;
6352     CPUArchState *env;
6353     CPUState *cpu;
6354     TaskState *ts;
6355 
6356     rcu_register_thread();
6357     tcg_register_thread();
6358     env = info->env;
6359     cpu = env_cpu(env);
6360     thread_cpu = cpu;
6361     ts = (TaskState *)cpu->opaque;
6362     info->tid = sys_gettid();
6363     task_settid(ts);
6364     if (info->child_tidptr)
6365         put_user_u32(info->tid, info->child_tidptr);
6366     if (info->parent_tidptr)
6367         put_user_u32(info->tid, info->parent_tidptr);
6368     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6369     /* Enable signals.  */
6370     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6371     /* Signal to the parent that we're ready.  */
6372     pthread_mutex_lock(&info->mutex);
6373     pthread_cond_broadcast(&info->cond);
6374     pthread_mutex_unlock(&info->mutex);
6375     /* Wait until the parent has finished initializing the tls state.  */
6376     pthread_mutex_lock(&clone_lock);
6377     pthread_mutex_unlock(&clone_lock);
6378     cpu_loop(env);
6379     /* never exits */
6380     return NULL;
6381 }
6382 
6383 /* do_fork() must return host values and target errnos (unlike most
6384    do_*() functions). */
6385 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6386                    abi_ulong parent_tidptr, target_ulong newtls,
6387                    abi_ulong child_tidptr)
6388 {
6389     CPUState *cpu = env_cpu(env);
6390     int ret;
6391     TaskState *ts;
6392     CPUState *new_cpu;
6393     CPUArchState *new_env;
6394     sigset_t sigmask;
6395 
6396     flags &= ~CLONE_IGNORED_FLAGS;
6397 
6398     /* Emulate vfork() with fork() */
6399     if (flags & CLONE_VFORK)
6400         flags &= ~(CLONE_VFORK | CLONE_VM);
6401 
6402     if (flags & CLONE_VM) {
6403         TaskState *parent_ts = (TaskState *)cpu->opaque;
6404         new_thread_info info;
6405         pthread_attr_t attr;
6406 
6407         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6408             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6409             return -TARGET_EINVAL;
6410         }
6411 
6412         ts = g_new0(TaskState, 1);
6413         init_task_state(ts);
6414 
6415         /* Grab a mutex so that thread setup appears atomic.  */
6416         pthread_mutex_lock(&clone_lock);
6417 
6418         /* we create a new CPU instance. */
6419         new_env = cpu_copy(env);
6420         /* Init regs that differ from the parent.  */
6421         cpu_clone_regs_child(new_env, newsp, flags);
6422         cpu_clone_regs_parent(env, flags);
6423         new_cpu = env_cpu(new_env);
6424         new_cpu->opaque = ts;
6425         ts->bprm = parent_ts->bprm;
6426         ts->info = parent_ts->info;
6427         ts->signal_mask = parent_ts->signal_mask;
6428 
6429         if (flags & CLONE_CHILD_CLEARTID) {
6430             ts->child_tidptr = child_tidptr;
6431         }
6432 
6433         if (flags & CLONE_SETTLS) {
6434             cpu_set_tls (new_env, newtls);
6435         }
6436 
6437         memset(&info, 0, sizeof(info));
6438         pthread_mutex_init(&info.mutex, NULL);
6439         pthread_mutex_lock(&info.mutex);
6440         pthread_cond_init(&info.cond, NULL);
6441         info.env = new_env;
6442         if (flags & CLONE_CHILD_SETTID) {
6443             info.child_tidptr = child_tidptr;
6444         }
6445         if (flags & CLONE_PARENT_SETTID) {
6446             info.parent_tidptr = parent_tidptr;
6447         }
6448 
6449         ret = pthread_attr_init(&attr);
6450         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6451         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6452         /* It is not safe to deliver signals until the child has finished
6453            initializing, so temporarily block all signals.  */
6454         sigfillset(&sigmask);
6455         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6456         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6457 
6458         /* If this is our first additional thread, we need to ensure we
6459          * generate code for parallel execution and flush old translations.
6460          */
6461         if (!parallel_cpus) {
6462             parallel_cpus = true;
6463             tb_flush(cpu);
6464         }
6465 
6466         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6467         /* TODO: Free new CPU state if thread creation failed.  */
6468 
6469         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6470         pthread_attr_destroy(&attr);
6471         if (ret == 0) {
6472             /* Wait for the child to initialize.  */
6473             pthread_cond_wait(&info.cond, &info.mutex);
6474             ret = info.tid;
6475         } else {
6476             ret = -1;
6477         }
6478         pthread_mutex_unlock(&info.mutex);
6479         pthread_cond_destroy(&info.cond);
6480         pthread_mutex_destroy(&info.mutex);
6481         pthread_mutex_unlock(&clone_lock);
6482     } else {
6483         /* Without CLONE_VM, we treat this as a plain fork. */
6484         if (flags & CLONE_INVALID_FORK_FLAGS) {
6485             return -TARGET_EINVAL;
6486         }
6487 
6488         /* We can't support custom termination signals */
6489         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6490             return -TARGET_EINVAL;
6491         }
6492 
6493         if (block_signals()) {
6494             return -TARGET_ERESTARTSYS;
6495         }
6496 
6497         fork_start();
6498         ret = fork();
6499         if (ret == 0) {
6500             /* Child Process.  */
6501             cpu_clone_regs_child(env, newsp, flags);
6502             fork_end(1);
6503             /* There is a race condition here.  The parent process could
6504                theoretically read the TID in the child process before the child
6505                tid is set.  This would require using either ptrace
6506                (not implemented) or having *_tidptr point at a shared memory
6507                mapping.  We can't repeat the spinlock hack used above because
6508                the child process gets its own copy of the lock.  */
6509             if (flags & CLONE_CHILD_SETTID)
6510                 put_user_u32(sys_gettid(), child_tidptr);
6511             if (flags & CLONE_PARENT_SETTID)
6512                 put_user_u32(sys_gettid(), parent_tidptr);
6513             ts = (TaskState *)cpu->opaque;
6514             if (flags & CLONE_SETTLS)
6515                 cpu_set_tls (env, newtls);
6516             if (flags & CLONE_CHILD_CLEARTID)
6517                 ts->child_tidptr = child_tidptr;
6518         } else {
6519             cpu_clone_regs_parent(env, flags);
6520             fork_end(0);
6521         }
6522     }
6523     return ret;
6524 }
6525 
6526 /* Warning: does not handle Linux-specific flags... */
6527 static int target_to_host_fcntl_cmd(int cmd)
6528 {
6529     int ret;
6530 
6531     switch(cmd) {
6532     case TARGET_F_DUPFD:
6533     case TARGET_F_GETFD:
6534     case TARGET_F_SETFD:
6535     case TARGET_F_GETFL:
6536     case TARGET_F_SETFL:
6537     case TARGET_F_OFD_GETLK:
6538     case TARGET_F_OFD_SETLK:
6539     case TARGET_F_OFD_SETLKW:
6540         ret = cmd;
6541         break;
6542     case TARGET_F_GETLK:
6543         ret = F_GETLK64;
6544         break;
6545     case TARGET_F_SETLK:
6546         ret = F_SETLK64;
6547         break;
6548     case TARGET_F_SETLKW:
6549         ret = F_SETLKW64;
6550         break;
6551     case TARGET_F_GETOWN:
6552         ret = F_GETOWN;
6553         break;
6554     case TARGET_F_SETOWN:
6555         ret = F_SETOWN;
6556         break;
6557     case TARGET_F_GETSIG:
6558         ret = F_GETSIG;
6559         break;
6560     case TARGET_F_SETSIG:
6561         ret = F_SETSIG;
6562         break;
6563 #if TARGET_ABI_BITS == 32
6564     case TARGET_F_GETLK64:
6565         ret = F_GETLK64;
6566         break;
6567     case TARGET_F_SETLK64:
6568         ret = F_SETLK64;
6569         break;
6570     case TARGET_F_SETLKW64:
6571         ret = F_SETLKW64;
6572         break;
6573 #endif
6574     case TARGET_F_SETLEASE:
6575         ret = F_SETLEASE;
6576         break;
6577     case TARGET_F_GETLEASE:
6578         ret = F_GETLEASE;
6579         break;
6580 #ifdef F_DUPFD_CLOEXEC
6581     case TARGET_F_DUPFD_CLOEXEC:
6582         ret = F_DUPFD_CLOEXEC;
6583         break;
6584 #endif
6585     case TARGET_F_NOTIFY:
6586         ret = F_NOTIFY;
6587         break;
6588 #ifdef F_GETOWN_EX
6589     case TARGET_F_GETOWN_EX:
6590         ret = F_GETOWN_EX;
6591         break;
6592 #endif
6593 #ifdef F_SETOWN_EX
6594     case TARGET_F_SETOWN_EX:
6595         ret = F_SETOWN_EX;
6596         break;
6597 #endif
6598 #ifdef F_SETPIPE_SZ
6599     case TARGET_F_SETPIPE_SZ:
6600         ret = F_SETPIPE_SZ;
6601         break;
6602     case TARGET_F_GETPIPE_SZ:
6603         ret = F_GETPIPE_SZ;
6604         break;
6605 #endif
6606     default:
6607         ret = -TARGET_EINVAL;
6608         break;
6609     }
6610 
6611 #if defined(__powerpc64__)
6612     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6613      * the kernel does not support.  The glibc fcntl() wrapper adjusts them
6614      * to 5, 6 and 7 before making the syscall.  Since we make the syscall
6615      * directly, adjust to what the kernel supports.
6616      */
6617     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6618         ret -= F_GETLK64 - 5;
6619     }
6620 #endif
6621 
6622     return ret;
6623 }
6624 
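/*
 * The flock type constants differ between guest and host ABIs, so the
 * translation table below is expanded twice with different definitions
 * of TRANSTBL_CONVERT: once mapping TARGET_F_* to host F_* and once
 * mapping back.  Unknown host values are passed through unchanged,
 * while unknown guest values yield -TARGET_EINVAL.
 */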
6625 #define FLOCK_TRANSTBL \
6626     switch (type) { \
6627     TRANSTBL_CONVERT(F_RDLCK); \
6628     TRANSTBL_CONVERT(F_WRLCK); \
6629     TRANSTBL_CONVERT(F_UNLCK); \
6630     TRANSTBL_CONVERT(F_EXLCK); \
6631     TRANSTBL_CONVERT(F_SHLCK); \
6632     }
6633 
6634 static int target_to_host_flock(int type)
6635 {
6636 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6637     FLOCK_TRANSTBL
6638 #undef  TRANSTBL_CONVERT
6639     return -TARGET_EINVAL;
6640 }
6641 
6642 static int host_to_target_flock(int type)
6643 {
6644 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6645     FLOCK_TRANSTBL
6646 #undef  TRANSTBL_CONVERT
6647     /* If we don't know how to convert the value coming from
6648      * the host, copy it to the target field as-is.
6649      */
6650     return type;
6651 }
6652 
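/*
 * Converters between the guest 'struct flock'/'struct flock64' layouts
 * and the host struct flock64 used for the fcntl locking commands.  The
 * __get_user()/__put_user() accessors handle any size and byte-order
 * differences field by field.
 */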
6653 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6654                                             abi_ulong target_flock_addr)
6655 {
6656     struct target_flock *target_fl;
6657     int l_type;
6658 
6659     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6660         return -TARGET_EFAULT;
6661     }
6662 
6663     __get_user(l_type, &target_fl->l_type);
6664     l_type = target_to_host_flock(l_type);
6665     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6666         return l_type;
6667     }
6668     fl->l_type = l_type;
6669     __get_user(fl->l_whence, &target_fl->l_whence);
6670     __get_user(fl->l_start, &target_fl->l_start);
6671     __get_user(fl->l_len, &target_fl->l_len);
6672     __get_user(fl->l_pid, &target_fl->l_pid);
6673     unlock_user_struct(target_fl, target_flock_addr, 0);
6674     return 0;
6675 }
6676 
6677 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6678                                           const struct flock64 *fl)
6679 {
6680     struct target_flock *target_fl;
6681     short l_type;
6682 
6683     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6684         return -TARGET_EFAULT;
6685     }
6686 
6687     l_type = host_to_target_flock(fl->l_type);
6688     __put_user(l_type, &target_fl->l_type);
6689     __put_user(fl->l_whence, &target_fl->l_whence);
6690     __put_user(fl->l_start, &target_fl->l_start);
6691     __put_user(fl->l_len, &target_fl->l_len);
6692     __put_user(fl->l_pid, &target_fl->l_pid);
6693     unlock_user_struct(target_fl, target_flock_addr, 1);
6694     return 0;
6695 }
6696 
6697 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6698 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6699 
6700 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6701 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6702                                                    abi_ulong target_flock_addr)
6703 {
6704     struct target_oabi_flock64 *target_fl;
6705     int l_type;
6706 
6707     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6708         return -TARGET_EFAULT;
6709     }
6710 
6711     __get_user(l_type, &target_fl->l_type);
6712     l_type = target_to_host_flock(l_type);
6713     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6714         return l_type;
6715     }
6716     fl->l_type = l_type;
6717     __get_user(fl->l_whence, &target_fl->l_whence);
6718     __get_user(fl->l_start, &target_fl->l_start);
6719     __get_user(fl->l_len, &target_fl->l_len);
6720     __get_user(fl->l_pid, &target_fl->l_pid);
6721     unlock_user_struct(target_fl, target_flock_addr, 0);
6722     return 0;
6723 }
6724 
6725 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6726                                                  const struct flock64 *fl)
6727 {
6728     struct target_oabi_flock64 *target_fl;
6729     short l_type;
6730 
6731     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6732         return -TARGET_EFAULT;
6733     }
6734 
6735     l_type = host_to_target_flock(fl->l_type);
6736     __put_user(l_type, &target_fl->l_type);
6737     __put_user(fl->l_whence, &target_fl->l_whence);
6738     __put_user(fl->l_start, &target_fl->l_start);
6739     __put_user(fl->l_len, &target_fl->l_len);
6740     __put_user(fl->l_pid, &target_fl->l_pid);
6741     unlock_user_struct(target_fl, target_flock_addr, 1);
6742     return 0;
6743 }
6744 #endif
6745 
6746 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6747                                               abi_ulong target_flock_addr)
6748 {
6749     struct target_flock64 *target_fl;
6750     int l_type;
6751 
6752     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6753         return -TARGET_EFAULT;
6754     }
6755 
6756     __get_user(l_type, &target_fl->l_type);
6757     l_type = target_to_host_flock(l_type);
6758     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6759         return l_type;
6760     }
6761     fl->l_type = l_type;
6762     __get_user(fl->l_whence, &target_fl->l_whence);
6763     __get_user(fl->l_start, &target_fl->l_start);
6764     __get_user(fl->l_len, &target_fl->l_len);
6765     __get_user(fl->l_pid, &target_fl->l_pid);
6766     unlock_user_struct(target_fl, target_flock_addr, 0);
6767     return 0;
6768 }
6769 
6770 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6771                                             const struct flock64 *fl)
6772 {
6773     struct target_flock64 *target_fl;
6774     short l_type;
6775 
6776     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6777         return -TARGET_EFAULT;
6778     }
6779 
6780     l_type = host_to_target_flock(fl->l_type);
6781     __put_user(l_type, &target_fl->l_type);
6782     __put_user(fl->l_whence, &target_fl->l_whence);
6783     __put_user(fl->l_start, &target_fl->l_start);
6784     __put_user(fl->l_len, &target_fl->l_len);
6785     __put_user(fl->l_pid, &target_fl->l_pid);
6786     unlock_user_struct(target_fl, target_flock_addr, 1);
6787     return 0;
6788 }
6789 
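/*
 * do_fcntl(): translate the command and any pointer argument, then call
 * the host fcntl.  Locking commands go through the flock converters
 * above; F_GETFL/F_SETFL translate the file status flags via
 * fcntl_flags_tbl; F_GETSIG/F_SETSIG translate signal numbers.
 *
 * Illustrative guest-side use (hypothetical, not taken from this file):
 *     struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *     fcntl(fd, F_SETLKW, &fl);
 */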
6790 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6791 {
6792     struct flock64 fl64;
6793 #ifdef F_GETOWN_EX
6794     struct f_owner_ex fox;
6795     struct target_f_owner_ex *target_fox;
6796 #endif
6797     abi_long ret;
6798     int host_cmd = target_to_host_fcntl_cmd(cmd);
6799 
6800     if (host_cmd == -TARGET_EINVAL)
6801         return host_cmd;
6802 
6803     switch(cmd) {
6804     case TARGET_F_GETLK:
6805         ret = copy_from_user_flock(&fl64, arg);
6806         if (ret) {
6807             return ret;
6808         }
6809         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6810         if (ret == 0) {
6811             ret = copy_to_user_flock(arg, &fl64);
6812         }
6813         break;
6814 
6815     case TARGET_F_SETLK:
6816     case TARGET_F_SETLKW:
6817         ret = copy_from_user_flock(&fl64, arg);
6818         if (ret) {
6819             return ret;
6820         }
6821         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6822         break;
6823 
6824     case TARGET_F_GETLK64:
6825     case TARGET_F_OFD_GETLK:
6826         ret = copy_from_user_flock64(&fl64, arg);
6827         if (ret) {
6828             return ret;
6829         }
6830         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6831         if (ret == 0) {
6832             ret = copy_to_user_flock64(arg, &fl64);
6833         }
6834         break;
6835     case TARGET_F_SETLK64:
6836     case TARGET_F_SETLKW64:
6837     case TARGET_F_OFD_SETLK:
6838     case TARGET_F_OFD_SETLKW:
6839         ret = copy_from_user_flock64(&fl64, arg);
6840         if (ret) {
6841             return ret;
6842         }
6843         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6844         break;
6845 
6846     case TARGET_F_GETFL:
6847         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6848         if (ret >= 0) {
6849             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6850         }
6851         break;
6852 
6853     case TARGET_F_SETFL:
6854         ret = get_errno(safe_fcntl(fd, host_cmd,
6855                                    target_to_host_bitmask(arg,
6856                                                           fcntl_flags_tbl)));
6857         break;
6858 
6859 #ifdef F_GETOWN_EX
6860     case TARGET_F_GETOWN_EX:
6861         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6862         if (ret >= 0) {
6863             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6864                 return -TARGET_EFAULT;
6865             target_fox->type = tswap32(fox.type);
6866             target_fox->pid = tswap32(fox.pid);
6867             unlock_user_struct(target_fox, arg, 1);
6868         }
6869         break;
6870 #endif
6871 
6872 #ifdef F_SETOWN_EX
6873     case TARGET_F_SETOWN_EX:
6874         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6875             return -TARGET_EFAULT;
6876         fox.type = tswap32(target_fox->type);
6877         fox.pid = tswap32(target_fox->pid);
6878         unlock_user_struct(target_fox, arg, 0);
6879         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6880         break;
6881 #endif
6882 
6883     case TARGET_F_SETSIG:
6884         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6885         break;
6886 
6887     case TARGET_F_GETSIG:
6888         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6889         break;
6890 
6891     case TARGET_F_SETOWN:
6892     case TARGET_F_GETOWN:
6893     case TARGET_F_SETLEASE:
6894     case TARGET_F_GETLEASE:
6895     case TARGET_F_SETPIPE_SZ:
6896     case TARGET_F_GETPIPE_SZ:
6897         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6898         break;
6899 
6900     default:
6901         ret = get_errno(safe_fcntl(fd, cmd, arg));
6902         break;
6903     }
6904     return ret;
6905 }
6906 
6907 #ifdef USE_UID16
6908 
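/*
 * With USE_UID16 the guest ABI only has 16-bit uid_t/gid_t.  Host IDs
 * above 65535 are reported as the overflow ID 65534, and a 16-bit -1
 * coming from the guest is sign-extended so that the "leave unchanged"
 * markers for setres[ug]id() and friends keep working.
 */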
6909 static inline int high2lowuid(int uid)
6910 {
6911     if (uid > 65535)
6912         return 65534;
6913     else
6914         return uid;
6915 }
6916 
6917 static inline int high2lowgid(int gid)
6918 {
6919     if (gid > 65535)
6920         return 65534;
6921     else
6922         return gid;
6923 }
6924 
6925 static inline int low2highuid(int uid)
6926 {
6927     if ((int16_t)uid == -1)
6928         return -1;
6929     else
6930         return uid;
6931 }
6932 
6933 static inline int low2highgid(int gid)
6934 {
6935     if ((int16_t)gid == -1)
6936         return -1;
6937     else
6938         return gid;
6939 }
6940 static inline int tswapid(int id)
6941 {
6942     return tswap16(id);
6943 }
6944 
6945 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6946 
6947 #else /* !USE_UID16 */
6948 static inline int high2lowuid(int uid)
6949 {
6950     return uid;
6951 }
6952 static inline int high2lowgid(int gid)
6953 {
6954     return gid;
6955 }
6956 static inline int low2highuid(int uid)
6957 {
6958     return uid;
6959 }
6960 static inline int low2highgid(int gid)
6961 {
6962     return gid;
6963 }
6964 static inline int tswapid(int id)
6965 {
6966     return tswap32(id);
6967 }
6968 
6969 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6970 
6971 #endif /* USE_UID16 */
6972 
6973 /* We must do direct syscalls for setting UID/GID, because we want to
6974  * implement the Linux system call semantics of "change only for this thread",
6975  * not the libc/POSIX semantics of "change for all threads in process".
6976  * (See http://ewontfix.com/17/ for more details.)
6977  * We use the 32-bit version of the syscalls if present; if it is not
6978  * then either the host architecture supports 32-bit UIDs natively with
6979  * the standard syscall, or the 16-bit UID is the best we can do.
6980  */
6981 #ifdef __NR_setuid32
6982 #define __NR_sys_setuid __NR_setuid32
6983 #else
6984 #define __NR_sys_setuid __NR_setuid
6985 #endif
6986 #ifdef __NR_setgid32
6987 #define __NR_sys_setgid __NR_setgid32
6988 #else
6989 #define __NR_sys_setgid __NR_setgid
6990 #endif
6991 #ifdef __NR_setresuid32
6992 #define __NR_sys_setresuid __NR_setresuid32
6993 #else
6994 #define __NR_sys_setresuid __NR_setresuid
6995 #endif
6996 #ifdef __NR_setresgid32
6997 #define __NR_sys_setresgid __NR_setresgid32
6998 #else
6999 #define __NR_sys_setresgid __NR_setresgid
7000 #endif
7001 
7002 _syscall1(int, sys_setuid, uid_t, uid)
7003 _syscall1(int, sys_setgid, gid_t, gid)
7004 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7005 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7006 
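/*
 * syscall_init(): one-time setup.  Registers the struct layouts used by
 * the ioctl thunking code, builds the target-to-host errno reverse
 * table, and patches the size field of any ioctl number that was
 * declared with an all-ones placeholder size so that it carries the
 * real (thunk-computed) structure size.
 */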
7007 void syscall_init(void)
7008 {
7009     IOCTLEntry *ie;
7010     const argtype *arg_type;
7011     int size;
7012     int i;
7013 
7014     thunk_init(STRUCT_MAX);
7015 
7016 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7017 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7018 #include "syscall_types.h"
7019 #undef STRUCT
7020 #undef STRUCT_SPECIAL
7021 
7022     /* Build target_to_host_errno_table[] table from
7023      * host_to_target_errno_table[]. */
7024     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7025         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7026     }
7027 
7028     /* We patch the ioctl size if necessary.  We rely on the fact that
7029        no ioctl has all the bits at '1' in the size field.  */
7030     ie = ioctl_entries;
7031     while (ie->target_cmd != 0) {
7032         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7033             TARGET_IOC_SIZEMASK) {
7034             arg_type = ie->arg_type;
7035             if (arg_type[0] != TYPE_PTR) {
7036                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7037                         ie->target_cmd);
7038                 exit(1);
7039             }
7040             arg_type++;
7041             size = thunk_type_size(arg_type, 0);
7042             ie->target_cmd = (ie->target_cmd &
7043                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7044                 (size << TARGET_IOC_SIZESHIFT);
7045         }
7046 
7047         /* automatic consistency check if same arch */
7048 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7049     (defined(__x86_64__) && defined(TARGET_X86_64))
7050         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7051             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7052                     ie->name, ie->target_cmd, ie->host_cmd);
7053         }
7054 #endif
7055         ie++;
7056     }
7057 }
7058 
7059 #ifdef TARGET_NR_truncate64
7060 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7061                                          abi_long arg2,
7062                                          abi_long arg3,
7063                                          abi_long arg4)
7064 {
7065     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7066         arg2 = arg3;
7067         arg3 = arg4;
7068     }
7069     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7070 }
7071 #endif
7072 
7073 #ifdef TARGET_NR_ftruncate64
7074 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7075                                           abi_long arg2,
7076                                           abi_long arg3,
7077                                           abi_long arg4)
7078 {
7079     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7080         arg2 = arg3;
7081         arg3 = arg4;
7082     }
7083     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7084 }
7085 #endif
7086 
7087 #if defined(TARGET_NR_timer_settime) || \
7088     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7089 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7090                                                  abi_ulong target_addr)
7091 {
7092     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7093                                 offsetof(struct target_itimerspec,
7094                                          it_interval)) ||
7095         target_to_host_timespec(&host_its->it_value, target_addr +
7096                                 offsetof(struct target_itimerspec,
7097                                          it_value))) {
7098         return -TARGET_EFAULT;
7099     }
7100 
7101     return 0;
7102 }
7103 #endif
7104 
7105 #if defined(TARGET_NR_timer_settime64) || \
7106     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7107 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7108                                                    abi_ulong target_addr)
7109 {
7110     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7111                                   offsetof(struct target__kernel_itimerspec,
7112                                            it_interval)) ||
7113         target_to_host_timespec64(&host_its->it_value, target_addr +
7114                                   offsetof(struct target__kernel_itimerspec,
7115                                            it_value))) {
7116         return -TARGET_EFAULT;
7117     }
7118 
7119     return 0;
7120 }
7121 #endif
7122 
7123 #if ((defined(TARGET_NR_timerfd_gettime) || \
7124       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7125       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7126 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7127                                                  struct itimerspec *host_its)
7128 {
7129     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7130                                                        it_interval),
7131                                 &host_its->it_interval) ||
7132         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7133                                                        it_value),
7134                                 &host_its->it_value)) {
7135         return -TARGET_EFAULT;
7136     }
7137     return 0;
7138 }
7139 #endif
7140 
7141 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7142       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7143       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7144 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7145                                                    struct itimerspec *host_its)
7146 {
7147     if (host_to_target_timespec64(target_addr +
7148                                   offsetof(struct target__kernel_itimerspec,
7149                                            it_interval),
7150                                   &host_its->it_interval) ||
7151         host_to_target_timespec64(target_addr +
7152                                   offsetof(struct target__kernel_itimerspec,
7153                                            it_value),
7154                                   &host_its->it_value)) {
7155         return -TARGET_EFAULT;
7156     }
7157     return 0;
7158 }
7159 #endif
7160 
7161 #if defined(TARGET_NR_adjtimex) || \
7162     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
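/*
 * struct timex converters for adjtimex()/clock_adjtime().  Every field
 * is copied individually because the guest layout can differ in size
 * and byte order from the host's.
 */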
7163 static inline abi_long target_to_host_timex(struct timex *host_tx,
7164                                             abi_long target_addr)
7165 {
7166     struct target_timex *target_tx;
7167 
7168     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7169         return -TARGET_EFAULT;
7170     }
7171 
7172     __get_user(host_tx->modes, &target_tx->modes);
7173     __get_user(host_tx->offset, &target_tx->offset);
7174     __get_user(host_tx->freq, &target_tx->freq);
7175     __get_user(host_tx->maxerror, &target_tx->maxerror);
7176     __get_user(host_tx->esterror, &target_tx->esterror);
7177     __get_user(host_tx->status, &target_tx->status);
7178     __get_user(host_tx->constant, &target_tx->constant);
7179     __get_user(host_tx->precision, &target_tx->precision);
7180     __get_user(host_tx->tolerance, &target_tx->tolerance);
7181     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7182     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7183     __get_user(host_tx->tick, &target_tx->tick);
7184     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7185     __get_user(host_tx->jitter, &target_tx->jitter);
7186     __get_user(host_tx->shift, &target_tx->shift);
7187     __get_user(host_tx->stabil, &target_tx->stabil);
7188     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7189     __get_user(host_tx->calcnt, &target_tx->calcnt);
7190     __get_user(host_tx->errcnt, &target_tx->errcnt);
7191     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7192     __get_user(host_tx->tai, &target_tx->tai);
7193 
7194     unlock_user_struct(target_tx, target_addr, 0);
7195     return 0;
7196 }
7197 
7198 static inline abi_long host_to_target_timex(abi_long target_addr,
7199                                             struct timex *host_tx)
7200 {
7201     struct target_timex *target_tx;
7202 
7203     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7204         return -TARGET_EFAULT;
7205     }
7206 
7207     __put_user(host_tx->modes, &target_tx->modes);
7208     __put_user(host_tx->offset, &target_tx->offset);
7209     __put_user(host_tx->freq, &target_tx->freq);
7210     __put_user(host_tx->maxerror, &target_tx->maxerror);
7211     __put_user(host_tx->esterror, &target_tx->esterror);
7212     __put_user(host_tx->status, &target_tx->status);
7213     __put_user(host_tx->constant, &target_tx->constant);
7214     __put_user(host_tx->precision, &target_tx->precision);
7215     __put_user(host_tx->tolerance, &target_tx->tolerance);
7216     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7217     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7218     __put_user(host_tx->tick, &target_tx->tick);
7219     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7220     __put_user(host_tx->jitter, &target_tx->jitter);
7221     __put_user(host_tx->shift, &target_tx->shift);
7222     __put_user(host_tx->stabil, &target_tx->stabil);
7223     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7224     __put_user(host_tx->calcnt, &target_tx->calcnt);
7225     __put_user(host_tx->errcnt, &target_tx->errcnt);
7226     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7227     __put_user(host_tx->tai, &target_tx->tai);
7228 
7229     unlock_user_struct(target_tx, target_addr, 1);
7230     return 0;
7231 }
7232 #endif
7233 
7234 
7235 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7236 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7237                                               abi_long target_addr)
7238 {
7239     struct target__kernel_timex *target_tx;
7240 
7241     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7242                                  offsetof(struct target__kernel_timex,
7243                                           time))) {
7244         return -TARGET_EFAULT;
7245     }
7246 
7247     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7248         return -TARGET_EFAULT;
7249     }
7250 
7251     __get_user(host_tx->modes, &target_tx->modes);
7252     __get_user(host_tx->offset, &target_tx->offset);
7253     __get_user(host_tx->freq, &target_tx->freq);
7254     __get_user(host_tx->maxerror, &target_tx->maxerror);
7255     __get_user(host_tx->esterror, &target_tx->esterror);
7256     __get_user(host_tx->status, &target_tx->status);
7257     __get_user(host_tx->constant, &target_tx->constant);
7258     __get_user(host_tx->precision, &target_tx->precision);
7259     __get_user(host_tx->tolerance, &target_tx->tolerance);
7260     __get_user(host_tx->tick, &target_tx->tick);
7261     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7262     __get_user(host_tx->jitter, &target_tx->jitter);
7263     __get_user(host_tx->shift, &target_tx->shift);
7264     __get_user(host_tx->stabil, &target_tx->stabil);
7265     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7266     __get_user(host_tx->calcnt, &target_tx->calcnt);
7267     __get_user(host_tx->errcnt, &target_tx->errcnt);
7268     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7269     __get_user(host_tx->tai, &target_tx->tai);
7270 
7271     unlock_user_struct(target_tx, target_addr, 0);
7272     return 0;
7273 }
7274 
7275 static inline abi_long host_to_target_timex64(abi_long target_addr,
7276                                               struct timex *host_tx)
7277 {
7278     struct target__kernel_timex *target_tx;
7279 
7280     if (copy_to_user_timeval64(target_addr +
7281                                offsetof(struct target__kernel_timex, time),
7282                                &host_tx->time)) {
7283         return -TARGET_EFAULT;
7284     }
7285 
7286     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7287         return -TARGET_EFAULT;
7288     }
7289 
7290     __put_user(host_tx->modes, &target_tx->modes);
7291     __put_user(host_tx->offset, &target_tx->offset);
7292     __put_user(host_tx->freq, &target_tx->freq);
7293     __put_user(host_tx->maxerror, &target_tx->maxerror);
7294     __put_user(host_tx->esterror, &target_tx->esterror);
7295     __put_user(host_tx->status, &target_tx->status);
7296     __put_user(host_tx->constant, &target_tx->constant);
7297     __put_user(host_tx->precision, &target_tx->precision);
7298     __put_user(host_tx->tolerance, &target_tx->tolerance);
7299     __put_user(host_tx->tick, &target_tx->tick);
7300     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7301     __put_user(host_tx->jitter, &target_tx->jitter);
7302     __put_user(host_tx->shift, &target_tx->shift);
7303     __put_user(host_tx->stabil, &target_tx->stabil);
7304     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7305     __put_user(host_tx->calcnt, &target_tx->calcnt);
7306     __put_user(host_tx->errcnt, &target_tx->errcnt);
7307     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7308     __put_user(host_tx->tai, &target_tx->tai);
7309 
7310     unlock_user_struct(target_tx, target_addr, 1);
7311     return 0;
7312 }
7313 #endif
7314 
7315 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7316                                                abi_ulong target_addr)
7317 {
7318     struct target_sigevent *target_sevp;
7319 
7320     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7321         return -TARGET_EFAULT;
7322     }
7323 
7324     /* This union is awkward on 64 bit systems because it has a 32 bit
7325      * integer and a pointer in it; we follow the conversion approach
7326      * used for handling sigval types in signal.c so the guest should get
7327      * the correct value back even if we did a 64 bit byteswap and it's
7328      * using the 32 bit integer.
7329      */
7330     host_sevp->sigev_value.sival_ptr =
7331         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7332     host_sevp->sigev_signo =
7333         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7334     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7335     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7336 
7337     unlock_user_struct(target_sevp, target_addr, 1);
7338     return 0;
7339 }
7340 
7341 #if defined(TARGET_NR_mlockall)
7342 static inline int target_to_host_mlockall_arg(int arg)
7343 {
7344     int result = 0;
7345 
7346     if (arg & TARGET_MCL_CURRENT) {
7347         result |= MCL_CURRENT;
7348     }
7349     if (arg & TARGET_MCL_FUTURE) {
7350         result |= MCL_FUTURE;
7351     }
7352 #ifdef MCL_ONFAULT
7353     if (arg & TARGET_MCL_ONFAULT) {
7354         result |= MCL_ONFAULT;
7355     }
7356 #endif
7357 
7358     return result;
7359 }
7360 #endif
7361 
7362 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7363      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7364      defined(TARGET_NR_newfstatat))
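/*
 * host_to_target_stat64(): fills in the guest stat64 buffer.  ARM EABI
 * guests use the padded target_eabi_stat64 layout; everyone else uses
 * target_stat64 (or target_stat where the target has no separate
 * 64-bit variant).
 */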
7365 static inline abi_long host_to_target_stat64(void *cpu_env,
7366                                              abi_ulong target_addr,
7367                                              struct stat *host_st)
7368 {
7369 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7370     if (((CPUARMState *)cpu_env)->eabi) {
7371         struct target_eabi_stat64 *target_st;
7372 
7373         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7374             return -TARGET_EFAULT;
7375         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7376         __put_user(host_st->st_dev, &target_st->st_dev);
7377         __put_user(host_st->st_ino, &target_st->st_ino);
7378 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7379         __put_user(host_st->st_ino, &target_st->__st_ino);
7380 #endif
7381         __put_user(host_st->st_mode, &target_st->st_mode);
7382         __put_user(host_st->st_nlink, &target_st->st_nlink);
7383         __put_user(host_st->st_uid, &target_st->st_uid);
7384         __put_user(host_st->st_gid, &target_st->st_gid);
7385         __put_user(host_st->st_rdev, &target_st->st_rdev);
7386         __put_user(host_st->st_size, &target_st->st_size);
7387         __put_user(host_st->st_blksize, &target_st->st_blksize);
7388         __put_user(host_st->st_blocks, &target_st->st_blocks);
7389         __put_user(host_st->st_atime, &target_st->target_st_atime);
7390         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7391         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7392 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7393         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7394         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7395         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7396 #endif
7397         unlock_user_struct(target_st, target_addr, 1);
7398     } else
7399 #endif
7400     {
7401 #if defined(TARGET_HAS_STRUCT_STAT64)
7402         struct target_stat64 *target_st;
7403 #else
7404         struct target_stat *target_st;
7405 #endif
7406 
7407         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7408             return -TARGET_EFAULT;
7409         memset(target_st, 0, sizeof(*target_st));
7410         __put_user(host_st->st_dev, &target_st->st_dev);
7411         __put_user(host_st->st_ino, &target_st->st_ino);
7412 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7413         __put_user(host_st->st_ino, &target_st->__st_ino);
7414 #endif
7415         __put_user(host_st->st_mode, &target_st->st_mode);
7416         __put_user(host_st->st_nlink, &target_st->st_nlink);
7417         __put_user(host_st->st_uid, &target_st->st_uid);
7418         __put_user(host_st->st_gid, &target_st->st_gid);
7419         __put_user(host_st->st_rdev, &target_st->st_rdev);
7420         /* XXX: better use of kernel struct */
7421         __put_user(host_st->st_size, &target_st->st_size);
7422         __put_user(host_st->st_blksize, &target_st->st_blksize);
7423         __put_user(host_st->st_blocks, &target_st->st_blocks);
7424         __put_user(host_st->st_atime, &target_st->target_st_atime);
7425         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7426         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7427 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7428         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7429         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7430         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7431 #endif
7432         unlock_user_struct(target_st, target_addr, 1);
7433     }
7434 
7435     return 0;
7436 }
7437 #endif
7438 
7439 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7440 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7441                                             abi_ulong target_addr)
7442 {
7443     struct target_statx *target_stx;
7444 
7445     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7446         return -TARGET_EFAULT;
7447     }
7448     memset(target_stx, 0, sizeof(*target_stx));
7449 
7450     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7451     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7452     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7453     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7454     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7455     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7456     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7457     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7458     __put_user(host_stx->stx_size, &target_stx->stx_size);
7459     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7460     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7461     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7462     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7463     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7464     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7465     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7466     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7467     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7468     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7469     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7470     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7471     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7472     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7473 
7474     unlock_user_struct(target_stx, target_addr, 1);
7475 
7476     return 0;
7477 }
7478 #endif
7479 
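/*
 * Select between the futex and futex_time64 syscalls.  On 64-bit hosts
 * time_t is already 64 bits wide and only __NR_futex exists; on 32-bit
 * hosts we prefer __NR_futex_time64 when the host timespec carries a
 * 64-bit tv_sec, and fall back to the classic __NR_futex otherwise.
 */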
7480 static int do_sys_futex(int *uaddr, int op, int val,
7481                          const struct timespec *timeout, int *uaddr2,
7482                          int val3)
7483 {
7484 #if HOST_LONG_BITS == 64
7485 #if defined(__NR_futex)
7486     /* The host time_t is always 64 bits wide; there is no _time64 variant. */
7487     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7488 
7489 #endif
7490 #else /* HOST_LONG_BITS == 64 */
7491 #if defined(__NR_futex_time64)
7492     if (sizeof(timeout->tv_sec) == 8) {
7493         /* _time64 function on 32bit arch */
7494         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7495     }
7496 #endif
7497 #if defined(__NR_futex)
7498     /* old function on 32bit arch */
7499     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7500 #endif
7501 #endif /* HOST_LONG_BITS == 64 */
7502     g_assert_not_reached();
7503 }
7504 
7505 static int do_safe_futex(int *uaddr, int op, int val,
7506                          const struct timespec *timeout, int *uaddr2,
7507                          int val3)
7508 {
7509 #if HOST_LONG_BITS == 64
7510 #if defined(__NR_futex)
7511     /* The host time_t is always 64 bits wide; there is no _time64 variant. */
7512     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7513 #endif
7514 #else /* HOST_LONG_BITS == 64 */
7515 #if defined(__NR_futex_time64)
7516     if (sizeof(timeout->tv_sec) == 8) {
7517         /* _time64 function on 32bit arch */
7518         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7519                                            val3));
7520     }
7521 #endif
7522 #if defined(__NR_futex)
7523     /* old function on 32bit arch */
7524     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7525 #endif
7526 #endif /* HOST_LONG_BITS == 64 */
7527     return -TARGET_ENOSYS;
7528 }
7529 
7530 /* ??? Using host futex calls even when target atomic operations
7531    are not really atomic probably breaks things.  However, implementing
7532    futexes locally would make futexes shared between multiple processes
7533    tricky.  Then again, such futexes are probably useless there because
7534    guest atomic operations won't work either.  */
7535 #if defined(TARGET_NR_futex)
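/*
 * do_futex(): FUTEX_WAIT compares 'val' against the guest word at uaddr,
 * which is stored in guest byte order, hence the tswap32(); val3 is
 * byte-swapped for FUTEX_CMP_REQUEUE for the same reason.
 */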
7536 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7537                     target_ulong uaddr2, int val3)
7538 {
7539     struct timespec ts, *pts;
7540     int base_op;
7541 
7542     /* ??? We assume FUTEX_* constants are the same on both host
7543        and target.  */
7544 #ifdef FUTEX_CMD_MASK
7545     base_op = op & FUTEX_CMD_MASK;
7546 #else
7547     base_op = op;
7548 #endif
7549     switch (base_op) {
7550     case FUTEX_WAIT:
7551     case FUTEX_WAIT_BITSET:
7552         if (timeout) {
7553             pts = &ts;
7554             target_to_host_timespec(pts, timeout);
7555         } else {
7556             pts = NULL;
7557         }
7558         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7559     case FUTEX_WAKE:
7560         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7561     case FUTEX_FD:
7562         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7563     case FUTEX_REQUEUE:
7564     case FUTEX_CMP_REQUEUE:
7565     case FUTEX_WAKE_OP:
7566         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7567            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7568            But the prototype takes a `struct timespec *'; insert casts
7569            to satisfy the compiler.  We do not need to tswap TIMEOUT
7570            since it's not compared to guest memory.  */
7571         pts = (struct timespec *)(uintptr_t) timeout;
7572         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7573                              (base_op == FUTEX_CMP_REQUEUE
7574                                       ? tswap32(val3)
7575                                       : val3));
7576     default:
7577         return -TARGET_ENOSYS;
7578     }
7579 }
7580 #endif
7581 
7582 #if defined(TARGET_NR_futex_time64)
7583 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7584                            target_ulong uaddr2, int val3)
7585 {
7586     struct timespec ts, *pts;
7587     int base_op;
7588 
7589     /* ??? We assume FUTEX_* constants are the same on both host
7590        and target.  */
7591 #ifdef FUTEX_CMD_MASK
7592     base_op = op & FUTEX_CMD_MASK;
7593 #else
7594     base_op = op;
7595 #endif
7596     switch (base_op) {
7597     case FUTEX_WAIT:
7598     case FUTEX_WAIT_BITSET:
7599         if (timeout) {
7600             pts = &ts;
7601             if (target_to_host_timespec64(pts, timeout)) {
7602                 return -TARGET_EFAULT;
7603             }
7604         } else {
7605             pts = NULL;
7606         }
7607         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7608     case FUTEX_WAKE:
7609         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7610     case FUTEX_FD:
7611         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7612     case FUTEX_REQUEUE:
7613     case FUTEX_CMP_REQUEUE:
7614     case FUTEX_WAKE_OP:
7615         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7616            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7617            But the prototype takes a `struct timespec *'; insert casts
7618            to satisfy the compiler.  We do not need to tswap TIMEOUT
7619            since it's not compared to guest memory.  */
7620         pts = (struct timespec *)(uintptr_t) timeout;
7621         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7622                              (base_op == FUTEX_CMP_REQUEUE
7623                                       ? tswap32(val3)
7624                                       : val3));
7625     default:
7626         return -TARGET_ENOSYS;
7627     }
7628 }
7629 #endif
7630 
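/*
 * A struct file_handle starts with two 32-bit header fields, handle_bytes
 * and handle_type, followed by handle_bytes of opaque data.  The wrappers
 * below therefore byte-swap only the two header fields and copy the opaque
 * payload verbatim between guest and host.
 */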
7631 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7632 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7633                                      abi_long handle, abi_long mount_id,
7634                                      abi_long flags)
7635 {
7636     struct file_handle *target_fh;
7637     struct file_handle *fh;
7638     int mid = 0;
7639     abi_long ret;
7640     char *name;
7641     unsigned int size, total_size;
7642 
7643     if (get_user_s32(size, handle)) {
7644         return -TARGET_EFAULT;
7645     }
7646 
7647     name = lock_user_string(pathname);
7648     if (!name) {
7649         return -TARGET_EFAULT;
7650     }
7651 
7652     total_size = sizeof(struct file_handle) + size;
7653     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7654     if (!target_fh) {
7655         unlock_user(name, pathname, 0);
7656         return -TARGET_EFAULT;
7657     }
7658 
7659     fh = g_malloc0(total_size);
7660     fh->handle_bytes = size;
7661 
7662     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7663     unlock_user(name, pathname, 0);
7664 
7665     /* man name_to_handle_at(2):
7666      * Other than the use of the handle_bytes field, the caller should treat
7667      * the file_handle structure as an opaque data type
7668      */
7669 
7670     memcpy(target_fh, fh, total_size);
7671     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7672     target_fh->handle_type = tswap32(fh->handle_type);
7673     g_free(fh);
7674     unlock_user(target_fh, handle, total_size);
7675 
7676     if (put_user_s32(mid, mount_id)) {
7677         return -TARGET_EFAULT;
7678     }
7679 
7680     return ret;
7681 
7682 }
7683 #endif
7684 
7685 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7686 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7687                                      abi_long flags)
7688 {
7689     struct file_handle *target_fh;
7690     struct file_handle *fh;
7691     unsigned int size, total_size;
7692     abi_long ret;
7693 
7694     if (get_user_s32(size, handle)) {
7695         return -TARGET_EFAULT;
7696     }
7697 
7698     total_size = sizeof(struct file_handle) + size;
7699     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7700     if (!target_fh) {
7701         return -TARGET_EFAULT;
7702     }
7703 
7704     fh = g_memdup(target_fh, total_size);
7705     fh->handle_bytes = size;
7706     fh->handle_type = tswap32(target_fh->handle_type);
7707 
7708     ret = get_errno(open_by_handle_at(mount_fd, fh,
7709                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7710 
7711     g_free(fh);
7712 
7713     unlock_user(target_fh, handle, total_size);
7714 
7715     return ret;
7716 }
7717 #endif
7718 
7719 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7720 
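/*
 * The guest's signal mask is converted to a host sigset and the flags are
 * translated before calling the host signalfd().  The resulting fd is
 * registered with an fd_trans handler so that the signalfd_siginfo records
 * later read from it can be converted back to the target's layout.
 */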
7721 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7722 {
7723     int host_flags;
7724     target_sigset_t *target_mask;
7725     sigset_t host_mask;
7726     abi_long ret;
7727 
7728     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7729         return -TARGET_EINVAL;
7730     }
7731     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7732         return -TARGET_EFAULT;
7733     }
7734 
7735     target_to_host_sigset(&host_mask, target_mask);
7736 
7737     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7738 
7739     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7740     if (ret >= 0) {
7741         fd_trans_register(ret, &target_signalfd_trans);
7742     }
7743 
7744     unlock_user_struct(target_mask, mask, 0);
7745 
7746     return ret;
7747 }
7748 #endif
7749 
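/*
 * Reminder of the status word layout this relies on: for a signalled
 * process the terminating signal lives in the low 7 bits (bit 0x80 is the
 * core-dump flag), while for a stopped process the low byte is 0x7f and
 * the stopping signal sits in bits 8-15, which is also where an exit code
 * would be.  Only the signal numbers need translating; the remaining bits
 * are passed through unchanged.
 */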
7750 /* Map host to target signal numbers for the wait family of syscalls.
7751    Assume all other status bits are the same.  */
7752 int host_to_target_waitstatus(int status)
7753 {
7754     if (WIFSIGNALED(status)) {
7755         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7756     }
7757     if (WIFSTOPPED(status)) {
7758         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7759                | (status & 0xff);
7760     }
7761     return status;
7762 }
7763 
7764 static int open_self_cmdline(void *cpu_env, int fd)
7765 {
7766     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7767     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7768     int i;
7769 
7770     for (i = 0; i < bprm->argc; i++) {
7771         size_t len = strlen(bprm->argv[i]) + 1;
7772 
7773         if (write(fd, bprm->argv[i], len) != len) {
7774             return -1;
7775         }
7776     }
7777 
7778     return 0;
7779 }
7780 
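/*
 * Synthesize the guest's view of /proc/self/maps: walk the host's own
 * mappings (read_self_maps()), keep only ranges that correspond to valid
 * guest addresses, translate them with h2g(), and re-emit them in the
 * usual maps format, labelling the guest stack as "[stack]".
 */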
7781 static int open_self_maps(void *cpu_env, int fd)
7782 {
7783     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7784     TaskState *ts = cpu->opaque;
7785     GSList *map_info = read_self_maps();
7786     GSList *s;
7787     int count;
7788 
7789     for (s = map_info; s; s = g_slist_next(s)) {
7790         MapInfo *e = (MapInfo *) s->data;
7791 
7792         if (h2g_valid(e->start)) {
7793             unsigned long min = e->start;
7794             unsigned long max = e->end;
7795             int flags = page_get_flags(h2g(min));
7796             const char *path;
7797 
7798             max = h2g_valid(max - 1) ?
7799                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7800 
7801             if (page_check_range(h2g(min), max - min, flags) == -1) {
7802                 continue;
7803             }
7804 
7805             if (h2g(min) == ts->info->stack_limit) {
7806                 path = "[stack]";
7807             } else {
7808                 path = e->path;
7809             }
7810 
7811             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7812                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7813                             h2g(min), h2g(max - 1) + 1,
7814                             e->is_read ? 'r' : '-',
7815                             e->is_write ? 'w' : '-',
7816                             e->is_exec ? 'x' : '-',
7817                             e->is_priv ? 'p' : '-',
7818                             (uint64_t) e->offset, e->dev, e->inode);
7819             if (path) {
7820                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7821             } else {
7822                 dprintf(fd, "\n");
7823             }
7824         }
7825     }
7826 
7827     free_self_maps(map_info);
7828 
7829 #ifdef TARGET_VSYSCALL_PAGE
7830     /*
7831      * We only support execution from the vsyscall page.
7832      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7833      */
7834     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7835                     " --xp 00000000 00:00 0",
7836                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7837     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7838 #endif
7839 
7840     return 0;
7841 }
7842 
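/*
 * Fake /proc/self/stat: of the 44 space-separated fields emitted below,
 * only pid (field 1), comm (field 2, truncated to 15 chars in parentheses)
 * and what should be startstack (field 28) carry real values; everything
 * else is written as 0, which is presumably enough for common consumers.
 */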
7843 static int open_self_stat(void *cpu_env, int fd)
7844 {
7845     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7846     TaskState *ts = cpu->opaque;
7847     g_autoptr(GString) buf = g_string_new(NULL);
7848     int i;
7849 
7850     for (i = 0; i < 44; i++) {
7851         if (i == 0) {
7852             /* pid */
7853             g_string_printf(buf, FMT_pid " ", getpid());
7854         } else if (i == 1) {
7855             /* app name */
7856             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7857             bin = bin ? bin + 1 : ts->bprm->argv[0];
7858             g_string_printf(buf, "(%.15s) ", bin);
7859         } else if (i == 27) {
7860             /* stack bottom */
7861             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7862         } else {
7863             /* for the rest, there is MasterCard */
7864             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7865         }
7866 
7867         if (write(fd, buf->str, buf->len) != buf->len) {
7868             return -1;
7869         }
7870     }
7871 
7872     return 0;
7873 }
7874 
7875 static int open_self_auxv(void *cpu_env, int fd)
7876 {
7877     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7878     TaskState *ts = cpu->opaque;
7879     abi_ulong auxv = ts->info->saved_auxv;
7880     abi_ulong len = ts->info->auxv_len;
7881     char *ptr;
7882 
7883     /*
7884      * The auxiliary vector is stored on the target process's stack;
7885      * read in the whole vector and copy it to the file.
7886      */
7887     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7888     if (ptr != NULL) {
7889         while (len > 0) {
7890             ssize_t r;
7891             r = write(fd, ptr, len);
7892             if (r <= 0) {
7893                 break;
7894             }
7895             len -= r;
7896             ptr += r;
7897         }
7898         lseek(fd, 0, SEEK_SET);
7899         unlock_user(ptr, auxv, len);
7900     }
7901 
7902     return 0;
7903 }
7904 
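/*
 * Returns 1 only for paths that refer to this process's own /proc entry,
 * e.g. is_proc_myself("/proc/self/maps", "maps") or
 * is_proc_myself("/proc/<our pid>/maps", "maps"); any other pid, or any
 * other /proc file, returns 0.
 */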
7905 static int is_proc_myself(const char *filename, const char *entry)
7906 {
7907     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7908         filename += strlen("/proc/");
7909         if (!strncmp(filename, "self/", strlen("self/"))) {
7910             filename += strlen("self/");
7911         } else if (*filename >= '1' && *filename <= '9') {
7912             char myself[80];
7913             snprintf(myself, sizeof(myself), "%d/", getpid());
7914             if (!strncmp(filename, myself, strlen(myself))) {
7915                 filename += strlen(myself);
7916             } else {
7917                 return 0;
7918             }
7919         } else {
7920             return 0;
7921         }
7922         if (!strcmp(filename, entry)) {
7923             return 1;
7924         }
7925     }
7926     return 0;
7927 }
7928 
7929 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7930     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7931 static int is_proc(const char *filename, const char *entry)
7932 {
7933     return strcmp(filename, entry) == 0;
7934 }
7935 #endif
7936 
7937 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7938 static int open_net_route(void *cpu_env, int fd)
7939 {
7940     FILE *fp;
7941     char *line = NULL;
7942     size_t len = 0;
7943     ssize_t read;
7944 
7945     fp = fopen("/proc/net/route", "r");
7946     if (fp == NULL) {
7947         return -1;
7948     }
7949 
7950     /* read header */
7951 
7952     read = getline(&line, &len, fp);
7953     dprintf(fd, "%s", line);
7954 
7955     /* read routes */
7956 
7957     while ((read = getline(&line, &len, fp)) != -1) {
7958         char iface[16];
7959         uint32_t dest, gw, mask;
7960         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7961         int fields;
7962 
7963         fields = sscanf(line,
7964                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7965                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7966                         &mask, &mtu, &window, &irtt);
7967         if (fields != 11) {
7968             continue;
7969         }
7970         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7971                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7972                 metric, tswap32(mask), mtu, window, irtt);
7973     }
7974 
7975     free(line);
7976     fclose(fp);
7977 
7978     return 0;
7979 }
7980 #endif
7981 
7982 #if defined(TARGET_SPARC)
7983 static int open_cpuinfo(void *cpu_env, int fd)
7984 {
7985     dprintf(fd, "type\t\t: sun4u\n");
7986     return 0;
7987 }
7988 #endif
7989 
7990 #if defined(TARGET_HPPA)
7991 static int open_cpuinfo(void *cpu_env, int fd)
7992 {
7993     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7994     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7995     dprintf(fd, "capabilities\t: os32\n");
7996     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7997     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7998     return 0;
7999 }
8000 #endif
8001 
8002 #if defined(TARGET_M68K)
8003 static int open_hardware(void *cpu_env, int fd)
8004 {
8005     dprintf(fd, "Model:\t\tqemu-m68k\n");
8006     return 0;
8007 }
8008 #endif
8009 
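/*
 * Guest open()/openat() funnels through here so that a handful of /proc
 * files can be emulated.  A matching entry in the fakes[] table below is
 * materialized by calling its fill() callback on an unlinked temporary
 * file, whose fd is then handed to the guest; "/proc/self/exe" is special
 * cased to reopen the original executable, and everything else falls
 * through to the host openat().
 */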
8010 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8011 {
8012     struct fake_open {
8013         const char *filename;
8014         int (*fill)(void *cpu_env, int fd);
8015         int (*cmp)(const char *s1, const char *s2);
8016     };
8017     const struct fake_open *fake_open;
8018     static const struct fake_open fakes[] = {
8019         { "maps", open_self_maps, is_proc_myself },
8020         { "stat", open_self_stat, is_proc_myself },
8021         { "auxv", open_self_auxv, is_proc_myself },
8022         { "cmdline", open_self_cmdline, is_proc_myself },
8023 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8024         { "/proc/net/route", open_net_route, is_proc },
8025 #endif
8026 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8027         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8028 #endif
8029 #if defined(TARGET_M68K)
8030         { "/proc/hardware", open_hardware, is_proc },
8031 #endif
8032         { NULL, NULL, NULL }
8033     };
8034 
8035     if (is_proc_myself(pathname, "exe")) {
8036         int execfd = qemu_getauxval(AT_EXECFD);
8037         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8038     }
8039 
8040     for (fake_open = fakes; fake_open->filename; fake_open++) {
8041         if (fake_open->cmp(pathname, fake_open->filename)) {
8042             break;
8043         }
8044     }
8045 
8046     if (fake_open->filename) {
8047         const char *tmpdir;
8048         char filename[PATH_MAX];
8049         int fd, r;
8050 
8051         /* create temporary file to map stat to */
8052         tmpdir = getenv("TMPDIR");
8053         if (!tmpdir)
8054             tmpdir = "/tmp";
8055         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8056         fd = mkstemp(filename);
8057         if (fd < 0) {
8058             return fd;
8059         }
8060         unlink(filename);
8061 
8062         if ((r = fake_open->fill(cpu_env, fd))) {
8063             int e = errno;
8064             close(fd);
8065             errno = e;
8066             return r;
8067         }
8068         lseek(fd, 0, SEEK_SET);
8069 
8070         return fd;
8071     }
8072 
8073     return safe_openat(dirfd, path(pathname), flags, mode);
8074 }
8075 
8076 #define TIMER_MAGIC 0x0caf0000
8077 #define TIMER_MAGIC_MASK 0xffff0000
8078 
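/*
 * Guest-visible timer IDs are the index into g_posix_timers with
 * TIMER_MAGIC or'ed into the top half, e.g. 0x0caf0003 names slot 3;
 * anything whose top 16 bits are not TIMER_MAGIC is rejected below.
 */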
8079 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8080 static target_timer_t get_timer_id(abi_long arg)
8081 {
8082     target_timer_t timerid = arg;
8083 
8084     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8085         return -TARGET_EINVAL;
8086     }
8087 
8088     timerid &= 0xffff;
8089 
8090     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8091         return -TARGET_EINVAL;
8092     }
8093 
8094     return timerid;
8095 }
8096 
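/*
 * CPU affinity masks are plain bitmaps, but the guest hands them over as
 * an array of abi_ulong words while the host kernel wants unsigned long
 * words.  The two helpers below copy them bit by bit so that bit N of the
 * overall bitmap stays bit N regardless of word size; e.g. for a 32-bit
 * guest on a 64-bit host, two target words are packed into each host word.
 */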
8097 static int target_to_host_cpu_mask(unsigned long *host_mask,
8098                                    size_t host_size,
8099                                    abi_ulong target_addr,
8100                                    size_t target_size)
8101 {
8102     unsigned target_bits = sizeof(abi_ulong) * 8;
8103     unsigned host_bits = sizeof(*host_mask) * 8;
8104     abi_ulong *target_mask;
8105     unsigned i, j;
8106 
8107     assert(host_size >= target_size);
8108 
8109     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8110     if (!target_mask) {
8111         return -TARGET_EFAULT;
8112     }
8113     memset(host_mask, 0, host_size);
8114 
8115     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8116         unsigned bit = i * target_bits;
8117         abi_ulong val;
8118 
8119         __get_user(val, &target_mask[i]);
8120         for (j = 0; j < target_bits; j++, bit++) {
8121             if (val & (1UL << j)) {
8122                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8123             }
8124         }
8125     }
8126 
8127     unlock_user(target_mask, target_addr, 0);
8128     return 0;
8129 }
8130 
8131 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8132                                    size_t host_size,
8133                                    abi_ulong target_addr,
8134                                    size_t target_size)
8135 {
8136     unsigned target_bits = sizeof(abi_ulong) * 8;
8137     unsigned host_bits = sizeof(*host_mask) * 8;
8138     abi_ulong *target_mask;
8139     unsigned i, j;
8140 
8141     assert(host_size >= target_size);
8142 
8143     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8144     if (!target_mask) {
8145         return -TARGET_EFAULT;
8146     }
8147 
8148     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8149         unsigned bit = i * target_bits;
8150         abi_ulong val = 0;
8151 
8152         for (j = 0; j < target_bits; j++, bit++) {
8153             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8154                 val |= 1UL << j;
8155             }
8156         }
8157         __put_user(val, &target_mask[i]);
8158     }
8159 
8160     unlock_user(target_mask, target_addr, target_size);
8161     return 0;
8162 }
8163 
8164 /* This is an internal helper for do_syscall so that it is easier
8165  * to have a single return point, allowing actions such as logging
8166  * of syscall results to be performed.
8167  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8168  */
8169 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8170                             abi_long arg2, abi_long arg3, abi_long arg4,
8171                             abi_long arg5, abi_long arg6, abi_long arg7,
8172                             abi_long arg8)
8173 {
8174     CPUState *cpu = env_cpu(cpu_env);
8175     abi_long ret;
8176 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8177     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8178     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8179     || defined(TARGET_NR_statx)
8180     struct stat st;
8181 #endif
8182 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8183     || defined(TARGET_NR_fstatfs)
8184     struct statfs stfs;
8185 #endif
8186     void *p;
8187 
8188     switch(num) {
8189     case TARGET_NR_exit:
8190         /* In old applications this may be used to implement _exit(2).
8191            However in threaded applications it is used for thread termination,
8192            and _exit_group is used for application termination.
8193            Do thread termination if we have more than one thread.  */
8194 
8195         if (block_signals()) {
8196             return -TARGET_ERESTARTSYS;
8197         }
8198 
8199         pthread_mutex_lock(&clone_lock);
8200 
8201         if (CPU_NEXT(first_cpu)) {
8202             TaskState *ts = cpu->opaque;
8203 
8204             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8205             object_unref(OBJECT(cpu));
8206             /*
8207              * At this point the CPU should be unrealized and removed
8208              * from cpu lists. We can clean-up the rest of the thread
8209              * data without the lock held.
8210              */
8211 
8212             pthread_mutex_unlock(&clone_lock);
8213 
8214             if (ts->child_tidptr) {
8215                 put_user_u32(0, ts->child_tidptr);
8216                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8217                           NULL, NULL, 0);
8218             }
8219             thread_cpu = NULL;
8220             g_free(ts);
8221             rcu_unregister_thread();
8222             pthread_exit(NULL);
8223         }
8224 
8225         pthread_mutex_unlock(&clone_lock);
8226         preexit_cleanup(cpu_env, arg1);
8227         _exit(arg1);
8228         return 0; /* avoid warning */
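    /*
     * For read/write, fds created by emulated syscalls (signalfd, netlink
     * sockets, etc.) may have registered fd_trans conversion hooks; when
     * present they are applied so the data crossing the fd is presented in
     * the layout the guest expects.
     */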
8229     case TARGET_NR_read:
8230         if (arg2 == 0 && arg3 == 0) {
8231             return get_errno(safe_read(arg1, 0, 0));
8232         } else {
8233             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8234                 return -TARGET_EFAULT;
8235             ret = get_errno(safe_read(arg1, p, arg3));
8236             if (ret >= 0 &&
8237                 fd_trans_host_to_target_data(arg1)) {
8238                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8239             }
8240             unlock_user(p, arg2, ret);
8241         }
8242         return ret;
8243     case TARGET_NR_write:
8244         if (arg2 == 0 && arg3 == 0) {
8245             return get_errno(safe_write(arg1, 0, 0));
8246         }
8247         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8248             return -TARGET_EFAULT;
8249         if (fd_trans_target_to_host_data(arg1)) {
8250             void *copy = g_malloc(arg3);
8251             memcpy(copy, p, arg3);
8252             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8253             if (ret >= 0) {
8254                 ret = get_errno(safe_write(arg1, copy, ret));
8255             }
8256             g_free(copy);
8257         } else {
8258             ret = get_errno(safe_write(arg1, p, arg3));
8259         }
8260         unlock_user(p, arg2, 0);
8261         return ret;
8262 
8263 #ifdef TARGET_NR_open
8264     case TARGET_NR_open:
8265         if (!(p = lock_user_string(arg1)))
8266             return -TARGET_EFAULT;
8267         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8268                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8269                                   arg3));
8270         fd_trans_unregister(ret);
8271         unlock_user(p, arg1, 0);
8272         return ret;
8273 #endif
8274     case TARGET_NR_openat:
8275         if (!(p = lock_user_string(arg2)))
8276             return -TARGET_EFAULT;
8277         ret = get_errno(do_openat(cpu_env, arg1, p,
8278                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8279                                   arg4));
8280         fd_trans_unregister(ret);
8281         unlock_user(p, arg2, 0);
8282         return ret;
8283 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8284     case TARGET_NR_name_to_handle_at:
8285         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8286         return ret;
8287 #endif
8288 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8289     case TARGET_NR_open_by_handle_at:
8290         ret = do_open_by_handle_at(arg1, arg2, arg3);
8291         fd_trans_unregister(ret);
8292         return ret;
8293 #endif
8294     case TARGET_NR_close:
8295         fd_trans_unregister(arg1);
8296         return get_errno(close(arg1));
8297 
8298     case TARGET_NR_brk:
8299         return do_brk(arg1);
8300 #ifdef TARGET_NR_fork
8301     case TARGET_NR_fork:
8302         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8303 #endif
8304 #ifdef TARGET_NR_waitpid
8305     case TARGET_NR_waitpid:
8306         {
8307             int status;
8308             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8309             if (!is_error(ret) && arg2 && ret
8310                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8311                 return -TARGET_EFAULT;
8312         }
8313         return ret;
8314 #endif
8315 #ifdef TARGET_NR_waitid
8316     case TARGET_NR_waitid:
8317         {
8318             siginfo_t info;
8319             info.si_pid = 0;
8320             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8321             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8322                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8323                     return -TARGET_EFAULT;
8324                 host_to_target_siginfo(p, &info);
8325                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8326             }
8327         }
8328         return ret;
8329 #endif
8330 #ifdef TARGET_NR_creat /* not on alpha */
8331     case TARGET_NR_creat:
8332         if (!(p = lock_user_string(arg1)))
8333             return -TARGET_EFAULT;
8334         ret = get_errno(creat(p, arg2));
8335         fd_trans_unregister(ret);
8336         unlock_user(p, arg1, 0);
8337         return ret;
8338 #endif
8339 #ifdef TARGET_NR_link
8340     case TARGET_NR_link:
8341         {
8342             void * p2;
8343             p = lock_user_string(arg1);
8344             p2 = lock_user_string(arg2);
8345             if (!p || !p2)
8346                 ret = -TARGET_EFAULT;
8347             else
8348                 ret = get_errno(link(p, p2));
8349             unlock_user(p2, arg2, 0);
8350             unlock_user(p, arg1, 0);
8351         }
8352         return ret;
8353 #endif
8354 #if defined(TARGET_NR_linkat)
8355     case TARGET_NR_linkat:
8356         {
8357             void * p2 = NULL;
8358             if (!arg2 || !arg4)
8359                 return -TARGET_EFAULT;
8360             p  = lock_user_string(arg2);
8361             p2 = lock_user_string(arg4);
8362             if (!p || !p2)
8363                 ret = -TARGET_EFAULT;
8364             else
8365                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8366             unlock_user(p, arg2, 0);
8367             unlock_user(p2, arg4, 0);
8368         }
8369         return ret;
8370 #endif
8371 #ifdef TARGET_NR_unlink
8372     case TARGET_NR_unlink:
8373         if (!(p = lock_user_string(arg1)))
8374             return -TARGET_EFAULT;
8375         ret = get_errno(unlink(p));
8376         unlock_user(p, arg1, 0);
8377         return ret;
8378 #endif
8379 #if defined(TARGET_NR_unlinkat)
8380     case TARGET_NR_unlinkat:
8381         if (!(p = lock_user_string(arg2)))
8382             return -TARGET_EFAULT;
8383         ret = get_errno(unlinkat(arg1, p, arg3));
8384         unlock_user(p, arg2, 0);
8385         return ret;
8386 #endif
8387     case TARGET_NR_execve:
8388         {
8389             char **argp, **envp;
8390             int argc, envc;
8391             abi_ulong gp;
8392             abi_ulong guest_argp;
8393             abi_ulong guest_envp;
8394             abi_ulong addr;
8395             char **q;
8396             int total_size = 0;
8397 
8398             argc = 0;
8399             guest_argp = arg2;
8400             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8401                 if (get_user_ual(addr, gp))
8402                     return -TARGET_EFAULT;
8403                 if (!addr)
8404                     break;
8405                 argc++;
8406             }
8407             envc = 0;
8408             guest_envp = arg3;
8409             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8410                 if (get_user_ual(addr, gp))
8411                     return -TARGET_EFAULT;
8412                 if (!addr)
8413                     break;
8414                 envc++;
8415             }
8416 
8417             argp = g_new0(char *, argc + 1);
8418             envp = g_new0(char *, envc + 1);
8419 
8420             for (gp = guest_argp, q = argp; gp;
8421                   gp += sizeof(abi_ulong), q++) {
8422                 if (get_user_ual(addr, gp))
8423                     goto execve_efault;
8424                 if (!addr)
8425                     break;
8426                 if (!(*q = lock_user_string(addr)))
8427                     goto execve_efault;
8428                 total_size += strlen(*q) + 1;
8429             }
8430             *q = NULL;
8431 
8432             for (gp = guest_envp, q = envp; gp;
8433                   gp += sizeof(abi_ulong), q++) {
8434                 if (get_user_ual(addr, gp))
8435                     goto execve_efault;
8436                 if (!addr)
8437                     break;
8438                 if (!(*q = lock_user_string(addr)))
8439                     goto execve_efault;
8440                 total_size += strlen(*q) + 1;
8441             }
8442             *q = NULL;
8443 
8444             if (!(p = lock_user_string(arg1)))
8445                 goto execve_efault;
8446             /* Although execve() is not an interruptible syscall it is
8447              * a special case where we must use the safe_syscall wrapper:
8448              * if we allow a signal to happen before we make the host
8449              * syscall then we will 'lose' it, because at the point of
8450              * execve the process leaves QEMU's control. So we use the
8451              * safe syscall wrapper to ensure that we either take the
8452              * signal as a guest signal, or else it does not happen
8453              * before the execve completes and makes it the other
8454              * program's problem.
8455              */
8456             ret = get_errno(safe_execve(p, argp, envp));
8457             unlock_user(p, arg1, 0);
8458 
8459             goto execve_end;
8460 
8461         execve_efault:
8462             ret = -TARGET_EFAULT;
8463 
8464         execve_end:
8465             for (gp = guest_argp, q = argp; *q;
8466                   gp += sizeof(abi_ulong), q++) {
8467                 if (get_user_ual(addr, gp)
8468                     || !addr)
8469                     break;
8470                 unlock_user(*q, addr, 0);
8471             }
8472             for (gp = guest_envp, q = envp; *q;
8473                   gp += sizeof(abi_ulong), q++) {
8474                 if (get_user_ual(addr, gp)
8475                     || !addr)
8476                     break;
8477                 unlock_user(*q, addr, 0);
8478             }
8479 
8480             g_free(argp);
8481             g_free(envp);
8482         }
8483         return ret;
8484     case TARGET_NR_chdir:
8485         if (!(p = lock_user_string(arg1)))
8486             return -TARGET_EFAULT;
8487         ret = get_errno(chdir(p));
8488         unlock_user(p, arg1, 0);
8489         return ret;
8490 #ifdef TARGET_NR_time
8491     case TARGET_NR_time:
8492         {
8493             time_t host_time;
8494             ret = get_errno(time(&host_time));
8495             if (!is_error(ret)
8496                 && arg1
8497                 && put_user_sal(host_time, arg1))
8498                 return -TARGET_EFAULT;
8499         }
8500         return ret;
8501 #endif
8502 #ifdef TARGET_NR_mknod
8503     case TARGET_NR_mknod:
8504         if (!(p = lock_user_string(arg1)))
8505             return -TARGET_EFAULT;
8506         ret = get_errno(mknod(p, arg2, arg3));
8507         unlock_user(p, arg1, 0);
8508         return ret;
8509 #endif
8510 #if defined(TARGET_NR_mknodat)
8511     case TARGET_NR_mknodat:
8512         if (!(p = lock_user_string(arg2)))
8513             return -TARGET_EFAULT;
8514         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8515         unlock_user(p, arg2, 0);
8516         return ret;
8517 #endif
8518 #ifdef TARGET_NR_chmod
8519     case TARGET_NR_chmod:
8520         if (!(p = lock_user_string(arg1)))
8521             return -TARGET_EFAULT;
8522         ret = get_errno(chmod(p, arg2));
8523         unlock_user(p, arg1, 0);
8524         return ret;
8525 #endif
8526 #ifdef TARGET_NR_lseek
8527     case TARGET_NR_lseek:
8528         return get_errno(lseek(arg1, arg2, arg3));
8529 #endif
8530 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8531     /* Alpha specific */
8532     case TARGET_NR_getxpid:
8533         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8534         return get_errno(getpid());
8535 #endif
8536 #ifdef TARGET_NR_getpid
8537     case TARGET_NR_getpid:
8538         return get_errno(getpid());
8539 #endif
8540     case TARGET_NR_mount:
8541         {
8542             /* need to look at the data field */
8543             void *p2, *p3;
8544 
8545             if (arg1) {
8546                 p = lock_user_string(arg1);
8547                 if (!p) {
8548                     return -TARGET_EFAULT;
8549                 }
8550             } else {
8551                 p = NULL;
8552             }
8553 
8554             p2 = lock_user_string(arg2);
8555             if (!p2) {
8556                 if (arg1) {
8557                     unlock_user(p, arg1, 0);
8558                 }
8559                 return -TARGET_EFAULT;
8560             }
8561 
8562             if (arg3) {
8563                 p3 = lock_user_string(arg3);
8564                 if (!p3) {
8565                     if (arg1) {
8566                         unlock_user(p, arg1, 0);
8567                     }
8568                     unlock_user(p2, arg2, 0);
8569                     return -TARGET_EFAULT;
8570                 }
8571             } else {
8572                 p3 = NULL;
8573             }
8574 
8575             /* FIXME - arg5 should be locked, but it isn't clear how to
8576              * do that since it's not guaranteed to be a NULL-terminated
8577              * string.
8578              */
8579             if (!arg5) {
8580                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8581             } else {
8582                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8583             }
8584             ret = get_errno(ret);
8585 
8586             if (arg1) {
8587                 unlock_user(p, arg1, 0);
8588             }
8589             unlock_user(p2, arg2, 0);
8590             if (arg3) {
8591                 unlock_user(p3, arg3, 0);
8592             }
8593         }
8594         return ret;
8595 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8596 #if defined(TARGET_NR_umount)
8597     case TARGET_NR_umount:
8598 #endif
8599 #if defined(TARGET_NR_oldumount)
8600     case TARGET_NR_oldumount:
8601 #endif
8602         if (!(p = lock_user_string(arg1)))
8603             return -TARGET_EFAULT;
8604         ret = get_errno(umount(p));
8605         unlock_user(p, arg1, 0);
8606         return ret;
8607 #endif
8608 #ifdef TARGET_NR_stime /* not on alpha */
8609     case TARGET_NR_stime:
8610         {
8611             struct timespec ts;
8612             ts.tv_nsec = 0;
8613             if (get_user_sal(ts.tv_sec, arg1)) {
8614                 return -TARGET_EFAULT;
8615             }
8616             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8617         }
8618 #endif
8619 #ifdef TARGET_NR_alarm /* not on alpha */
8620     case TARGET_NR_alarm:
8621         return alarm(arg1);
8622 #endif
8623 #ifdef TARGET_NR_pause /* not on alpha */
8624     case TARGET_NR_pause:
8625         if (!block_signals()) {
8626             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8627         }
8628         return -TARGET_EINTR;
8629 #endif
8630 #ifdef TARGET_NR_utime
8631     case TARGET_NR_utime:
8632         {
8633             struct utimbuf tbuf, *host_tbuf;
8634             struct target_utimbuf *target_tbuf;
8635             if (arg2) {
8636                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8637                     return -TARGET_EFAULT;
8638                 tbuf.actime = tswapal(target_tbuf->actime);
8639                 tbuf.modtime = tswapal(target_tbuf->modtime);
8640                 unlock_user_struct(target_tbuf, arg2, 0);
8641                 host_tbuf = &tbuf;
8642             } else {
8643                 host_tbuf = NULL;
8644             }
8645             if (!(p = lock_user_string(arg1)))
8646                 return -TARGET_EFAULT;
8647             ret = get_errno(utime(p, host_tbuf));
8648             unlock_user(p, arg1, 0);
8649         }
8650         return ret;
8651 #endif
8652 #ifdef TARGET_NR_utimes
8653     case TARGET_NR_utimes:
8654         {
8655             struct timeval *tvp, tv[2];
8656             if (arg2) {
8657                 if (copy_from_user_timeval(&tv[0], arg2)
8658                     || copy_from_user_timeval(&tv[1],
8659                                               arg2 + sizeof(struct target_timeval)))
8660                     return -TARGET_EFAULT;
8661                 tvp = tv;
8662             } else {
8663                 tvp = NULL;
8664             }
8665             if (!(p = lock_user_string(arg1)))
8666                 return -TARGET_EFAULT;
8667             ret = get_errno(utimes(p, tvp));
8668             unlock_user(p, arg1, 0);
8669         }
8670         return ret;
8671 #endif
8672 #if defined(TARGET_NR_futimesat)
8673     case TARGET_NR_futimesat:
8674         {
8675             struct timeval *tvp, tv[2];
8676             if (arg3) {
8677                 if (copy_from_user_timeval(&tv[0], arg3)
8678                     || copy_from_user_timeval(&tv[1],
8679                                               arg3 + sizeof(struct target_timeval)))
8680                     return -TARGET_EFAULT;
8681                 tvp = tv;
8682             } else {
8683                 tvp = NULL;
8684             }
8685             if (!(p = lock_user_string(arg2))) {
8686                 return -TARGET_EFAULT;
8687             }
8688             ret = get_errno(futimesat(arg1, path(p), tvp));
8689             unlock_user(p, arg2, 0);
8690         }
8691         return ret;
8692 #endif
8693 #ifdef TARGET_NR_access
8694     case TARGET_NR_access:
8695         if (!(p = lock_user_string(arg1))) {
8696             return -TARGET_EFAULT;
8697         }
8698         ret = get_errno(access(path(p), arg2));
8699         unlock_user(p, arg1, 0);
8700         return ret;
8701 #endif
8702 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8703     case TARGET_NR_faccessat:
8704         if (!(p = lock_user_string(arg2))) {
8705             return -TARGET_EFAULT;
8706         }
8707         ret = get_errno(faccessat(arg1, p, arg3, 0));
8708         unlock_user(p, arg2, 0);
8709         return ret;
8710 #endif
8711 #ifdef TARGET_NR_nice /* not on alpha */
8712     case TARGET_NR_nice:
8713         return get_errno(nice(arg1));
8714 #endif
8715     case TARGET_NR_sync:
8716         sync();
8717         return 0;
8718 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8719     case TARGET_NR_syncfs:
8720         return get_errno(syncfs(arg1));
8721 #endif
8722     case TARGET_NR_kill:
8723         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8724 #ifdef TARGET_NR_rename
8725     case TARGET_NR_rename:
8726         {
8727             void *p2;
8728             p = lock_user_string(arg1);
8729             p2 = lock_user_string(arg2);
8730             if (!p || !p2)
8731                 ret = -TARGET_EFAULT;
8732             else
8733                 ret = get_errno(rename(p, p2));
8734             unlock_user(p2, arg2, 0);
8735             unlock_user(p, arg1, 0);
8736         }
8737         return ret;
8738 #endif
8739 #if defined(TARGET_NR_renameat)
8740     case TARGET_NR_renameat:
8741         {
8742             void *p2;
8743             p  = lock_user_string(arg2);
8744             p2 = lock_user_string(arg4);
8745             if (!p || !p2)
8746                 ret = -TARGET_EFAULT;
8747             else
8748                 ret = get_errno(renameat(arg1, p, arg3, p2));
8749             unlock_user(p2, arg4, 0);
8750             unlock_user(p, arg2, 0);
8751         }
8752         return ret;
8753 #endif
8754 #if defined(TARGET_NR_renameat2)
8755     case TARGET_NR_renameat2:
8756         {
8757             void *p2;
8758             p  = lock_user_string(arg2);
8759             p2 = lock_user_string(arg4);
8760             if (!p || !p2) {
8761                 ret = -TARGET_EFAULT;
8762             } else {
8763                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8764             }
8765             unlock_user(p2, arg4, 0);
8766             unlock_user(p, arg2, 0);
8767         }
8768         return ret;
8769 #endif
8770 #ifdef TARGET_NR_mkdir
8771     case TARGET_NR_mkdir:
8772         if (!(p = lock_user_string(arg1)))
8773             return -TARGET_EFAULT;
8774         ret = get_errno(mkdir(p, arg2));
8775         unlock_user(p, arg1, 0);
8776         return ret;
8777 #endif
8778 #if defined(TARGET_NR_mkdirat)
8779     case TARGET_NR_mkdirat:
8780         if (!(p = lock_user_string(arg2)))
8781             return -TARGET_EFAULT;
8782         ret = get_errno(mkdirat(arg1, p, arg3));
8783         unlock_user(p, arg2, 0);
8784         return ret;
8785 #endif
8786 #ifdef TARGET_NR_rmdir
8787     case TARGET_NR_rmdir:
8788         if (!(p = lock_user_string(arg1)))
8789             return -TARGET_EFAULT;
8790         ret = get_errno(rmdir(p));
8791         unlock_user(p, arg1, 0);
8792         return ret;
8793 #endif
8794     case TARGET_NR_dup:
8795         ret = get_errno(dup(arg1));
8796         if (ret >= 0) {
8797             fd_trans_dup(arg1, ret);
8798         }
8799         return ret;
8800 #ifdef TARGET_NR_pipe
8801     case TARGET_NR_pipe:
8802         return do_pipe(cpu_env, arg1, 0, 0);
8803 #endif
8804 #ifdef TARGET_NR_pipe2
8805     case TARGET_NR_pipe2:
8806         return do_pipe(cpu_env, arg1,
8807                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8808 #endif
8809     case TARGET_NR_times:
8810         {
8811             struct target_tms *tmsp;
8812             struct tms tms;
8813             ret = get_errno(times(&tms));
8814             if (arg1) {
8815                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8816                 if (!tmsp)
8817                     return -TARGET_EFAULT;
8818                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8819                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8820                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8821                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8822             }
8823             if (!is_error(ret))
8824                 ret = host_to_target_clock_t(ret);
8825         }
8826         return ret;
8827     case TARGET_NR_acct:
8828         if (arg1 == 0) {
8829             ret = get_errno(acct(NULL));
8830         } else {
8831             if (!(p = lock_user_string(arg1))) {
8832                 return -TARGET_EFAULT;
8833             }
8834             ret = get_errno(acct(path(p)));
8835             unlock_user(p, arg1, 0);
8836         }
8837         return ret;
8838 #ifdef TARGET_NR_umount2
8839     case TARGET_NR_umount2:
8840         if (!(p = lock_user_string(arg1)))
8841             return -TARGET_EFAULT;
8842         ret = get_errno(umount2(p, arg2));
8843         unlock_user(p, arg1, 0);
8844         return ret;
8845 #endif
8846     case TARGET_NR_ioctl:
8847         return do_ioctl(arg1, arg2, arg3);
8848 #ifdef TARGET_NR_fcntl
8849     case TARGET_NR_fcntl:
8850         return do_fcntl(arg1, arg2, arg3);
8851 #endif
8852     case TARGET_NR_setpgid:
8853         return get_errno(setpgid(arg1, arg2));
8854     case TARGET_NR_umask:
8855         return get_errno(umask(arg1));
8856     case TARGET_NR_chroot:
8857         if (!(p = lock_user_string(arg1)))
8858             return -TARGET_EFAULT;
8859         ret = get_errno(chroot(p));
8860         unlock_user(p, arg1, 0);
8861         return ret;
8862 #ifdef TARGET_NR_dup2
8863     case TARGET_NR_dup2:
8864         ret = get_errno(dup2(arg1, arg2));
8865         if (ret >= 0) {
8866             fd_trans_dup(arg1, arg2);
8867         }
8868         return ret;
8869 #endif
8870 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8871     case TARGET_NR_dup3:
8872     {
8873         int host_flags;
8874 
8875         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8876             return -EINVAL;
8877             return -TARGET_EINVAL;
8878         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8879         ret = get_errno(dup3(arg1, arg2, host_flags));
8880         if (ret >= 0) {
8881             fd_trans_dup(arg1, arg2);
8882         }
8883         return ret;
8884     }
8885 #endif
8886 #ifdef TARGET_NR_getppid /* not on alpha */
8887     case TARGET_NR_getppid:
8888         return get_errno(getppid());
8889 #endif
8890 #ifdef TARGET_NR_getpgrp
8891     case TARGET_NR_getpgrp:
8892         return get_errno(getpgrp());
8893 #endif
8894     case TARGET_NR_setsid:
8895         return get_errno(setsid());
8896 #ifdef TARGET_NR_sigaction
8897     case TARGET_NR_sigaction:
8898         {
8899 #if defined(TARGET_ALPHA)
8900             struct target_sigaction act, oact, *pact = 0;
8901             struct target_old_sigaction *old_act;
8902             if (arg2) {
8903                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8904                     return -TARGET_EFAULT;
8905                 act._sa_handler = old_act->_sa_handler;
8906                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8907                 act.sa_flags = old_act->sa_flags;
8908                 act.sa_restorer = 0;
8909                 unlock_user_struct(old_act, arg2, 0);
8910                 pact = &act;
8911             }
8912             ret = get_errno(do_sigaction(arg1, pact, &oact));
8913             if (!is_error(ret) && arg3) {
8914                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8915                     return -TARGET_EFAULT;
8916                 old_act->_sa_handler = oact._sa_handler;
8917                 old_act->sa_mask = oact.sa_mask.sig[0];
8918                 old_act->sa_flags = oact.sa_flags;
8919                 unlock_user_struct(old_act, arg3, 1);
8920             }
8921 #elif defined(TARGET_MIPS)
8922             struct target_sigaction act, oact, *pact, *old_act;
8923 
8924             if (arg2) {
8925                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8926                     return -TARGET_EFAULT;
8927                 act._sa_handler = old_act->_sa_handler;
8928                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8929                 act.sa_flags = old_act->sa_flags;
8930                 unlock_user_struct(old_act, arg2, 0);
8931                 pact = &act;
8932             } else {
8933                 pact = NULL;
8934             }
8935 
8936             ret = get_errno(do_sigaction(arg1, pact, &oact));
8937 
8938             if (!is_error(ret) && arg3) {
8939                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8940                     return -TARGET_EFAULT;
8941                 old_act->_sa_handler = oact._sa_handler;
8942                 old_act->sa_flags = oact.sa_flags;
8943                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8944                 old_act->sa_mask.sig[1] = 0;
8945                 old_act->sa_mask.sig[2] = 0;
8946                 old_act->sa_mask.sig[3] = 0;
8947                 unlock_user_struct(old_act, arg3, 1);
8948             }
8949 #else
8950             struct target_old_sigaction *old_act;
8951             struct target_sigaction act, oact, *pact;
8952             if (arg2) {
8953                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8954                     return -TARGET_EFAULT;
8955                 act._sa_handler = old_act->_sa_handler;
8956                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8957                 act.sa_flags = old_act->sa_flags;
8958                 act.sa_restorer = old_act->sa_restorer;
8959 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8960                 act.ka_restorer = 0;
8961 #endif
8962                 unlock_user_struct(old_act, arg2, 0);
8963                 pact = &act;
8964             } else {
8965                 pact = NULL;
8966             }
8967             ret = get_errno(do_sigaction(arg1, pact, &oact));
8968             if (!is_error(ret) && arg3) {
8969                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8970                     return -TARGET_EFAULT;
8971                 old_act->_sa_handler = oact._sa_handler;
8972                 old_act->sa_mask = oact.sa_mask.sig[0];
8973                 old_act->sa_flags = oact.sa_flags;
8974                 old_act->sa_restorer = oact.sa_restorer;
8975                 unlock_user_struct(old_act, arg3, 1);
8976             }
8977 #endif
8978         }
8979         return ret;
8980 #endif
8981     case TARGET_NR_rt_sigaction:
8982         {
8983 #if defined(TARGET_ALPHA)
8984             /* For Alpha and SPARC this is a 5 argument syscall, with
8985              * a 'restorer' parameter which must be copied into the
8986              * sa_restorer field of the sigaction struct.
8987              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8988              * and arg5 is the sigsetsize.
8989              * Alpha also has a separate rt_sigaction struct that it uses
8990              * here; SPARC uses the usual sigaction struct.
8991              */
8992             struct target_rt_sigaction *rt_act;
8993             struct target_sigaction act, oact, *pact = 0;
8994 
8995             if (arg4 != sizeof(target_sigset_t)) {
8996                 return -TARGET_EINVAL;
8997             }
8998             if (arg2) {
8999                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9000                     return -TARGET_EFAULT;
9001                 act._sa_handler = rt_act->_sa_handler;
9002                 act.sa_mask = rt_act->sa_mask;
9003                 act.sa_flags = rt_act->sa_flags;
9004                 act.sa_restorer = arg5;
9005                 unlock_user_struct(rt_act, arg2, 0);
9006                 pact = &act;
9007             }
9008             ret = get_errno(do_sigaction(arg1, pact, &oact));
9009             if (!is_error(ret) && arg3) {
9010                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9011                     return -TARGET_EFAULT;
9012                 rt_act->_sa_handler = oact._sa_handler;
9013                 rt_act->sa_mask = oact.sa_mask;
9014                 rt_act->sa_flags = oact.sa_flags;
9015                 unlock_user_struct(rt_act, arg3, 1);
9016             }
9017 #else
9018 #ifdef TARGET_SPARC
9019             target_ulong restorer = arg4;
9020             target_ulong sigsetsize = arg5;
9021 #else
9022             target_ulong sigsetsize = arg4;
9023 #endif
9024             struct target_sigaction *act;
9025             struct target_sigaction *oact;
9026 
9027             if (sigsetsize != sizeof(target_sigset_t)) {
9028                 return -TARGET_EINVAL;
9029             }
9030             if (arg2) {
9031                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9032                     return -TARGET_EFAULT;
9033                 }
9034 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9035                 act->ka_restorer = restorer;
9036 #endif
9037             } else {
9038                 act = NULL;
9039             }
9040             if (arg3) {
9041                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9042                     ret = -TARGET_EFAULT;
9043                     goto rt_sigaction_fail;
9044                 }
9045             } else
9046                 oact = NULL;
9047             ret = get_errno(do_sigaction(arg1, act, oact));
9048         rt_sigaction_fail:
9049             if (act)
9050                 unlock_user_struct(act, arg2, 0);
9051             if (oact)
9052                 unlock_user_struct(oact, arg3, 1);
9053 #endif
9054         }
9055         return ret;
9056 #ifdef TARGET_NR_sgetmask /* not on alpha */
9057     case TARGET_NR_sgetmask:
9058         {
9059             sigset_t cur_set;
9060             abi_ulong target_set;
9061             ret = do_sigprocmask(0, NULL, &cur_set);
9062             if (!ret) {
9063                 host_to_target_old_sigset(&target_set, &cur_set);
9064                 ret = target_set;
9065             }
9066         }
9067         return ret;
9068 #endif
9069 #ifdef TARGET_NR_ssetmask /* not on alpha */
9070     case TARGET_NR_ssetmask:
9071         {
9072             sigset_t set, oset;
9073             abi_ulong target_set = arg1;
9074             target_to_host_old_sigset(&set, &target_set);
9075             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9076             if (!ret) {
9077                 host_to_target_old_sigset(&target_set, &oset);
9078                 ret = target_set;
9079             }
9080         }
9081         return ret;
9082 #endif
9083 #ifdef TARGET_NR_sigprocmask
9084     case TARGET_NR_sigprocmask:
9085         {
9086 #if defined(TARGET_ALPHA)
9087             sigset_t set, oldset;
9088             abi_ulong mask;
9089             int how;
9090 
9091             switch (arg1) {
9092             case TARGET_SIG_BLOCK:
9093                 how = SIG_BLOCK;
9094                 break;
9095             case TARGET_SIG_UNBLOCK:
9096                 how = SIG_UNBLOCK;
9097                 break;
9098             case TARGET_SIG_SETMASK:
9099                 how = SIG_SETMASK;
9100                 break;
9101             default:
9102                 return -TARGET_EINVAL;
9103             }
9104             mask = arg2;
9105             target_to_host_old_sigset(&set, &mask);
9106 
9107             ret = do_sigprocmask(how, &set, &oldset);
9108             if (!is_error(ret)) {
9109                 host_to_target_old_sigset(&mask, &oldset);
9110                 ret = mask;
9111                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9112             }
9113 #else
9114             sigset_t set, oldset, *set_ptr;
9115             int how;
9116 
9117             if (arg2) {
9118                 switch (arg1) {
9119                 case TARGET_SIG_BLOCK:
9120                     how = SIG_BLOCK;
9121                     break;
9122                 case TARGET_SIG_UNBLOCK:
9123                     how = SIG_UNBLOCK;
9124                     break;
9125                 case TARGET_SIG_SETMASK:
9126                     how = SIG_SETMASK;
9127                     break;
9128                 default:
9129                     return -TARGET_EINVAL;
9130                 }
9131                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9132                     return -TARGET_EFAULT;
9133                 target_to_host_old_sigset(&set, p);
9134                 unlock_user(p, arg2, 0);
9135                 set_ptr = &set;
9136             } else {
9137                 how = 0;
9138                 set_ptr = NULL;
9139             }
9140             ret = do_sigprocmask(how, set_ptr, &oldset);
9141             if (!is_error(ret) && arg3) {
9142                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9143                     return -TARGET_EFAULT;
9144                 host_to_target_old_sigset(p, &oldset);
9145                 unlock_user(p, arg3, sizeof(target_sigset_t));
9146             }
9147 #endif
9148         }
9149         return ret;
9150 #endif
9151     case TARGET_NR_rt_sigprocmask:
9152         {
9153             int how = arg1;
9154             sigset_t set, oldset, *set_ptr;
9155 
9156             if (arg4 != sizeof(target_sigset_t)) {
9157                 return -TARGET_EINVAL;
9158             }
9159 
9160             if (arg2) {
9161                 switch(how) {
9162                 case TARGET_SIG_BLOCK:
9163                     how = SIG_BLOCK;
9164                     break;
9165                 case TARGET_SIG_UNBLOCK:
9166                     how = SIG_UNBLOCK;
9167                     break;
9168                 case TARGET_SIG_SETMASK:
9169                     how = SIG_SETMASK;
9170                     break;
9171                 default:
9172                     return -TARGET_EINVAL;
9173                 }
9174                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9175                     return -TARGET_EFAULT;
9176                 target_to_host_sigset(&set, p);
9177                 unlock_user(p, arg2, 0);
9178                 set_ptr = &set;
9179             } else {
9180                 how = 0;
9181                 set_ptr = NULL;
9182             }
9183             ret = do_sigprocmask(how, set_ptr, &oldset);
9184             if (!is_error(ret) && arg3) {
9185                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9186                     return -TARGET_EFAULT;
9187                 host_to_target_sigset(p, &oldset);
9188                 unlock_user(p, arg3, sizeof(target_sigset_t));
9189             }
9190         }
9191         return ret;
9192 #ifdef TARGET_NR_sigpending
9193     case TARGET_NR_sigpending:
9194         {
9195             sigset_t set;
9196             ret = get_errno(sigpending(&set));
9197             if (!is_error(ret)) {
9198                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9199                     return -TARGET_EFAULT;
9200                 host_to_target_old_sigset(p, &set);
9201                 unlock_user(p, arg1, sizeof(target_sigset_t));
9202             }
9203         }
9204         return ret;
9205 #endif
9206     case TARGET_NR_rt_sigpending:
9207         {
9208             sigset_t set;
9209 
9210             /* Yes, this check is >, not != like most. We follow the kernel's
9211              * logic here: the kernel implements NR_sigpending through the same
9212              * code path, and in that case the old_sigset_t it handles is
9213              * smaller in size.
9214              */
9215             if (arg2 > sizeof(target_sigset_t)) {
9216                 return -TARGET_EINVAL;
9217             }
9218 
9219             ret = get_errno(sigpending(&set));
9220             if (!is_error(ret)) {
9221                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9222                     return -TARGET_EFAULT;
9223                 host_to_target_sigset(p, &set);
9224                 unlock_user(p, arg1, sizeof(target_sigset_t));
9225             }
9226         }
9227         return ret;
9228 #ifdef TARGET_NR_sigsuspend
9229     case TARGET_NR_sigsuspend:
9230         {
9231             TaskState *ts = cpu->opaque;
9232 #if defined(TARGET_ALPHA)
9233             abi_ulong mask = arg1;
9234             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9235 #else
9236             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9237                 return -TARGET_EFAULT;
9238             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9239             unlock_user(p, arg1, 0);
9240 #endif
9241             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9242                                                SIGSET_T_SIZE));
9243             if (ret != -TARGET_ERESTARTSYS) {
9244                 ts->in_sigsuspend = 1;
9245             }
9246         }
9247         return ret;
9248 #endif
9249     case TARGET_NR_rt_sigsuspend:
9250         {
9251             TaskState *ts = cpu->opaque;
9252 
9253             if (arg2 != sizeof(target_sigset_t)) {
9254                 return -TARGET_EINVAL;
9255             }
9256             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9257                 return -TARGET_EFAULT;
9258             target_to_host_sigset(&ts->sigsuspend_mask, p);
9259             unlock_user(p, arg1, 0);
9260             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9261                                                SIGSET_T_SIZE));
9262             if (ret != -TARGET_ERESTARTSYS) {
9263                 ts->in_sigsuspend = 1;
9264             }
9265         }
9266         return ret;
9267 #ifdef TARGET_NR_rt_sigtimedwait
9268     case TARGET_NR_rt_sigtimedwait:
9269         {
9270             sigset_t set;
9271             struct timespec uts, *puts;
9272             siginfo_t uinfo;
9273 
9274             if (arg4 != sizeof(target_sigset_t)) {
9275                 return -TARGET_EINVAL;
9276             }
9277 
9278             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9279                 return -TARGET_EFAULT;
9280             target_to_host_sigset(&set, p);
9281             unlock_user(p, arg1, 0);
9282             if (arg3) {
9283                 puts = &uts;
9284                 if (target_to_host_timespec(puts, arg3)) {
9285                     return -TARGET_EFAULT;
9286                 }
9287             } else {
9288                 puts = NULL;
9289             }
9290             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9291                                                  SIGSET_T_SIZE));
9292             if (!is_error(ret)) {
9293                 if (arg2) {
9294                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9295                                   0);
9296                     if (!p) {
9297                         return -TARGET_EFAULT;
9298                     }
9299                     host_to_target_siginfo(p, &uinfo);
9300                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9301                 }
9302                 ret = host_to_target_signal(ret);
9303             }
9304         }
9305         return ret;
9306 #endif
9307 #ifdef TARGET_NR_rt_sigtimedwait_time64
9308     case TARGET_NR_rt_sigtimedwait_time64:
9309         {
9310             sigset_t set;
9311             struct timespec uts, *puts;
9312             siginfo_t uinfo;
9313 
9314             if (arg4 != sizeof(target_sigset_t)) {
9315                 return -TARGET_EINVAL;
9316             }
9317 
9318             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9319             if (!p) {
9320                 return -TARGET_EFAULT;
9321             }
9322             target_to_host_sigset(&set, p);
9323             unlock_user(p, arg1, 0);
9324             if (arg3) {
9325                 puts = &uts;
9326                 if (target_to_host_timespec64(puts, arg3)) {
9327                     return -TARGET_EFAULT;
9328                 }
9329             } else {
9330                 puts = NULL;
9331             }
9332             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9333                                                  SIGSET_T_SIZE));
9334             if (!is_error(ret)) {
9335                 if (arg2) {
9336                     p = lock_user(VERIFY_WRITE, arg2,
9337                                   sizeof(target_siginfo_t), 0);
9338                     if (!p) {
9339                         return -TARGET_EFAULT;
9340                     }
9341                     host_to_target_siginfo(p, &uinfo);
9342                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9343                 }
9344                 ret = host_to_target_signal(ret);
9345             }
9346         }
9347         return ret;
9348 #endif
9349     case TARGET_NR_rt_sigqueueinfo:
9350         {
9351             siginfo_t uinfo;
9352 
9353             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9354             if (!p) {
9355                 return -TARGET_EFAULT;
9356             }
9357             target_to_host_siginfo(&uinfo, p);
9358             unlock_user(p, arg3, 0);
9359             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9360         }
9361         return ret;
9362     case TARGET_NR_rt_tgsigqueueinfo:
9363         {
9364             siginfo_t uinfo;
9365 
9366             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9367             if (!p) {
9368                 return -TARGET_EFAULT;
9369             }
9370             target_to_host_siginfo(&uinfo, p);
9371             unlock_user(p, arg4, 0);
9372             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9373         }
9374         return ret;
9375 #ifdef TARGET_NR_sigreturn
9376     case TARGET_NR_sigreturn:
9377         if (block_signals()) {
9378             return -TARGET_ERESTARTSYS;
9379         }
9380         return do_sigreturn(cpu_env);
9381 #endif
9382     case TARGET_NR_rt_sigreturn:
9383         if (block_signals()) {
9384             return -TARGET_ERESTARTSYS;
9385         }
9386         return do_rt_sigreturn(cpu_env);
9387     case TARGET_NR_sethostname:
9388         if (!(p = lock_user_string(arg1)))
9389             return -TARGET_EFAULT;
9390         ret = get_errno(sethostname(p, arg2));
9391         unlock_user(p, arg1, 0);
9392         return ret;
9393 #ifdef TARGET_NR_setrlimit
9394     case TARGET_NR_setrlimit:
9395         {
9396             int resource = target_to_host_resource(arg1);
9397             struct target_rlimit *target_rlim;
9398             struct rlimit rlim;
9399             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9400                 return -TARGET_EFAULT;
9401             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9402             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9403             unlock_user_struct(target_rlim, arg2, 0);
9404             /*
9405              * If we just passed through resource limit settings for memory then
9406              * they would also apply to QEMU's own allocations, and QEMU will
9407              * crash or hang or die if its allocations fail. Ideally we would
9408              * track the guest allocations in QEMU and apply the limits ourselves.
9409              * For now, just tell the guest the call succeeded but don't actually
9410              * limit anything.
9411              */
9412             if (resource != RLIMIT_AS &&
9413                 resource != RLIMIT_DATA &&
9414                 resource != RLIMIT_STACK) {
9415                 return get_errno(setrlimit(resource, &rlim));
9416             } else {
9417                 return 0;
9418             }
9419         }
9420 #endif
9421 #ifdef TARGET_NR_getrlimit
9422     case TARGET_NR_getrlimit:
9423         {
9424             int resource = target_to_host_resource(arg1);
9425             struct target_rlimit *target_rlim;
9426             struct rlimit rlim;
9427 
9428             ret = get_errno(getrlimit(resource, &rlim));
9429             if (!is_error(ret)) {
9430                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9431                     return -TARGET_EFAULT;
9432                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9433                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9434                 unlock_user_struct(target_rlim, arg2, 1);
9435             }
9436         }
9437         return ret;
9438 #endif
9439     case TARGET_NR_getrusage:
9440         {
9441             struct rusage rusage;
9442             ret = get_errno(getrusage(arg1, &rusage));
9443             if (!is_error(ret)) {
9444                 ret = host_to_target_rusage(arg2, &rusage);
9445             }
9446         }
9447         return ret;
9448 #if defined(TARGET_NR_gettimeofday)
9449     case TARGET_NR_gettimeofday:
9450         {
9451             struct timeval tv;
9452             struct timezone tz;
9453 
9454             ret = get_errno(gettimeofday(&tv, &tz));
9455             if (!is_error(ret)) {
9456                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9457                     return -TARGET_EFAULT;
9458                 }
9459                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9460                     return -TARGET_EFAULT;
9461                 }
9462             }
9463         }
9464         return ret;
9465 #endif
9466 #if defined(TARGET_NR_settimeofday)
9467     case TARGET_NR_settimeofday:
9468         {
9469             struct timeval tv, *ptv = NULL;
9470             struct timezone tz, *ptz = NULL;
9471 
9472             if (arg1) {
9473                 if (copy_from_user_timeval(&tv, arg1)) {
9474                     return -TARGET_EFAULT;
9475                 }
9476                 ptv = &tv;
9477             }
9478 
9479             if (arg2) {
9480                 if (copy_from_user_timezone(&tz, arg2)) {
9481                     return -TARGET_EFAULT;
9482                 }
9483                 ptz = &tz;
9484             }
9485 
9486             return get_errno(settimeofday(ptv, ptz));
9487         }
9488 #endif
9489 #if defined(TARGET_NR_select)
9490     case TARGET_NR_select:
9491 #if defined(TARGET_WANT_NI_OLD_SELECT)
9492         /* Some architectures used to wire up old_select here,
9493          * but now return ENOSYS for it.
9494          */
9495         ret = -TARGET_ENOSYS;
9496 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9497         ret = do_old_select(arg1);
9498 #else
9499         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9500 #endif
9501         return ret;
9502 #endif
9503 #ifdef TARGET_NR_pselect6
9504     case TARGET_NR_pselect6:
9505         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9506 #endif
9507 #ifdef TARGET_NR_pselect6_time64
9508     case TARGET_NR_pselect6_time64:
9509         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9510 #endif
9511 #ifdef TARGET_NR_symlink
9512     case TARGET_NR_symlink:
9513         {
9514             void *p2;
9515             p = lock_user_string(arg1);
9516             p2 = lock_user_string(arg2);
9517             if (!p || !p2)
9518                 ret = -TARGET_EFAULT;
9519             else
9520                 ret = get_errno(symlink(p, p2));
9521             unlock_user(p2, arg2, 0);
9522             unlock_user(p, arg1, 0);
9523         }
9524         return ret;
9525 #endif
9526 #if defined(TARGET_NR_symlinkat)
9527     case TARGET_NR_symlinkat:
9528         {
9529             void *p2;
9530             p  = lock_user_string(arg1);
9531             p2 = lock_user_string(arg3);
9532             if (!p || !p2)
9533                 ret = -TARGET_EFAULT;
9534             else
9535                 ret = get_errno(symlinkat(p, arg2, p2));
9536             unlock_user(p2, arg3, 0);
9537             unlock_user(p, arg1, 0);
9538         }
9539         return ret;
9540 #endif
9541 #ifdef TARGET_NR_readlink
9542     case TARGET_NR_readlink:
9543         {
9544             void *p2;
9545             p = lock_user_string(arg1);
9546             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9547             if (!p || !p2) {
9548                 ret = -TARGET_EFAULT;
9549             } else if (!arg3) {
9550                 /* Short circuit this for the magic exe check. */
9551                 ret = -TARGET_EINVAL;
9552             } else if (is_proc_myself((const char *)p, "exe")) {
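                /* A guest reading /proc/self/exe (or /proc/<pid>/exe for its
                 * own pid) must see the guest binary, not the QEMU executable,
                 * so answer from exec_path rather than asking the host.
                 */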
9553                 char real[PATH_MAX], *temp;
9554                 temp = realpath(exec_path, real);
9555                 /* Return value is # of bytes that we wrote to the buffer. */
9556                 if (temp == NULL) {
9557                     ret = get_errno(-1);
9558                 } else {
9559                     /* Don't worry about a sign mismatch: a bogus arg3 would
9560                      * have made the earlier lock_user() mapping fail already. */
9561                     ret = MIN(strlen(real), arg3);
9562                     /* We cannot NUL terminate the string. */
9563                     memcpy(p2, real, ret);
9564                 }
9565             } else {
9566                 ret = get_errno(readlink(path(p), p2, arg3));
9567             }
9568             unlock_user(p2, arg2, ret);
9569             unlock_user(p, arg1, 0);
9570         }
9571         return ret;
9572 #endif
9573 #if defined(TARGET_NR_readlinkat)
9574     case TARGET_NR_readlinkat:
9575         {
9576             void *p2;
9577             p  = lock_user_string(arg2);
9578             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9579             if (!p || !p2) {
9580                 ret = -TARGET_EFAULT;
9581             } else if (is_proc_myself((const char *)p, "exe")) {
9582                 char real[PATH_MAX], *temp;
9583                 temp = realpath(exec_path, real);
9584             ret = temp == NULL ? get_errno(-1) : strlen(real);
9585                 snprintf((char *)p2, arg4, "%s", real);
9586             } else {
9587                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9588             }
9589             unlock_user(p2, arg3, ret);
9590             unlock_user(p, arg2, 0);
9591         }
9592         return ret;
9593 #endif
9594 #ifdef TARGET_NR_swapon
9595     case TARGET_NR_swapon:
9596         if (!(p = lock_user_string(arg1)))
9597             return -TARGET_EFAULT;
9598         ret = get_errno(swapon(p, arg2));
9599         unlock_user(p, arg1, 0);
9600         return ret;
9601 #endif
9602     case TARGET_NR_reboot:
9603         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9604             /* arg4 is only valid here; in all other cases it must be ignored */
9605             p = lock_user_string(arg4);
9606             if (!p) {
9607                 return -TARGET_EFAULT;
9608             }
9609             ret = get_errno(reboot(arg1, arg2, arg3, p));
9610             unlock_user(p, arg4, 0);
9611         } else {
9612             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9613         }
9614         return ret;
9615 #ifdef TARGET_NR_mmap
9616     case TARGET_NR_mmap:
9617 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9618     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9619     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9620     || defined(TARGET_S390X)
9621         {
9622             abi_ulong *v;
9623             abi_ulong v1, v2, v3, v4, v5, v6;
9624             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9625                 return -TARGET_EFAULT;
9626             v1 = tswapal(v[0]);
9627             v2 = tswapal(v[1]);
9628             v3 = tswapal(v[2]);
9629             v4 = tswapal(v[3]);
9630             v5 = tswapal(v[4]);
9631             v6 = tswapal(v[5]);
9632             unlock_user(v, arg1, 0);
9633             ret = get_errno(target_mmap(v1, v2, v3,
9634                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9635                                         v5, v6));
9636         }
9637 #else
9638         ret = get_errno(target_mmap(arg1, arg2, arg3,
9639                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9640                                     arg5,
9641                                     arg6));
9642 #endif
9643         return ret;
9644 #endif
9645 #ifdef TARGET_NR_mmap2
9646     case TARGET_NR_mmap2:
9647 #ifndef MMAP_SHIFT
9648 #define MMAP_SHIFT 12
9649 #endif
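        /* mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
         * (4096 unless the target overrides it); e.g. a guest offset of 3
         * corresponds to a host byte offset of 3 << 12 = 12288.
         */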
9650         ret = target_mmap(arg1, arg2, arg3,
9651                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9652                           arg5, arg6 << MMAP_SHIFT);
9653         return get_errno(ret);
9654 #endif
9655     case TARGET_NR_munmap:
9656         return get_errno(target_munmap(arg1, arg2));
9657     case TARGET_NR_mprotect:
9658         {
9659             TaskState *ts = cpu->opaque;
9660             /* Special hack to detect libc making the stack executable.  */
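            /* PROT_GROWSDOWN is not passed to the host; instead the request
             * is widened by hand to start at the current stack limit.
             */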
9661             if ((arg3 & PROT_GROWSDOWN)
9662                 && arg1 >= ts->info->stack_limit
9663                 && arg1 <= ts->info->start_stack) {
9664                 arg3 &= ~PROT_GROWSDOWN;
9665                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9666                 arg1 = ts->info->stack_limit;
9667             }
9668         }
9669         return get_errno(target_mprotect(arg1, arg2, arg3));
9670 #ifdef TARGET_NR_mremap
9671     case TARGET_NR_mremap:
9672         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9673 #endif
9674         /* ??? msync/mlock/munlock are broken for softmmu.  */
9675 #ifdef TARGET_NR_msync
9676     case TARGET_NR_msync:
9677         return get_errno(msync(g2h(arg1), arg2, arg3));
9678 #endif
9679 #ifdef TARGET_NR_mlock
9680     case TARGET_NR_mlock:
9681         return get_errno(mlock(g2h(arg1), arg2));
9682 #endif
9683 #ifdef TARGET_NR_munlock
9684     case TARGET_NR_munlock:
9685         return get_errno(munlock(g2h(arg1), arg2));
9686 #endif
9687 #ifdef TARGET_NR_mlockall
9688     case TARGET_NR_mlockall:
9689         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9690 #endif
9691 #ifdef TARGET_NR_munlockall
9692     case TARGET_NR_munlockall:
9693         return get_errno(munlockall());
9694 #endif
9695 #ifdef TARGET_NR_truncate
9696     case TARGET_NR_truncate:
9697         if (!(p = lock_user_string(arg1)))
9698             return -TARGET_EFAULT;
9699         ret = get_errno(truncate(p, arg2));
9700         unlock_user(p, arg1, 0);
9701         return ret;
9702 #endif
9703 #ifdef TARGET_NR_ftruncate
9704     case TARGET_NR_ftruncate:
9705         return get_errno(ftruncate(arg1, arg2));
9706 #endif
9707     case TARGET_NR_fchmod:
9708         return get_errno(fchmod(arg1, arg2));
9709 #if defined(TARGET_NR_fchmodat)
9710     case TARGET_NR_fchmodat:
9711         if (!(p = lock_user_string(arg2)))
9712             return -TARGET_EFAULT;
9713         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9714         unlock_user(p, arg2, 0);
9715         return ret;
9716 #endif
9717     case TARGET_NR_getpriority:
9718         /* Note that negative values are valid for getpriority, so we must
9719            differentiate based on errno settings.  */
9720         errno = 0;
9721         ret = getpriority(arg1, arg2);
9722         if (ret == -1 && errno != 0) {
9723             return -host_to_target_errno(errno);
9724         }
9725 #ifdef TARGET_ALPHA
9726         /* Return value is the unbiased priority.  Signal no error.  */
9727         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9728 #else
9729         /* Return value is a biased priority to avoid negative numbers.  */
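        /* e.g. nice -20 is reported as 40 and nice 19 as 1, mirroring the
         * encoding used by the kernel's getpriority() syscall.
         */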
9730         ret = 20 - ret;
9731 #endif
9732         return ret;
9733     case TARGET_NR_setpriority:
9734         return get_errno(setpriority(arg1, arg2, arg3));
9735 #ifdef TARGET_NR_statfs
9736     case TARGET_NR_statfs:
9737         if (!(p = lock_user_string(arg1))) {
9738             return -TARGET_EFAULT;
9739         }
9740         ret = get_errno(statfs(path(p), &stfs));
9741         unlock_user(p, arg1, 0);
9742     convert_statfs:
9743         if (!is_error(ret)) {
9744             struct target_statfs *target_stfs;
9745 
9746             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9747                 return -TARGET_EFAULT;
9748             __put_user(stfs.f_type, &target_stfs->f_type);
9749             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9750             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9751             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9752             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9753             __put_user(stfs.f_files, &target_stfs->f_files);
9754             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9755             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9756             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9757             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9758             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9759 #ifdef _STATFS_F_FLAGS
9760             __put_user(stfs.f_flags, &target_stfs->f_flags);
9761 #else
9762             __put_user(0, &target_stfs->f_flags);
9763 #endif
9764             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9765             unlock_user_struct(target_stfs, arg2, 1);
9766         }
9767         return ret;
9768 #endif
9769 #ifdef TARGET_NR_fstatfs
9770     case TARGET_NR_fstatfs:
9771         ret = get_errno(fstatfs(arg1, &stfs));
9772         goto convert_statfs;
9773 #endif
9774 #ifdef TARGET_NR_statfs64
9775     case TARGET_NR_statfs64:
9776         if (!(p = lock_user_string(arg1))) {
9777             return -TARGET_EFAULT;
9778         }
9779         ret = get_errno(statfs(path(p), &stfs));
9780         unlock_user(p, arg1, 0);
9781     convert_statfs64:
9782         if (!is_error(ret)) {
9783             struct target_statfs64 *target_stfs;
9784 
9785             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9786                 return -TARGET_EFAULT;
9787             __put_user(stfs.f_type, &target_stfs->f_type);
9788             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9789             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9790             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9791             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9792             __put_user(stfs.f_files, &target_stfs->f_files);
9793             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9794             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9795             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9796             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9797             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9798 #ifdef _STATFS_F_FLAGS
9799             __put_user(stfs.f_flags, &target_stfs->f_flags);
9800 #else
9801             __put_user(0, &target_stfs->f_flags);
9802 #endif
9803             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9804             unlock_user_struct(target_stfs, arg3, 1);
9805         }
9806         return ret;
9807     case TARGET_NR_fstatfs64:
9808         ret = get_errno(fstatfs(arg1, &stfs));
9809         goto convert_statfs64;
9810 #endif
9811 #ifdef TARGET_NR_socketcall
9812     case TARGET_NR_socketcall:
9813         return do_socketcall(arg1, arg2);
9814 #endif
9815 #ifdef TARGET_NR_accept
9816     case TARGET_NR_accept:
9817         return do_accept4(arg1, arg2, arg3, 0);
9818 #endif
9819 #ifdef TARGET_NR_accept4
9820     case TARGET_NR_accept4:
9821         return do_accept4(arg1, arg2, arg3, arg4);
9822 #endif
9823 #ifdef TARGET_NR_bind
9824     case TARGET_NR_bind:
9825         return do_bind(arg1, arg2, arg3);
9826 #endif
9827 #ifdef TARGET_NR_connect
9828     case TARGET_NR_connect:
9829         return do_connect(arg1, arg2, arg3);
9830 #endif
9831 #ifdef TARGET_NR_getpeername
9832     case TARGET_NR_getpeername:
9833         return do_getpeername(arg1, arg2, arg3);
9834 #endif
9835 #ifdef TARGET_NR_getsockname
9836     case TARGET_NR_getsockname:
9837         return do_getsockname(arg1, arg2, arg3);
9838 #endif
9839 #ifdef TARGET_NR_getsockopt
9840     case TARGET_NR_getsockopt:
9841         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9842 #endif
9843 #ifdef TARGET_NR_listen
9844     case TARGET_NR_listen:
9845         return get_errno(listen(arg1, arg2));
9846 #endif
9847 #ifdef TARGET_NR_recv
9848     case TARGET_NR_recv:
9849         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9850 #endif
9851 #ifdef TARGET_NR_recvfrom
9852     case TARGET_NR_recvfrom:
9853         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9854 #endif
9855 #ifdef TARGET_NR_recvmsg
9856     case TARGET_NR_recvmsg:
9857         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9858 #endif
9859 #ifdef TARGET_NR_send
9860     case TARGET_NR_send:
9861         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9862 #endif
9863 #ifdef TARGET_NR_sendmsg
9864     case TARGET_NR_sendmsg:
9865         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9866 #endif
9867 #ifdef TARGET_NR_sendmmsg
9868     case TARGET_NR_sendmmsg:
9869         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9870 #endif
9871 #ifdef TARGET_NR_recvmmsg
9872     case TARGET_NR_recvmmsg:
9873         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9874 #endif
9875 #ifdef TARGET_NR_sendto
9876     case TARGET_NR_sendto:
9877         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9878 #endif
9879 #ifdef TARGET_NR_shutdown
9880     case TARGET_NR_shutdown:
9881         return get_errno(shutdown(arg1, arg2));
9882 #endif
9883 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9884     case TARGET_NR_getrandom:
9885         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9886         if (!p) {
9887             return -TARGET_EFAULT;
9888         }
9889         ret = get_errno(getrandom(p, arg2, arg3));
9890         unlock_user(p, arg1, ret);
9891         return ret;
9892 #endif
9893 #ifdef TARGET_NR_socket
9894     case TARGET_NR_socket:
9895         return do_socket(arg1, arg2, arg3);
9896 #endif
9897 #ifdef TARGET_NR_socketpair
9898     case TARGET_NR_socketpair:
9899         return do_socketpair(arg1, arg2, arg3, arg4);
9900 #endif
9901 #ifdef TARGET_NR_setsockopt
9902     case TARGET_NR_setsockopt:
9903         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9904 #endif
9905 #if defined(TARGET_NR_syslog)
9906     case TARGET_NR_syslog:
9907         {
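            /* The syscall ABI is syslog(type, bufp, len):
             * arg1 = action, arg2 = user buffer, arg3 = buffer length.
             */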
9908             int len = arg3;
9909 
9910             switch (arg1) {
9911             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9912             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9913             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9914             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9915             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9916             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9917             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9918             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9919                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9920             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9921             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9922             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9923                 {
9924                     if (len < 0) {
9925                         return -TARGET_EINVAL;
9926                     }
9927                     if (len == 0) {
9928                         return 0;
9929                     }
9930                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9931                     if (!p) {
9932                         return -TARGET_EFAULT;
9933                     }
9934                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9935                     unlock_user(p, arg2, arg3);
9936                 }
9937                 return ret;
9938             default:
9939                 return -TARGET_EINVAL;
9940             }
9941         }
9942         break;
9943 #endif
9944     case TARGET_NR_setitimer:
9945         {
9946             struct itimerval value, ovalue, *pvalue;
9947 
9948             if (arg2) {
9949                 pvalue = &value;
9950                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9951                     || copy_from_user_timeval(&pvalue->it_value,
9952                                               arg2 + sizeof(struct target_timeval)))
9953                     return -TARGET_EFAULT;
9954             } else {
9955                 pvalue = NULL;
9956             }
9957             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9958             if (!is_error(ret) && arg3) {
9959                 if (copy_to_user_timeval(arg3,
9960                                          &ovalue.it_interval)
9961                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9962                                             &ovalue.it_value))
9963                     return -TARGET_EFAULT;
9964             }
9965         }
9966         return ret;
9967     case TARGET_NR_getitimer:
9968         {
9969             struct itimerval value;
9970 
9971             ret = get_errno(getitimer(arg1, &value));
9972             if (!is_error(ret) && arg2) {
9973                 if (copy_to_user_timeval(arg2,
9974                                          &value.it_interval)
9975                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9976                                             &value.it_value))
9977                     return -TARGET_EFAULT;
9978             }
9979         }
9980         return ret;
9981 #ifdef TARGET_NR_stat
9982     case TARGET_NR_stat:
9983         if (!(p = lock_user_string(arg1))) {
9984             return -TARGET_EFAULT;
9985         }
9986         ret = get_errno(stat(path(p), &st));
9987         unlock_user(p, arg1, 0);
9988         goto do_stat;
9989 #endif
9990 #ifdef TARGET_NR_lstat
9991     case TARGET_NR_lstat:
9992         if (!(p = lock_user_string(arg1))) {
9993             return -TARGET_EFAULT;
9994         }
9995         ret = get_errno(lstat(path(p), &st));
9996         unlock_user(p, arg1, 0);
9997         goto do_stat;
9998 #endif
9999 #ifdef TARGET_NR_fstat
10000     case TARGET_NR_fstat:
10001         {
10002             ret = get_errno(fstat(arg1, &st));
10003 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10004         do_stat:
10005 #endif
10006             if (!is_error(ret)) {
10007                 struct target_stat *target_st;
10008 
10009                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10010                     return -TARGET_EFAULT;
10011                 memset(target_st, 0, sizeof(*target_st));
10012                 __put_user(st.st_dev, &target_st->st_dev);
10013                 __put_user(st.st_ino, &target_st->st_ino);
10014                 __put_user(st.st_mode, &target_st->st_mode);
10015                 __put_user(st.st_uid, &target_st->st_uid);
10016                 __put_user(st.st_gid, &target_st->st_gid);
10017                 __put_user(st.st_nlink, &target_st->st_nlink);
10018                 __put_user(st.st_rdev, &target_st->st_rdev);
10019                 __put_user(st.st_size, &target_st->st_size);
10020                 __put_user(st.st_blksize, &target_st->st_blksize);
10021                 __put_user(st.st_blocks, &target_st->st_blocks);
10022                 __put_user(st.st_atime, &target_st->target_st_atime);
10023                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10024                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10025 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10026     defined(TARGET_STAT_HAVE_NSEC)
10027                 __put_user(st.st_atim.tv_nsec,
10028                            &target_st->target_st_atime_nsec);
10029                 __put_user(st.st_mtim.tv_nsec,
10030                            &target_st->target_st_mtime_nsec);
10031                 __put_user(st.st_ctim.tv_nsec,
10032                            &target_st->target_st_ctime_nsec);
10033 #endif
10034                 unlock_user_struct(target_st, arg2, 1);
10035             }
10036         }
10037         return ret;
10038 #endif
10039     case TARGET_NR_vhangup:
10040         return get_errno(vhangup());
10041 #ifdef TARGET_NR_syscall
10042     case TARGET_NR_syscall:
10043         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10044                           arg6, arg7, arg8, 0);
10045 #endif
10046 #if defined(TARGET_NR_wait4)
10047     case TARGET_NR_wait4:
10048         {
10049             int status;
10050             abi_long status_ptr = arg2;
10051             struct rusage rusage, *rusage_ptr;
10052             abi_ulong target_rusage = arg4;
10053             abi_long rusage_err;
10054             if (target_rusage)
10055                 rusage_ptr = &rusage;
10056             else
10057                 rusage_ptr = NULL;
10058             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10059             if (!is_error(ret)) {
10060                 if (status_ptr && ret) {
10061                     status = host_to_target_waitstatus(status);
10062                     if (put_user_s32(status, status_ptr))
10063                         return -TARGET_EFAULT;
10064                 }
10065                 if (target_rusage) {
10066                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10067                     if (rusage_err) {
10068                         ret = rusage_err;
10069                     }
10070                 }
10071             }
10072         }
10073         return ret;
10074 #endif
10075 #ifdef TARGET_NR_swapoff
10076     case TARGET_NR_swapoff:
10077         if (!(p = lock_user_string(arg1)))
10078             return -TARGET_EFAULT;
10079         ret = get_errno(swapoff(p));
10080         unlock_user(p, arg1, 0);
10081         return ret;
10082 #endif
10083     case TARGET_NR_sysinfo:
10084         {
10085             struct target_sysinfo *target_value;
10086             struct sysinfo value;
10087             ret = get_errno(sysinfo(&value));
10088             if (!is_error(ret) && arg1)
10089             {
10090                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10091                     return -TARGET_EFAULT;
10092                 __put_user(value.uptime, &target_value->uptime);
10093                 __put_user(value.loads[0], &target_value->loads[0]);
10094                 __put_user(value.loads[1], &target_value->loads[1]);
10095                 __put_user(value.loads[2], &target_value->loads[2]);
10096                 __put_user(value.totalram, &target_value->totalram);
10097                 __put_user(value.freeram, &target_value->freeram);
10098                 __put_user(value.sharedram, &target_value->sharedram);
10099                 __put_user(value.bufferram, &target_value->bufferram);
10100                 __put_user(value.totalswap, &target_value->totalswap);
10101                 __put_user(value.freeswap, &target_value->freeswap);
10102                 __put_user(value.procs, &target_value->procs);
10103                 __put_user(value.totalhigh, &target_value->totalhigh);
10104                 __put_user(value.freehigh, &target_value->freehigh);
10105                 __put_user(value.mem_unit, &target_value->mem_unit);
10106                 unlock_user_struct(target_value, arg1, 1);
10107             }
10108         }
10109         return ret;
10110 #ifdef TARGET_NR_ipc
10111     case TARGET_NR_ipc:
10112         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10113 #endif
10114 #ifdef TARGET_NR_semget
10115     case TARGET_NR_semget:
10116         return get_errno(semget(arg1, arg2, arg3));
10117 #endif
10118 #ifdef TARGET_NR_semop
10119     case TARGET_NR_semop:
10120         return do_semtimedop(arg1, arg2, arg3, 0, false);
10121 #endif
10122 #ifdef TARGET_NR_semtimedop
10123     case TARGET_NR_semtimedop:
10124         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10125 #endif
10126 #ifdef TARGET_NR_semtimedop_time64
10127     case TARGET_NR_semtimedop_time64:
10128         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10129 #endif
10130 #ifdef TARGET_NR_semctl
10131     case TARGET_NR_semctl:
10132         return do_semctl(arg1, arg2, arg3, arg4);
10133 #endif
10134 #ifdef TARGET_NR_msgctl
10135     case TARGET_NR_msgctl:
10136         return do_msgctl(arg1, arg2, arg3);
10137 #endif
10138 #ifdef TARGET_NR_msgget
10139     case TARGET_NR_msgget:
10140         return get_errno(msgget(arg1, arg2));
10141 #endif
10142 #ifdef TARGET_NR_msgrcv
10143     case TARGET_NR_msgrcv:
10144         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10145 #endif
10146 #ifdef TARGET_NR_msgsnd
10147     case TARGET_NR_msgsnd:
10148         return do_msgsnd(arg1, arg2, arg3, arg4);
10149 #endif
10150 #ifdef TARGET_NR_shmget
10151     case TARGET_NR_shmget:
10152         return get_errno(shmget(arg1, arg2, arg3));
10153 #endif
10154 #ifdef TARGET_NR_shmctl
10155     case TARGET_NR_shmctl:
10156         return do_shmctl(arg1, arg2, arg3);
10157 #endif
10158 #ifdef TARGET_NR_shmat
10159     case TARGET_NR_shmat:
10160         return do_shmat(cpu_env, arg1, arg2, arg3);
10161 #endif
10162 #ifdef TARGET_NR_shmdt
10163     case TARGET_NR_shmdt:
10164         return do_shmdt(arg1);
10165 #endif
10166     case TARGET_NR_fsync:
10167         return get_errno(fsync(arg1));
10168     case TARGET_NR_clone:
10169         /* Linux manages to have three different orderings for its
10170          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10171          * match the kernel's CONFIG_CLONE_* settings.
10172          * Microblaze is further special in that it uses a sixth
10173          * implicit argument to clone for the TLS pointer.
10174          */
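        /* For reference, the kernel's argument orders are:
         *   default:          flags, newsp, parent_tidptr, child_tidptr, tls
         *   CLONE_BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
         *   CLONE_BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
         * which is why the do_fork() calls below shuffle arg1..arg5 (and
         * arg6 on Microblaze).
         */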
10175 #if defined(TARGET_MICROBLAZE)
10176         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10177 #elif defined(TARGET_CLONE_BACKWARDS)
10178         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10179 #elif defined(TARGET_CLONE_BACKWARDS2)
10180         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10181 #else
10182         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10183 #endif
10184         return ret;
10185 #ifdef __NR_exit_group
10186         /* new thread calls */
10187     case TARGET_NR_exit_group:
10188         preexit_cleanup(cpu_env, arg1);
10189         return get_errno(exit_group(arg1));
10190 #endif
10191     case TARGET_NR_setdomainname:
10192         if (!(p = lock_user_string(arg1)))
10193             return -TARGET_EFAULT;
10194         ret = get_errno(setdomainname(p, arg2));
10195         unlock_user(p, arg1, 0);
10196         return ret;
10197     case TARGET_NR_uname:
10198         /* No need to transcode: struct new_utsname is plain char arrays on host and target */
10199         {
10200             struct new_utsname * buf;
10201 
10202             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10203                 return -TARGET_EFAULT;
10204             ret = get_errno(sys_uname(buf));
10205             if (!is_error(ret)) {
10206                 /* Overwrite the native machine name with whatever is being
10207                    emulated. */
10208                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10209                           sizeof(buf->machine));
10210                 /* Allow the user to override the reported release.  */
10211                 if (qemu_uname_release && *qemu_uname_release) {
10212                     g_strlcpy(buf->release, qemu_uname_release,
10213                               sizeof(buf->release));
10214                 }
10215             }
10216             unlock_user_struct(buf, arg1, 1);
10217         }
10218         return ret;
10219 #ifdef TARGET_I386
10220     case TARGET_NR_modify_ldt:
10221         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10222 #if !defined(TARGET_X86_64)
10223     case TARGET_NR_vm86:
10224         return do_vm86(cpu_env, arg1, arg2);
10225 #endif
10226 #endif
10227 #if defined(TARGET_NR_adjtimex)
10228     case TARGET_NR_adjtimex:
10229         {
10230             struct timex host_buf;
10231 
10232             if (target_to_host_timex(&host_buf, arg1) != 0) {
10233                 return -TARGET_EFAULT;
10234             }
10235             ret = get_errno(adjtimex(&host_buf));
10236             if (!is_error(ret)) {
10237                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10238                     return -TARGET_EFAULT;
10239                 }
10240             }
10241         }
10242         return ret;
10243 #endif
10244 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10245     case TARGET_NR_clock_adjtime:
10246         {
10247             struct timex htx, *phtx = &htx;
10248 
10249             if (target_to_host_timex(phtx, arg2) != 0) {
10250                 return -TARGET_EFAULT;
10251             }
10252             ret = get_errno(clock_adjtime(arg1, phtx));
10253             if (!is_error(ret) && phtx) {
10254                 if (host_to_target_timex(arg2, phtx) != 0) {
10255                     return -TARGET_EFAULT;
10256                 }
10257             }
10258         }
10259         return ret;
10260 #endif
10261 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10262     case TARGET_NR_clock_adjtime64:
10263         {
10264             struct timex htx;
10265 
10266             if (target_to_host_timex64(&htx, arg2) != 0) {
10267                 return -TARGET_EFAULT;
10268             }
10269             ret = get_errno(clock_adjtime(arg1, &htx));
10270             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10271                     return -TARGET_EFAULT;
10272             }
10273         }
10274         return ret;
10275 #endif
10276     case TARGET_NR_getpgid:
10277         return get_errno(getpgid(arg1));
10278     case TARGET_NR_fchdir:
10279         return get_errno(fchdir(arg1));
10280     case TARGET_NR_personality:
10281         return get_errno(personality(arg1));
10282 #ifdef TARGET_NR__llseek /* Not on alpha */
10283     case TARGET_NR__llseek:
10284         {
10285             int64_t res;
10286 #if !defined(__NR_llseek)
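            /* No host _llseek: assemble the guest's high (arg2) and low (arg3)
             * halves into one 64-bit offset and use lseek() directly.
             */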
10287             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10288             if (res == -1) {
10289                 ret = get_errno(res);
10290             } else {
10291                 ret = 0;
10292             }
10293 #else
10294             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10295 #endif
10296             if ((ret == 0) && put_user_s64(res, arg4)) {
10297                 return -TARGET_EFAULT;
10298             }
10299         }
10300         return ret;
10301 #endif
10302 #ifdef TARGET_NR_getdents
10303     case TARGET_NR_getdents:
10304 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10305 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
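        /* struct linux_dirent carries 'long' d_ino/d_off fields, so when a
         * 32-bit target runs on a 64-bit host the host records have to be
         * repacked into the target's smaller layout.
         */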
10306         {
10307             struct target_dirent *target_dirp;
10308             struct linux_dirent *dirp;
10309             abi_long count = arg3;
10310 
10311             dirp = g_try_malloc(count);
10312             if (!dirp) {
10313                 return -TARGET_ENOMEM;
10314             }
10315 
10316             ret = get_errno(sys_getdents(arg1, dirp, count));
10317             if (!is_error(ret)) {
10318                 struct linux_dirent *de;
10319                 struct target_dirent *tde;
10320                 int len = ret;
10321                 int reclen, treclen;
10322                 int count1, tnamelen;
10323 
10324                 count1 = 0;
10325                 de = dirp;
10326                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10327                     return -TARGET_EFAULT;
10328                 tde = target_dirp;
10329                 while (len > 0) {
10330                     reclen = de->d_reclen;
10331                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10332                     assert(tnamelen >= 0);
10333                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10334                     assert(count1 + treclen <= count);
10335                     tde->d_reclen = tswap16(treclen);
10336                     tde->d_ino = tswapal(de->d_ino);
10337                     tde->d_off = tswapal(de->d_off);
10338                     memcpy(tde->d_name, de->d_name, tnamelen);
10339                     de = (struct linux_dirent *)((char *)de + reclen);
10340                     len -= reclen;
10341                     tde = (struct target_dirent *)((char *)tde + treclen);
10342                     count1 += treclen;
10343                 }
10344                 ret = count1;
10345                 unlock_user(target_dirp, arg2, ret);
10346             }
10347             g_free(dirp);
10348         }
10349 #else
10350         {
10351             struct linux_dirent *dirp;
10352             abi_long count = arg3;
10353 
10354             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10355                 return -TARGET_EFAULT;
10356             ret = get_errno(sys_getdents(arg1, dirp, count));
10357             if (!is_error(ret)) {
10358                 struct linux_dirent *de;
10359                 int len = ret;
10360                 int reclen;
10361                 de = dirp;
10362                 while (len > 0) {
10363                     reclen = de->d_reclen;
10364                     if (reclen > len)
10365                         break;
10366                     de->d_reclen = tswap16(reclen);
10367                     tswapls(&de->d_ino);
10368                     tswapls(&de->d_off);
10369                     de = (struct linux_dirent *)((char *)de + reclen);
10370                     len -= reclen;
10371                 }
10372             }
10373             unlock_user(dirp, arg2, ret);
10374         }
10375 #endif
10376 #else
10377         /* Implement getdents in terms of getdents64 */
10378         {
10379             struct linux_dirent64 *dirp;
10380             abi_long count = arg3;
10381 
10382             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10383             if (!dirp) {
10384                 return -TARGET_EFAULT;
10385             }
10386             ret = get_errno(sys_getdents64(arg1, dirp, count));
10387             if (!is_error(ret)) {
10388                 /* Convert the dirent64 structs to target dirent.  We do this
10389                  * in-place, since we can guarantee that a target_dirent is no
10390                  * larger than a dirent64; however this means we have to be
10391                  * careful to read everything before writing in the new format.
10392                  */
10393                 struct linux_dirent64 *de;
10394                 struct target_dirent *tde;
10395                 int len = ret;
10396                 int tlen = 0;
10397 
10398                 de = dirp;
10399                 tde = (struct target_dirent *)dirp;
10400                 while (len > 0) {
10401                     int namelen, treclen;
10402                     int reclen = de->d_reclen;
10403                     uint64_t ino = de->d_ino;
10404                     int64_t off = de->d_off;
10405                     uint8_t type = de->d_type;
10406 
10407                     namelen = strlen(de->d_name);
10408                     treclen = offsetof(struct target_dirent, d_name)
10409                         + namelen + 2;
10410                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
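                    /* "+ 2" leaves room for the trailing NUL of d_name and for
                     * the d_type byte stored in the final byte of the record
                     * (see below).
                     */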
10411 
10412                     memmove(tde->d_name, de->d_name, namelen + 1);
10413                     tde->d_ino = tswapal(ino);
10414                     tde->d_off = tswapal(off);
10415                     tde->d_reclen = tswap16(treclen);
10416                     /* The target_dirent type is in what was formerly a padding
10417                      * byte at the end of the structure:
10418                      */
10419                     *(((char *)tde) + treclen - 1) = type;
10420 
10421                     de = (struct linux_dirent64 *)((char *)de + reclen);
10422                     tde = (struct target_dirent *)((char *)tde + treclen);
10423                     len -= reclen;
10424                     tlen += treclen;
10425                 }
10426                 ret = tlen;
10427             }
10428             unlock_user(dirp, arg2, ret);
10429         }
10430 #endif
10431         return ret;
10432 #endif /* TARGET_NR_getdents */
10433 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10434     case TARGET_NR_getdents64:
10435         {
10436             struct linux_dirent64 *dirp;
10437             abi_long count = arg3;
10438             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10439                 return -TARGET_EFAULT;
10440             ret = get_errno(sys_getdents64(arg1, dirp, count));
10441             if (!is_error(ret)) {
10442                 struct linux_dirent64 *de;
10443                 int len = ret;
10444                 int reclen;
10445                 de = dirp;
10446                 while (len > 0) {
10447                     reclen = de->d_reclen;
10448                     if (reclen > len)
10449                         break;
10450                     de->d_reclen = tswap16(reclen);
10451                     tswap64s((uint64_t *)&de->d_ino);
10452                     tswap64s((uint64_t *)&de->d_off);
10453                     de = (struct linux_dirent64 *)((char *)de + reclen);
10454                     len -= reclen;
10455                 }
10456             }
10457             unlock_user(dirp, arg2, ret);
10458         }
10459         return ret;
10460 #endif /* TARGET_NR_getdents64 */
10461 #if defined(TARGET_NR__newselect)
10462     case TARGET_NR__newselect:
10463         return do_select(arg1, arg2, arg3, arg4, arg5);
10464 #endif
10465 #ifdef TARGET_NR_poll
10466     case TARGET_NR_poll:
10467         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10468 #endif
10469 #ifdef TARGET_NR_ppoll
10470     case TARGET_NR_ppoll:
10471         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10472 #endif
10473 #ifdef TARGET_NR_ppoll_time64
10474     case TARGET_NR_ppoll_time64:
10475         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10476 #endif
10477     case TARGET_NR_flock:
10478         /* NOTE: the flock constant seems to be the same for every
10479            Linux platform */
10480         return get_errno(safe_flock(arg1, arg2));
10481     case TARGET_NR_readv:
10482         {
10483             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10484             if (vec != NULL) {
10485                 ret = get_errno(safe_readv(arg1, vec, arg3));
10486                 unlock_iovec(vec, arg2, arg3, 1);
10487             } else {
10488                 ret = -host_to_target_errno(errno);
10489             }
10490         }
10491         return ret;
10492     case TARGET_NR_writev:
10493         {
10494             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10495             if (vec != NULL) {
10496                 ret = get_errno(safe_writev(arg1, vec, arg3));
10497                 unlock_iovec(vec, arg2, arg3, 0);
10498             } else {
10499                 ret = -host_to_target_errno(errno);
10500             }
10501         }
10502         return ret;
10503 #if defined(TARGET_NR_preadv)
10504     case TARGET_NR_preadv:
10505         {
10506             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10507             if (vec != NULL) {
10508                 unsigned long low, high;
10509 
10510                 target_to_host_low_high(arg4, arg5, &low, &high);
10511                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10512                 unlock_iovec(vec, arg2, arg3, 1);
10513             } else {
10514                 ret = -host_to_target_errno(errno);
10515            }
10516         }
10517         return ret;
10518 #endif
10519 #if defined(TARGET_NR_pwritev)
10520     case TARGET_NR_pwritev:
10521         {
10522             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10523             if (vec != NULL) {
10524                 unsigned long low, high;
10525 
10526                 target_to_host_low_high(arg4, arg5, &low, &high);
10527                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10528                 unlock_iovec(vec, arg2, arg3, 0);
10529             } else {
10530                 ret = -host_to_target_errno(errno);
10531            }
10532         }
10533         return ret;
10534 #endif
10535     case TARGET_NR_getsid:
10536         return get_errno(getsid(arg1));
10537 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10538     case TARGET_NR_fdatasync:
10539         return get_errno(fdatasync(arg1));
10540 #endif
10541     case TARGET_NR_sched_getaffinity:
10542         {
10543             unsigned int mask_size;
10544             unsigned long *mask;
10545 
10546             /*
10547              * sched_getaffinity needs multiples of ulong, so need to take
10548              * care of mismatches between target ulong and host ulong sizes.
10549              */
10550             if (arg2 & (sizeof(abi_ulong) - 1)) {
10551                 return -TARGET_EINVAL;
10552             }
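            /* Round the guest-supplied size up to a whole number of host
             * 'unsigned long's; e.g. a 4-byte target mask becomes 8 bytes on
             * a 64-bit host.
             */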
10553             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10554 
10555             mask = alloca(mask_size);
10556             memset(mask, 0, mask_size);
10557             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10558 
10559             if (!is_error(ret)) {
10560                 if (ret > arg2) {
10561                     /* More data returned than the caller's buffer will fit.
10562                      * This only happens if sizeof(abi_long) < sizeof(long)
10563                      * and the caller passed us a buffer holding an odd number
10564                      * of abi_longs. If the host kernel is actually using the
10565                      * extra 4 bytes then fail EINVAL; otherwise we can just
10566                      * ignore them and only copy the interesting part.
10567                      */
10568                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10569                     if (numcpus > arg2 * 8) {
10570                         return -TARGET_EINVAL;
10571                     }
10572                     ret = arg2;
10573                 }
10574 
10575                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10576                     return -TARGET_EFAULT;
10577                 }
10578             }
10579         }
10580         return ret;
10581     case TARGET_NR_sched_setaffinity:
10582         {
10583             unsigned int mask_size;
10584             unsigned long *mask;
10585 
10586             /*
10587              * sched_setaffinity needs multiples of ulong, so we need to take
10588              * care of mismatches between target ulong and host ulong sizes.
10589              */
10590             if (arg2 & (sizeof(abi_ulong) - 1)) {
10591                 return -TARGET_EINVAL;
10592             }
10593             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10594             mask = alloca(mask_size);
10595 
10596             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10597             if (ret) {
10598                 return ret;
10599             }
10600 
10601             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10602         }
10603     case TARGET_NR_getcpu:
10604         {
10605             unsigned cpu, node;
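                  /*
                   * The third argument is the legacy tcache pointer, which the
                   * kernel has ignored for a long time; NULL is always fine.
                   */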
10606             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10607                                        arg2 ? &node : NULL,
10608                                        NULL));
10609             if (is_error(ret)) {
10610                 return ret;
10611             }
10612             if (arg1 && put_user_u32(cpu, arg1)) {
10613                 return -TARGET_EFAULT;
10614             }
10615             if (arg2 && put_user_u32(node, arg2)) {
10616                 return -TARGET_EFAULT;
10617             }
10618         }
10619         return ret;
10620     case TARGET_NR_sched_setparam:
10621         {
10622             struct sched_param *target_schp;
10623             struct sched_param schp;
10624 
10625             if (arg2 == 0) {
10626                 return -TARGET_EINVAL;
10627             }
10628             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10629                 return -TARGET_EFAULT;
10630             schp.sched_priority = tswap32(target_schp->sched_priority);
10631             unlock_user_struct(target_schp, arg2, 0);
10632             return get_errno(sched_setparam(arg1, &schp));
10633         }
10634     case TARGET_NR_sched_getparam:
10635         {
10636             struct sched_param *target_schp;
10637             struct sched_param schp;
10638 
10639             if (arg2 == 0) {
10640                 return -TARGET_EINVAL;
10641             }
10642             ret = get_errno(sched_getparam(arg1, &schp));
10643             if (!is_error(ret)) {
10644                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10645                     return -TARGET_EFAULT;
10646                 target_schp->sched_priority = tswap32(schp.sched_priority);
10647                 unlock_user_struct(target_schp, arg2, 1);
10648             }
10649         }
10650         return ret;
10651     case TARGET_NR_sched_setscheduler:
10652         {
10653             struct sched_param *target_schp;
10654             struct sched_param schp;
10655             if (arg3 == 0) {
10656                 return -TARGET_EINVAL;
10657             }
10658             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10659                 return -TARGET_EFAULT;
10660             schp.sched_priority = tswap32(target_schp->sched_priority);
10661             unlock_user_struct(target_schp, arg3, 0);
10662             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10663         }
10664     case TARGET_NR_sched_getscheduler:
10665         return get_errno(sched_getscheduler(arg1));
10666     case TARGET_NR_sched_yield:
10667         return get_errno(sched_yield());
10668     case TARGET_NR_sched_get_priority_max:
10669         return get_errno(sched_get_priority_max(arg1));
10670     case TARGET_NR_sched_get_priority_min:
10671         return get_errno(sched_get_priority_min(arg1));
10672 #ifdef TARGET_NR_sched_rr_get_interval
10673     case TARGET_NR_sched_rr_get_interval:
10674         {
10675             struct timespec ts;
10676             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10677             if (!is_error(ret)) {
10678                 ret = host_to_target_timespec(arg2, &ts);
10679             }
10680         }
10681         return ret;
10682 #endif
10683 #ifdef TARGET_NR_sched_rr_get_interval_time64
10684     case TARGET_NR_sched_rr_get_interval_time64:
10685         {
10686             struct timespec ts;
10687             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10688             if (!is_error(ret)) {
10689                 ret = host_to_target_timespec64(arg2, &ts);
10690             }
10691         }
10692         return ret;
10693 #endif
10694 #if defined(TARGET_NR_nanosleep)
10695     case TARGET_NR_nanosleep:
10696         {
10697             struct timespec req, rem;
10698             target_to_host_timespec(&req, arg1);
10699             ret = get_errno(safe_nanosleep(&req, &rem));
10700             if (is_error(ret) && arg2) {
10701                 host_to_target_timespec(arg2, &rem);
10702             }
10703         }
10704         return ret;
10705 #endif
10706     case TARGET_NR_prctl:
10707         switch (arg1) {
10708         case PR_GET_PDEATHSIG:
10709         {
10710             int deathsig;
10711             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10712             if (!is_error(ret) && arg2
10713                 && put_user_s32(deathsig, arg2)) {
10714                 return -TARGET_EFAULT;
10715             }
10716             return ret;
10717         }
10718 #ifdef PR_GET_NAME
10719         case PR_GET_NAME:
10720         {
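                  /*
                   * The task comm name is exchanged as a fixed 16-byte
                   * (TASK_COMM_LEN) buffer, NUL terminator included.
                   */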
10721             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10722             if (!name) {
10723                 return -TARGET_EFAULT;
10724             }
10725             ret = get_errno(prctl(arg1, (unsigned long)name,
10726                                   arg3, arg4, arg5));
10727             unlock_user(name, arg2, 16);
10728             return ret;
10729         }
10730         case PR_SET_NAME:
10731         {
10732             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10733             if (!name) {
10734                 return -TARGET_EFAULT;
10735             }
10736             ret = get_errno(prctl(arg1, (unsigned long)name,
10737                                   arg3, arg4, arg5));
10738             unlock_user(name, arg2, 0);
10739             return ret;
10740         }
10741 #endif
10742 #ifdef TARGET_MIPS
10743         case TARGET_PR_GET_FP_MODE:
10744         {
10745             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10746             ret = 0;
10747             if (env->CP0_Status & (1 << CP0St_FR)) {
10748                 ret |= TARGET_PR_FP_MODE_FR;
10749             }
10750             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10751                 ret |= TARGET_PR_FP_MODE_FRE;
10752             }
10753             return ret;
10754         }
10755         case TARGET_PR_SET_FP_MODE:
10756         {
10757             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10758             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10759             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10760             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10761             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10762 
10763             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10764                                             TARGET_PR_FP_MODE_FRE;
10765 
10766             /* If nothing to change, return right away, successfully.  */
10767             if (old_fr == new_fr && old_fre == new_fre) {
10768                 return 0;
10769             }
10770             /* Check the value is valid */
10771             if (arg2 & ~known_bits) {
10772                 return -TARGET_EOPNOTSUPP;
10773             }
10774             /* Setting FRE without FR is not supported.  */
10775             if (new_fre && !new_fr) {
10776                 return -TARGET_EOPNOTSUPP;
10777             }
10778             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10779                 /* FR1 is not supported */
10780                 return -TARGET_EOPNOTSUPP;
10781             }
10782             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10783                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10784                 /* cannot set FR=0 */
10785                 return -TARGET_EOPNOTSUPP;
10786             }
10787             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10788                 /* Cannot set FRE=1 */
10789                 return -TARGET_EOPNOTSUPP;
10790             }
10791 
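                  /*
                   * Re-pack the FP registers: with FR=0 a 64-bit value lives in
                   * an even/odd pair of 32-bit registers, with FR=1 each register
                   * holds the full 64 bits, so move the upper halves to match the
                   * new mode.
                   */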
10792             int i;
10793             fpr_t *fpr = env->active_fpu.fpr;
10794             for (i = 0; i < 32 ; i += 2) {
10795                 if (!old_fr && new_fr) {
10796                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10797                 } else if (old_fr && !new_fr) {
10798                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10799                 }
10800             }
10801 
10802             if (new_fr) {
10803                 env->CP0_Status |= (1 << CP0St_FR);
10804                 env->hflags |= MIPS_HFLAG_F64;
10805             } else {
10806                 env->CP0_Status &= ~(1 << CP0St_FR);
10807                 env->hflags &= ~MIPS_HFLAG_F64;
10808             }
10809             if (new_fre) {
10810                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10811                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10812                     env->hflags |= MIPS_HFLAG_FRE;
10813                 }
10814             } else {
10815                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10816                 env->hflags &= ~MIPS_HFLAG_FRE;
10817             }
10818 
10819             return 0;
10820         }
10821 #endif /* MIPS */
10822 #ifdef TARGET_AARCH64
10823         case TARGET_PR_SVE_SET_VL:
10824             /*
10825              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10826              * PR_SVE_VL_INHERIT.  Note the kernel definition
10827              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10828              * even though the current architectural maximum is VQ=16.
10829              */
10830             ret = -TARGET_EINVAL;
10831             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10832                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10833                 CPUARMState *env = cpu_env;
10834                 ARMCPU *cpu = env_archcpu(env);
10835                 uint32_t vq, old_vq;
10836 
10837                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10838                 vq = MAX(arg2 / 16, 1);
10839                 vq = MIN(vq, cpu->sve_max_vq);
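                      /*
                       * As the kernel does, clamp the request to what the CPU
                       * supports; the vector length actually configured is
                       * returned below, in bytes.
                       */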
10840 
10841                 if (vq < old_vq) {
10842                     aarch64_sve_narrow_vq(env, vq);
10843                 }
10844                 env->vfp.zcr_el[1] = vq - 1;
10845                 arm_rebuild_hflags(env);
10846                 ret = vq * 16;
10847             }
10848             return ret;
10849         case TARGET_PR_SVE_GET_VL:
10850             ret = -TARGET_EINVAL;
10851             {
10852                 ARMCPU *cpu = env_archcpu(cpu_env);
10853                 if (cpu_isar_feature(aa64_sve, cpu)) {
10854                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10855                 }
10856             }
10857             return ret;
10858         case TARGET_PR_PAC_RESET_KEYS:
10859             {
10860                 CPUARMState *env = cpu_env;
10861                 ARMCPU *cpu = env_archcpu(env);
10862 
10863                 if (arg3 || arg4 || arg5) {
10864                     return -TARGET_EINVAL;
10865                 }
10866                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10867                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10868                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10869                                TARGET_PR_PAC_APGAKEY);
10870                     int ret = 0;
10871                     Error *err = NULL;
10872 
10873                     if (arg2 == 0) {
10874                         arg2 = all;
10875                     } else if (arg2 & ~all) {
10876                         return -TARGET_EINVAL;
10877                     }
10878                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10879                         ret |= qemu_guest_getrandom(&env->keys.apia,
10880                                                     sizeof(ARMPACKey), &err);
10881                     }
10882                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10883                         ret |= qemu_guest_getrandom(&env->keys.apib,
10884                                                     sizeof(ARMPACKey), &err);
10885                     }
10886                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10887                         ret |= qemu_guest_getrandom(&env->keys.apda,
10888                                                     sizeof(ARMPACKey), &err);
10889                     }
10890                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10891                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10892                                                     sizeof(ARMPACKey), &err);
10893                     }
10894                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10895                         ret |= qemu_guest_getrandom(&env->keys.apga,
10896                                                     sizeof(ARMPACKey), &err);
10897                     }
10898                     if (ret != 0) {
10899                         /*
10900                          * Some unknown failure in the crypto.  The best
10901                          * we can do is log it and fail the syscall.
10902                          * The real syscall cannot fail this way.
10903                          */
10904                         qemu_log_mask(LOG_UNIMP,
10905                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10906                                       error_get_pretty(err));
10907                         error_free(err);
10908                         return -TARGET_EIO;
10909                     }
10910                     return 0;
10911                 }
10912             }
10913             return -TARGET_EINVAL;
10914 #endif /* AARCH64 */
10915         case PR_GET_SECCOMP:
10916         case PR_SET_SECCOMP:
10917             /* Disable seccomp to prevent the target disabling syscalls we
10918              * need. */
10919             return -TARGET_EINVAL;
10920         default:
10921             /* Most prctl options have no pointer arguments */
10922             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10923         }
10924         break;
10925 #ifdef TARGET_NR_arch_prctl
10926     case TARGET_NR_arch_prctl:
10927         return do_arch_prctl(cpu_env, arg1, arg2);
10928 #endif
10929 #ifdef TARGET_NR_pread64
10930     case TARGET_NR_pread64:
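              /*
               * On ABIs that require 64-bit syscall arguments to start in an
               * even register pair, a padding slot is inserted and the offset
               * halves arrive one argument later.
               */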
10931         if (regpairs_aligned(cpu_env, num)) {
10932             arg4 = arg5;
10933             arg5 = arg6;
10934         }
10935         if (arg2 == 0 && arg3 == 0) {
10936             /* Special-case NULL buffer and zero length, which should succeed */
10937             p = 0;
10938         } else {
10939             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10940             if (!p) {
10941                 return -TARGET_EFAULT;
10942             }
10943         }
10944         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10945         unlock_user(p, arg2, ret);
10946         return ret;
10947     case TARGET_NR_pwrite64:
10948         if (regpairs_aligned(cpu_env, num)) {
10949             arg4 = arg5;
10950             arg5 = arg6;
10951         }
10952         if (arg2 == 0 && arg3 == 0) {
10953             /* Special-case NULL buffer and zero length, which should succeed */
10954             p = 0;
10955         } else {
10956             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10957             if (!p) {
10958                 return -TARGET_EFAULT;
10959             }
10960         }
10961         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10962         unlock_user(p, arg2, 0);
10963         return ret;
10964 #endif
10965     case TARGET_NR_getcwd:
10966         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10967             return -TARGET_EFAULT;
10968         ret = get_errno(sys_getcwd1(p, arg2));
10969         unlock_user(p, arg1, ret);
10970         return ret;
10971     case TARGET_NR_capget:
10972     case TARGET_NR_capset:
10973     {
10974         struct target_user_cap_header *target_header;
10975         struct target_user_cap_data *target_data = NULL;
10976         struct __user_cap_header_struct header;
10977         struct __user_cap_data_struct data[2];
10978         struct __user_cap_data_struct *dataptr = NULL;
10979         int i, target_datalen;
10980         int data_items = 1;
10981 
10982         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10983             return -TARGET_EFAULT;
10984         }
10985         header.version = tswap32(target_header->version);
10986         header.pid = tswap32(target_header->pid);
10987 
10988         if (header.version != _LINUX_CAPABILITY_VERSION) {
10989             /* Versions 2 and up take a pointer to two user_data structs */
10990             data_items = 2;
10991         }
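              /*
               * The v2/v3 interfaces carry 64-bit capability sets, hence two
               * 32-bit data structs in each direction.
               */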
10992 
10993         target_datalen = sizeof(*target_data) * data_items;
10994 
10995         if (arg2) {
10996             if (num == TARGET_NR_capget) {
10997                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10998             } else {
10999                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11000             }
11001             if (!target_data) {
11002                 unlock_user_struct(target_header, arg1, 0);
11003                 return -TARGET_EFAULT;
11004             }
11005 
11006             if (num == TARGET_NR_capset) {
11007                 for (i = 0; i < data_items; i++) {
11008                     data[i].effective = tswap32(target_data[i].effective);
11009                     data[i].permitted = tswap32(target_data[i].permitted);
11010                     data[i].inheritable = tswap32(target_data[i].inheritable);
11011                 }
11012             }
11013 
11014             dataptr = data;
11015         }
11016 
11017         if (num == TARGET_NR_capget) {
11018             ret = get_errno(capget(&header, dataptr));
11019         } else {
11020             ret = get_errno(capset(&header, dataptr));
11021         }
11022 
11023         /* The kernel always updates version for both capget and capset */
11024         target_header->version = tswap32(header.version);
11025         unlock_user_struct(target_header, arg1, 1);
11026 
11027         if (arg2) {
11028             if (num == TARGET_NR_capget) {
11029                 for (i = 0; i < data_items; i++) {
11030                     target_data[i].effective = tswap32(data[i].effective);
11031                     target_data[i].permitted = tswap32(data[i].permitted);
11032                     target_data[i].inheritable = tswap32(data[i].inheritable);
11033                 }
11034                 unlock_user(target_data, arg2, target_datalen);
11035             } else {
11036                 unlock_user(target_data, arg2, 0);
11037             }
11038         }
11039         return ret;
11040     }
11041     case TARGET_NR_sigaltstack:
11042         return do_sigaltstack(arg1, arg2,
11043                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11044 
11045 #ifdef CONFIG_SENDFILE
11046 #ifdef TARGET_NR_sendfile
11047     case TARGET_NR_sendfile:
11048     {
11049         off_t *offp = NULL;
11050         off_t off;
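              /*
               * sendfile passes the offset as a target 'long' (hence
               * get_user_sal); the sendfile64 variant below always uses a
               * 64-bit offset.
               */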
11051         if (arg3) {
11052             ret = get_user_sal(off, arg3);
11053             if (is_error(ret)) {
11054                 return ret;
11055             }
11056             offp = &off;
11057         }
11058         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11059         if (!is_error(ret) && arg3) {
11060             abi_long ret2 = put_user_sal(off, arg3);
11061             if (is_error(ret2)) {
11062                 ret = ret2;
11063             }
11064         }
11065         return ret;
11066     }
11067 #endif
11068 #ifdef TARGET_NR_sendfile64
11069     case TARGET_NR_sendfile64:
11070     {
11071         off_t *offp = NULL;
11072         off_t off;
11073         if (arg3) {
11074             ret = get_user_s64(off, arg3);
11075             if (is_error(ret)) {
11076                 return ret;
11077             }
11078             offp = &off;
11079         }
11080         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11081         if (!is_error(ret) && arg3) {
11082             abi_long ret2 = put_user_s64(off, arg3);
11083             if (is_error(ret2)) {
11084                 ret = ret2;
11085             }
11086         }
11087         return ret;
11088     }
11089 #endif
11090 #endif
11091 #ifdef TARGET_NR_vfork
11092     case TARGET_NR_vfork:
11093         return get_errno(do_fork(cpu_env,
11094                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11095                          0, 0, 0, 0));
11096 #endif
11097 #ifdef TARGET_NR_ugetrlimit
11098     case TARGET_NR_ugetrlimit:
11099     {
11100         struct rlimit rlim;
11101         int resource = target_to_host_resource(arg1);
11102         ret = get_errno(getrlimit(resource, &rlim));
11103         if (!is_error(ret)) {
11104             struct target_rlimit *target_rlim;
11105             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11106                 return -TARGET_EFAULT;
11107             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11108             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11109             unlock_user_struct(target_rlim, arg2, 1);
11110         }
11111         return ret;
11112     }
11113 #endif
11114 #ifdef TARGET_NR_truncate64
11115     case TARGET_NR_truncate64:
11116         if (!(p = lock_user_string(arg1)))
11117             return -TARGET_EFAULT;
11118         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11119         unlock_user(p, arg1, 0);
11120         return ret;
11121 #endif
11122 #ifdef TARGET_NR_ftruncate64
11123     case TARGET_NR_ftruncate64:
11124         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11125 #endif
11126 #ifdef TARGET_NR_stat64
11127     case TARGET_NR_stat64:
11128         if (!(p = lock_user_string(arg1))) {
11129             return -TARGET_EFAULT;
11130         }
11131         ret = get_errno(stat(path(p), &st));
11132         unlock_user(p, arg1, 0);
11133         if (!is_error(ret))
11134             ret = host_to_target_stat64(cpu_env, arg2, &st);
11135         return ret;
11136 #endif
11137 #ifdef TARGET_NR_lstat64
11138     case TARGET_NR_lstat64:
11139         if (!(p = lock_user_string(arg1))) {
11140             return -TARGET_EFAULT;
11141         }
11142         ret = get_errno(lstat(path(p), &st));
11143         unlock_user(p, arg1, 0);
11144         if (!is_error(ret))
11145             ret = host_to_target_stat64(cpu_env, arg2, &st);
11146         return ret;
11147 #endif
11148 #ifdef TARGET_NR_fstat64
11149     case TARGET_NR_fstat64:
11150         ret = get_errno(fstat(arg1, &st));
11151         if (!is_error(ret))
11152             ret = host_to_target_stat64(cpu_env, arg2, &st);
11153         return ret;
11154 #endif
11155 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11156 #ifdef TARGET_NR_fstatat64
11157     case TARGET_NR_fstatat64:
11158 #endif
11159 #ifdef TARGET_NR_newfstatat
11160     case TARGET_NR_newfstatat:
11161 #endif
11162         if (!(p = lock_user_string(arg2))) {
11163             return -TARGET_EFAULT;
11164         }
11165         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11166         unlock_user(p, arg2, 0);
11167         if (!is_error(ret))
11168             ret = host_to_target_stat64(cpu_env, arg3, &st);
11169         return ret;
11170 #endif
11171 #if defined(TARGET_NR_statx)
11172     case TARGET_NR_statx:
11173         {
11174             struct target_statx *target_stx;
11175             int dirfd = arg1;
11176             int flags = arg3;
11177 
11178             p = lock_user_string(arg2);
11179             if (p == NULL) {
11180                 return -TARGET_EFAULT;
11181             }
11182 #if defined(__NR_statx)
11183             {
11184                 /*
11185                  * It is assumed that struct statx is architecture independent.
11186                  */
11187                 struct target_statx host_stx;
11188                 int mask = arg4;
11189 
11190                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11191                 if (!is_error(ret)) {
11192                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11193                         unlock_user(p, arg2, 0);
11194                         return -TARGET_EFAULT;
11195                     }
11196                 }
11197 
11198                 if (ret != -TARGET_ENOSYS) {
11199                     unlock_user(p, arg2, 0);
11200                     return ret;
11201                 }
11202             }
11203 #endif
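                  /*
                   * Either the host has no statx syscall or it reported ENOSYS:
                   * fall back to fstatat() and fill in the statx fields that can
                   * be derived from struct stat.
                   */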
11204             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11205             unlock_user(p, arg2, 0);
11206 
11207             if (!is_error(ret)) {
11208                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11209                     return -TARGET_EFAULT;
11210                 }
11211                 memset(target_stx, 0, sizeof(*target_stx));
11212                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11213                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11214                 __put_user(st.st_ino, &target_stx->stx_ino);
11215                 __put_user(st.st_mode, &target_stx->stx_mode);
11216                 __put_user(st.st_uid, &target_stx->stx_uid);
11217                 __put_user(st.st_gid, &target_stx->stx_gid);
11218                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11219                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11220                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11221                 __put_user(st.st_size, &target_stx->stx_size);
11222                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11223                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11224                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11225                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11226                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11227                 unlock_user_struct(target_stx, arg5, 1);
11228             }
11229         }
11230         return ret;
11231 #endif
11232 #ifdef TARGET_NR_lchown
11233     case TARGET_NR_lchown:
11234         if (!(p = lock_user_string(arg1)))
11235             return -TARGET_EFAULT;
11236         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11237         unlock_user(p, arg1, 0);
11238         return ret;
11239 #endif
11240 #ifdef TARGET_NR_getuid
11241     case TARGET_NR_getuid:
11242         return get_errno(high2lowuid(getuid()));
11243 #endif
11244 #ifdef TARGET_NR_getgid
11245     case TARGET_NR_getgid:
11246         return get_errno(high2lowgid(getgid()));
11247 #endif
11248 #ifdef TARGET_NR_geteuid
11249     case TARGET_NR_geteuid:
11250         return get_errno(high2lowuid(geteuid()));
11251 #endif
11252 #ifdef TARGET_NR_getegid
11253     case TARGET_NR_getegid:
11254         return get_errno(high2lowgid(getegid()));
11255 #endif
11256     case TARGET_NR_setreuid:
11257         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11258     case TARGET_NR_setregid:
11259         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11260     case TARGET_NR_getgroups:
11261         {
11262             int gidsetsize = arg1;
11263             target_id *target_grouplist;
11264             gid_t *grouplist;
11265             int i;
11266 
11267             grouplist = alloca(gidsetsize * sizeof(gid_t));
11268             ret = get_errno(getgroups(gidsetsize, grouplist));
11269             if (gidsetsize == 0)
11270                 return ret;
11271             if (!is_error(ret)) {
11272                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11273                 if (!target_grouplist)
11274                     return -TARGET_EFAULT;
11275                 for (i = 0; i < ret; i++)
11276                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11277                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11278             }
11279         }
11280         return ret;
11281     case TARGET_NR_setgroups:
11282         {
11283             int gidsetsize = arg1;
11284             target_id *target_grouplist;
11285             gid_t *grouplist = NULL;
11286             int i;
11287             if (gidsetsize) {
11288                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11289                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11290                 if (!target_grouplist) {
11291                     return -TARGET_EFAULT;
11292                 }
11293                 for (i = 0; i < gidsetsize; i++) {
11294                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11295                 }
11296                 unlock_user(target_grouplist, arg2, 0);
11297             }
11298             return get_errno(setgroups(gidsetsize, grouplist));
11299         }
11300     case TARGET_NR_fchown:
11301         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11302 #if defined(TARGET_NR_fchownat)
11303     case TARGET_NR_fchownat:
11304         if (!(p = lock_user_string(arg2)))
11305             return -TARGET_EFAULT;
11306         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11307                                  low2highgid(arg4), arg5));
11308         unlock_user(p, arg2, 0);
11309         return ret;
11310 #endif
11311 #ifdef TARGET_NR_setresuid
11312     case TARGET_NR_setresuid:
11313         return get_errno(sys_setresuid(low2highuid(arg1),
11314                                        low2highuid(arg2),
11315                                        low2highuid(arg3)));
11316 #endif
11317 #ifdef TARGET_NR_getresuid
11318     case TARGET_NR_getresuid:
11319         {
11320             uid_t ruid, euid, suid;
11321             ret = get_errno(getresuid(&ruid, &euid, &suid));
11322             if (!is_error(ret)) {
11323                 if (put_user_id(high2lowuid(ruid), arg1)
11324                     || put_user_id(high2lowuid(euid), arg2)
11325                     || put_user_id(high2lowuid(suid), arg3))
11326                     return -TARGET_EFAULT;
11327             }
11328         }
11329         return ret;
11330 #endif
11331 #ifdef TARGET_NR_setresgid
11332     case TARGET_NR_setresgid:
11333         return get_errno(sys_setresgid(low2highgid(arg1),
11334                                        low2highgid(arg2),
11335                                        low2highgid(arg3)));
11336 #endif
11337 #ifdef TARGET_NR_getresgid
11338     case TARGET_NR_getresgid:
11339         {
11340             gid_t rgid, egid, sgid;
11341             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11342             if (!is_error(ret)) {
11343                 if (put_user_id(high2lowgid(rgid), arg1)
11344                     || put_user_id(high2lowgid(egid), arg2)
11345                     || put_user_id(high2lowgid(sgid), arg3))
11346                     return -TARGET_EFAULT;
11347             }
11348         }
11349         return ret;
11350 #endif
11351 #ifdef TARGET_NR_chown
11352     case TARGET_NR_chown:
11353         if (!(p = lock_user_string(arg1)))
11354             return -TARGET_EFAULT;
11355         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11356         unlock_user(p, arg1, 0);
11357         return ret;
11358 #endif
11359     case TARGET_NR_setuid:
11360         return get_errno(sys_setuid(low2highuid(arg1)));
11361     case TARGET_NR_setgid:
11362         return get_errno(sys_setgid(low2highgid(arg1)));
11363     case TARGET_NR_setfsuid:
11364         return get_errno(setfsuid(arg1));
11365     case TARGET_NR_setfsgid:
11366         return get_errno(setfsgid(arg1));
11367 
11368 #ifdef TARGET_NR_lchown32
11369     case TARGET_NR_lchown32:
11370         if (!(p = lock_user_string(arg1)))
11371             return -TARGET_EFAULT;
11372         ret = get_errno(lchown(p, arg2, arg3));
11373         unlock_user(p, arg1, 0);
11374         return ret;
11375 #endif
11376 #ifdef TARGET_NR_getuid32
11377     case TARGET_NR_getuid32:
11378         return get_errno(getuid());
11379 #endif
11380 
11381 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11382     /* Alpha specific */
11383     case TARGET_NR_getxuid:
11384         {
11385             uid_t euid;
11386             euid = geteuid();
11387             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11388         }
11389         return get_errno(getuid());
11390 #endif
11391 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11392     /* Alpha specific */
11393     case TARGET_NR_getxgid:
11394         {
11395             gid_t egid;
11396             egid = getegid();
11397             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11398         }
11399         return get_errno(getgid());
11400 #endif
11401 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11402     /* Alpha specific */
11403     case TARGET_NR_osf_getsysinfo:
11404         ret = -TARGET_EOPNOTSUPP;
11405         switch (arg1) {
11406           case TARGET_GSI_IEEE_FP_CONTROL:
11407             {
11408                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11409                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11410 
11411                 swcr &= ~SWCR_STATUS_MASK;
11412                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11413 
11414                 if (put_user_u64(swcr, arg2))
11415                     return -TARGET_EFAULT;
11416                 ret = 0;
11417             }
11418             break;
11419 
11420           /* case GSI_IEEE_STATE_AT_SIGNAL:
11421              -- Not implemented in linux kernel.
11422              case GSI_UACPROC:
11423              -- Retrieves current unaligned access state; not much used.
11424              case GSI_PROC_TYPE:
11425              -- Retrieves implver information; surely not used.
11426              case GSI_GET_HWRPB:
11427              -- Grabs a copy of the HWRPB; surely not used.
11428           */
11429         }
11430         return ret;
11431 #endif
11432 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11433     /* Alpha specific */
11434     case TARGET_NR_osf_setsysinfo:
11435         ret = -TARGET_EOPNOTSUPP;
11436         switch (arg1) {
11437           case TARGET_SSI_IEEE_FP_CONTROL:
11438             {
11439                 uint64_t swcr, fpcr;
11440 
11441                 if (get_user_u64(swcr, arg2)) {
11442                     return -TARGET_EFAULT;
11443                 }
11444 
11445                 /*
11446                  * The kernel calls swcr_update_status to update the
11447                  * status bits from the fpcr at every point that it
11448                  * could be queried.  Therefore, we store the status
11449                  * bits only in FPCR.
11450                  */
11451                 ((CPUAlphaState *)cpu_env)->swcr
11452                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11453 
11454                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11455                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11456                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11457                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11458                 ret = 0;
11459             }
11460             break;
11461 
11462           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11463             {
11464                 uint64_t exc, fpcr, fex;
11465 
11466                 if (get_user_u64(exc, arg2)) {
11467                     return -TARGET_EFAULT;
11468                 }
11469                 exc &= SWCR_STATUS_MASK;
11470                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11471 
11472                 /* Old exceptions are not signaled.  */
11473                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11474                 fex = exc & ~fex;
11475                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11476                 fex &= ((CPUArchState *)cpu_env)->swcr;
11477 
11478                 /* Update the hardware fpcr.  */
11479                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11480                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11481 
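                      /*
                       * Deliver SIGFPE for any newly raised exception whose trap
                       * enable bit is set in the software control word.
                       */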
11482                 if (fex) {
11483                     int si_code = TARGET_FPE_FLTUNK;
11484                     target_siginfo_t info;
11485 
11486                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11487                         si_code = TARGET_FPE_FLTUND;
11488                     }
11489                     if (fex & SWCR_TRAP_ENABLE_INE) {
11490                         si_code = TARGET_FPE_FLTRES;
11491                     }
11492                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11493                         si_code = TARGET_FPE_FLTUND;
11494                     }
11495                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11496                         si_code = TARGET_FPE_FLTOVF;
11497                     }
11498                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11499                         si_code = TARGET_FPE_FLTDIV;
11500                     }
11501                     if (fex & SWCR_TRAP_ENABLE_INV) {
11502                         si_code = TARGET_FPE_FLTINV;
11503                     }
11504 
11505                     info.si_signo = SIGFPE;
11506                     info.si_errno = 0;
11507                     info.si_code = si_code;
11508                     info._sifields._sigfault._addr
11509                         = ((CPUArchState *)cpu_env)->pc;
11510                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11511                                  QEMU_SI_FAULT, &info);
11512                 }
11513                 ret = 0;
11514             }
11515             break;
11516 
11517           /* case SSI_NVPAIRS:
11518              -- Used with SSIN_UACPROC to enable unaligned accesses.
11519              case SSI_IEEE_STATE_AT_SIGNAL:
11520              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11521              -- Not implemented in linux kernel
11522           */
11523         }
11524         return ret;
11525 #endif
11526 #ifdef TARGET_NR_osf_sigprocmask
11527     /* Alpha specific.  */
11528     case TARGET_NR_osf_sigprocmask:
11529         {
11530             abi_ulong mask;
11531             int how;
11532             sigset_t set, oldset;
11533 
11534             switch (arg1) {
11535             case TARGET_SIG_BLOCK:
11536                 how = SIG_BLOCK;
11537                 break;
11538             case TARGET_SIG_UNBLOCK:
11539                 how = SIG_UNBLOCK;
11540                 break;
11541             case TARGET_SIG_SETMASK:
11542                 how = SIG_SETMASK;
11543                 break;
11544             default:
11545                 return -TARGET_EINVAL;
11546             }
11547             mask = arg2;
11548             target_to_host_old_sigset(&set, &mask);
11549             ret = do_sigprocmask(how, &set, &oldset);
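                  /*
                   * Following the OSF/1 convention, the old mask is returned as
                   * the syscall result rather than through a user pointer.
                   */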
11550             if (!ret) {
11551                 host_to_target_old_sigset(&mask, &oldset);
11552                 ret = mask;
11553             }
11554         }
11555         return ret;
11556 #endif
11557 
11558 #ifdef TARGET_NR_getgid32
11559     case TARGET_NR_getgid32:
11560         return get_errno(getgid());
11561 #endif
11562 #ifdef TARGET_NR_geteuid32
11563     case TARGET_NR_geteuid32:
11564         return get_errno(geteuid());
11565 #endif
11566 #ifdef TARGET_NR_getegid32
11567     case TARGET_NR_getegid32:
11568         return get_errno(getegid());
11569 #endif
11570 #ifdef TARGET_NR_setreuid32
11571     case TARGET_NR_setreuid32:
11572         return get_errno(setreuid(arg1, arg2));
11573 #endif
11574 #ifdef TARGET_NR_setregid32
11575     case TARGET_NR_setregid32:
11576         return get_errno(setregid(arg1, arg2));
11577 #endif
11578 #ifdef TARGET_NR_getgroups32
11579     case TARGET_NR_getgroups32:
11580         {
11581             int gidsetsize = arg1;
11582             uint32_t *target_grouplist;
11583             gid_t *grouplist;
11584             int i;
11585 
11586             grouplist = alloca(gidsetsize * sizeof(gid_t));
11587             ret = get_errno(getgroups(gidsetsize, grouplist));
11588             if (gidsetsize == 0)
11589                 return ret;
11590             if (!is_error(ret)) {
11591                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11592                 if (!target_grouplist) {
11593                     return -TARGET_EFAULT;
11594                 }
11595                 for (i = 0; i < ret; i++)
11596                     target_grouplist[i] = tswap32(grouplist[i]);
11597                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11598             }
11599         }
11600         return ret;
11601 #endif
11602 #ifdef TARGET_NR_setgroups32
11603     case TARGET_NR_setgroups32:
11604         {
11605             int gidsetsize = arg1;
11606             uint32_t *target_grouplist;
11607             gid_t *grouplist;
11608             int i;
11609 
11610             grouplist = alloca(gidsetsize * sizeof(gid_t));
11611             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11612             if (!target_grouplist) {
11613                 return -TARGET_EFAULT;
11614             }
11615             for (i = 0; i < gidsetsize; i++)
11616                 grouplist[i] = tswap32(target_grouplist[i]);
11617             unlock_user(target_grouplist, arg2, 0);
11618             return get_errno(setgroups(gidsetsize, grouplist));
11619         }
11620 #endif
11621 #ifdef TARGET_NR_fchown32
11622     case TARGET_NR_fchown32:
11623         return get_errno(fchown(arg1, arg2, arg3));
11624 #endif
11625 #ifdef TARGET_NR_setresuid32
11626     case TARGET_NR_setresuid32:
11627         return get_errno(sys_setresuid(arg1, arg2, arg3));
11628 #endif
11629 #ifdef TARGET_NR_getresuid32
11630     case TARGET_NR_getresuid32:
11631         {
11632             uid_t ruid, euid, suid;
11633             ret = get_errno(getresuid(&ruid, &euid, &suid));
11634             if (!is_error(ret)) {
11635                 if (put_user_u32(ruid, arg1)
11636                     || put_user_u32(euid, arg2)
11637                     || put_user_u32(suid, arg3))
11638                     return -TARGET_EFAULT;
11639             }
11640         }
11641         return ret;
11642 #endif
11643 #ifdef TARGET_NR_setresgid32
11644     case TARGET_NR_setresgid32:
11645         return get_errno(sys_setresgid(arg1, arg2, arg3));
11646 #endif
11647 #ifdef TARGET_NR_getresgid32
11648     case TARGET_NR_getresgid32:
11649         {
11650             gid_t rgid, egid, sgid;
11651             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11652             if (!is_error(ret)) {
11653                 if (put_user_u32(rgid, arg1)
11654                     || put_user_u32(egid, arg2)
11655                     || put_user_u32(sgid, arg3))
11656                     return -TARGET_EFAULT;
11657             }
11658         }
11659         return ret;
11660 #endif
11661 #ifdef TARGET_NR_chown32
11662     case TARGET_NR_chown32:
11663         if (!(p = lock_user_string(arg1)))
11664             return -TARGET_EFAULT;
11665         ret = get_errno(chown(p, arg2, arg3));
11666         unlock_user(p, arg1, 0);
11667         return ret;
11668 #endif
11669 #ifdef TARGET_NR_setuid32
11670     case TARGET_NR_setuid32:
11671         return get_errno(sys_setuid(arg1));
11672 #endif
11673 #ifdef TARGET_NR_setgid32
11674     case TARGET_NR_setgid32:
11675         return get_errno(sys_setgid(arg1));
11676 #endif
11677 #ifdef TARGET_NR_setfsuid32
11678     case TARGET_NR_setfsuid32:
11679         return get_errno(setfsuid(arg1));
11680 #endif
11681 #ifdef TARGET_NR_setfsgid32
11682     case TARGET_NR_setfsgid32:
11683         return get_errno(setfsgid(arg1));
11684 #endif
11685 #ifdef TARGET_NR_mincore
11686     case TARGET_NR_mincore:
11687         {
11688             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11689             if (!a) {
11690                 return -TARGET_ENOMEM;
11691             }
11692             p = lock_user_string(arg3);
11693             if (!p) {
11694                 ret = -TARGET_EFAULT;
11695             } else {
11696                 ret = get_errno(mincore(a, arg2, p));
11697                 unlock_user(p, arg3, ret);
11698             }
11699             unlock_user(a, arg1, 0);
11700         }
11701         return ret;
11702 #endif
11703 #ifdef TARGET_NR_arm_fadvise64_64
11704     case TARGET_NR_arm_fadvise64_64:
11705         /* arm_fadvise64_64 looks like fadvise64_64 but
11706          * with different argument order: fd, advice, offset, len
11707          * rather than the usual fd, offset, len, advice.
11708          * Note that offset and len are both 64-bit so appear as
11709          * pairs of 32-bit registers.
11710          */
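              /*
               * posix_fadvise() returns the error number directly instead of
               * setting errno, so get_errno() is not used here.
               */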
11711         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11712                             target_offset64(arg5, arg6), arg2);
11713         return -host_to_target_errno(ret);
11714 #endif
11715 
11716 #if TARGET_ABI_BITS == 32
11717 
11718 #ifdef TARGET_NR_fadvise64_64
11719     case TARGET_NR_fadvise64_64:
11720 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11721         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11722         ret = arg2;
11723         arg2 = arg3;
11724         arg3 = arg4;
11725         arg4 = arg5;
11726         arg5 = arg6;
11727         arg6 = ret;
11728 #else
11729         /* 6 args: fd, offset (high, low), len (high, low), advice */
11730         if (regpairs_aligned(cpu_env, num)) {
11731             /* offset is in (3,4), len in (5,6) and advice in 7 */
11732             arg2 = arg3;
11733             arg3 = arg4;
11734             arg4 = arg5;
11735             arg5 = arg6;
11736             arg6 = arg7;
11737         }
11738 #endif
11739         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11740                             target_offset64(arg4, arg5), arg6);
11741         return -host_to_target_errno(ret);
11742 #endif
11743 
11744 #ifdef TARGET_NR_fadvise64
11745     case TARGET_NR_fadvise64:
11746         /* 5 args: fd, offset (high, low), len, advice */
11747         if (regpairs_aligned(cpu_env, num)) {
11748             /* offset is in (3,4), len in 5 and advice in 6 */
11749             arg2 = arg3;
11750             arg3 = arg4;
11751             arg4 = arg5;
11752             arg5 = arg6;
11753         }
11754         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11755         return -host_to_target_errno(ret);
11756 #endif
11757 
11758 #else /* not a 32-bit ABI */
11759 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11760 #ifdef TARGET_NR_fadvise64_64
11761     case TARGET_NR_fadvise64_64:
11762 #endif
11763 #ifdef TARGET_NR_fadvise64
11764     case TARGET_NR_fadvise64:
11765 #endif
11766 #ifdef TARGET_S390X
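              /*
               * The s390x guest ABI numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7
               * rather than the generic 4/5, so remap them and turn the clashing
               * generic values into something invalid.
               */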
11767         switch (arg4) {
11768         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11769         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11770         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11771         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11772         default: break;
11773         }
11774 #endif
11775         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11776 #endif
11777 #endif /* end of 64-bit ABI fadvise handling */
11778 
11779 #ifdef TARGET_NR_madvise
11780     case TARGET_NR_madvise:
11781         /* A straight passthrough may not be safe because qemu sometimes
11782            turns private file-backed mappings into anonymous mappings.
11783            This will break MADV_DONTNEED.
11784            This is a hint, so ignoring and returning success is ok.  */
11785         return 0;
11786 #endif
11787 #ifdef TARGET_NR_fcntl64
11788     case TARGET_NR_fcntl64:
11789     {
11790         int cmd;
11791         struct flock64 fl;
11792         from_flock64_fn *copyfrom = copy_from_user_flock64;
11793         to_flock64_fn *copyto = copy_to_user_flock64;
11794 
11795 #ifdef TARGET_ARM
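              /*
               * The old ARM OABI lays out struct flock64 without the padding
               * that EABI's 8-byte alignment of 64-bit members introduces, so it
               * needs its own copy helpers.
               */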
11796         if (!((CPUARMState *)cpu_env)->eabi) {
11797             copyfrom = copy_from_user_oabi_flock64;
11798             copyto = copy_to_user_oabi_flock64;
11799         }
11800 #endif
11801 
11802         cmd = target_to_host_fcntl_cmd(arg2);
11803         if (cmd == -TARGET_EINVAL) {
11804             return cmd;
11805         }
11806 
11807         switch (arg2) {
11808         case TARGET_F_GETLK64:
11809             ret = copyfrom(&fl, arg3);
11810             if (ret) {
11811                 break;
11812             }
11813             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11814             if (ret == 0) {
11815                 ret = copyto(arg3, &fl);
11816             }
11817             break;
11818 
11819         case TARGET_F_SETLK64:
11820         case TARGET_F_SETLKW64:
11821             ret = copyfrom(&fl, arg3);
11822             if (ret) {
11823                 break;
11824             }
11825             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11826             break;
11827         default:
11828             ret = do_fcntl(arg1, arg2, arg3);
11829             break;
11830         }
11831         return ret;
11832     }
11833 #endif
11834 #ifdef TARGET_NR_cacheflush
11835     case TARGET_NR_cacheflush:
11836         /* self-modifying code is handled automatically, so nothing needed */
11837         return 0;
11838 #endif
11839 #ifdef TARGET_NR_getpagesize
11840     case TARGET_NR_getpagesize:
11841         return TARGET_PAGE_SIZE;
11842 #endif
11843     case TARGET_NR_gettid:
11844         return get_errno(sys_gettid());
11845 #ifdef TARGET_NR_readahead
11846     case TARGET_NR_readahead:
11847 #if TARGET_ABI_BITS == 32
11848         if (regpairs_aligned(cpu_env, num)) {
11849             arg2 = arg3;
11850             arg3 = arg4;
11851             arg4 = arg5;
11852         }
11853         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11854 #else
11855         ret = get_errno(readahead(arg1, arg2, arg3));
11856 #endif
11857         return ret;
11858 #endif
11859 #ifdef CONFIG_ATTR
11860 #ifdef TARGET_NR_setxattr
11861     case TARGET_NR_listxattr:
11862     case TARGET_NR_llistxattr:
11863     {
11864         void *p, *b = 0;
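              /*
               * arg2 == 0 means the guest only wants the required list size;
               * listxattr is then called with a NULL buffer.
               */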
11865         if (arg2) {
11866             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11867             if (!b) {
11868                 return -TARGET_EFAULT;
11869             }
11870         }
11871         p = lock_user_string(arg1);
11872         if (p) {
11873             if (num == TARGET_NR_listxattr) {
11874                 ret = get_errno(listxattr(p, b, arg3));
11875             } else {
11876                 ret = get_errno(llistxattr(p, b, arg3));
11877             }
11878         } else {
11879             ret = -TARGET_EFAULT;
11880         }
11881         unlock_user(p, arg1, 0);
11882         unlock_user(b, arg2, arg3);
11883         return ret;
11884     }
11885     case TARGET_NR_flistxattr:
11886     {
11887         void *b = 0;
11888         if (arg2) {
11889             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11890             if (!b) {
11891                 return -TARGET_EFAULT;
11892             }
11893         }
11894         ret = get_errno(flistxattr(arg1, b, arg3));
11895         unlock_user(b, arg2, arg3);
11896         return ret;
11897     }
11898     case TARGET_NR_setxattr:
11899     case TARGET_NR_lsetxattr:
11900         {
11901             void *p, *n, *v = 0;
11902             if (arg3) {
11903                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11904                 if (!v) {
11905                     return -TARGET_EFAULT;
11906                 }
11907             }
11908             p = lock_user_string(arg1);
11909             n = lock_user_string(arg2);
11910             if (p && n) {
11911                 if (num == TARGET_NR_setxattr) {
11912                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11913                 } else {
11914                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11915                 }
11916             } else {
11917                 ret = -TARGET_EFAULT;
11918             }
11919             unlock_user(p, arg1, 0);
11920             unlock_user(n, arg2, 0);
11921             unlock_user(v, arg3, 0);
11922         }
11923         return ret;
11924     case TARGET_NR_fsetxattr:
11925         {
11926             void *n, *v = 0;
11927             if (arg3) {
11928                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11929                 if (!v) {
11930                     return -TARGET_EFAULT;
11931                 }
11932             }
11933             n = lock_user_string(arg2);
11934             if (n) {
11935                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11936             } else {
11937                 ret = -TARGET_EFAULT;
11938             }
11939             unlock_user(n, arg2, 0);
11940             unlock_user(v, arg3, 0);
11941         }
11942         return ret;
11943     case TARGET_NR_getxattr:
11944     case TARGET_NR_lgetxattr:
11945         {
11946             void *p, *n, *v = 0;
11947             if (arg3) {
11948                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11949                 if (!v) {
11950                     return -TARGET_EFAULT;
11951                 }
11952             }
11953             p = lock_user_string(arg1);
11954             n = lock_user_string(arg2);
11955             if (p && n) {
11956                 if (num == TARGET_NR_getxattr) {
11957                     ret = get_errno(getxattr(p, n, v, arg4));
11958                 } else {
11959                     ret = get_errno(lgetxattr(p, n, v, arg4));
11960                 }
11961             } else {
11962                 ret = -TARGET_EFAULT;
11963             }
11964             unlock_user(p, arg1, 0);
11965             unlock_user(n, arg2, 0);
11966             unlock_user(v, arg3, arg4);
11967         }
11968         return ret;
11969     case TARGET_NR_fgetxattr:
11970         {
11971             void *n, *v = 0;
11972             if (arg3) {
11973                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11974                 if (!v) {
11975                     return -TARGET_EFAULT;
11976                 }
11977             }
11978             n = lock_user_string(arg2);
11979             if (n) {
11980                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11981             } else {
11982                 ret = -TARGET_EFAULT;
11983             }
11984             unlock_user(n, arg2, 0);
11985             unlock_user(v, arg3, arg4);
11986         }
11987         return ret;
11988     case TARGET_NR_removexattr:
11989     case TARGET_NR_lremovexattr:
11990         {
11991             void *p, *n;
11992             p = lock_user_string(arg1);
11993             n = lock_user_string(arg2);
11994             if (p && n) {
11995                 if (num == TARGET_NR_removexattr) {
11996                     ret = get_errno(removexattr(p, n));
11997                 } else {
11998                     ret = get_errno(lremovexattr(p, n));
11999                 }
12000             } else {
12001                 ret = -TARGET_EFAULT;
12002             }
12003             unlock_user(p, arg1, 0);
12004             unlock_user(n, arg2, 0);
12005         }
12006         return ret;
12007     case TARGET_NR_fremovexattr:
12008         {
12009             void *n;
12010             n = lock_user_string(arg2);
12011             if (n) {
12012                 ret = get_errno(fremovexattr(arg1, n));
12013             } else {
12014                 ret = -TARGET_EFAULT;
12015             }
12016             unlock_user(n, arg2, 0);
12017         }
12018         return ret;
12019 #endif
12020 #endif /* CONFIG_ATTR */
12021 #ifdef TARGET_NR_set_thread_area
12022     case TARGET_NR_set_thread_area:
12023 #if defined(TARGET_MIPS)
12024       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12025       return 0;
12026 #elif defined(TARGET_CRIS)
12027       if (arg1 & 0xff) {
12028           ret = -TARGET_EINVAL;
12029       } else {
12030           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12031           ret = 0;
12032       }
12033       return ret;
12034 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12035       return do_set_thread_area(cpu_env, arg1);
12036 #elif defined(TARGET_M68K)
12037       {
12038           TaskState *ts = cpu->opaque;
12039           ts->tp_value = arg1;
12040           return 0;
12041       }
12042 #else
12043       return -TARGET_ENOSYS;
12044 #endif
12045 #endif
12046 #ifdef TARGET_NR_get_thread_area
12047     case TARGET_NR_get_thread_area:
12048 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12049         return do_get_thread_area(cpu_env, arg1);
12050 #elif defined(TARGET_M68K)
12051         {
12052             TaskState *ts = cpu->opaque;
12053             return ts->tp_value;
12054         }
12055 #else
12056         return -TARGET_ENOSYS;
12057 #endif
12058 #endif
12059 #ifdef TARGET_NR_getdomainname
12060     case TARGET_NR_getdomainname:
12061         return -TARGET_ENOSYS;
12062 #endif
12063 
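          /*
           * The *_time64 variants of the clock syscalls differ from the plain
           * ones only in the guest-side layout: they take a
           * target__kernel_timespec (64-bit tv_sec even on 32-bit ABIs) and
           * therefore use the timespec64 conversion helpers.
           */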
12064 #ifdef TARGET_NR_clock_settime
12065     case TARGET_NR_clock_settime:
12066     {
12067         struct timespec ts;
12068 
12069         ret = target_to_host_timespec(&ts, arg2);
12070         if (!is_error(ret)) {
12071             ret = get_errno(clock_settime(arg1, &ts));
12072         }
12073         return ret;
12074     }
12075 #endif
12076 #ifdef TARGET_NR_clock_settime64
12077     case TARGET_NR_clock_settime64:
12078     {
12079         struct timespec ts;
12080 
12081         ret = target_to_host_timespec64(&ts, arg2);
12082         if (!is_error(ret)) {
12083             ret = get_errno(clock_settime(arg1, &ts));
12084         }
12085         return ret;
12086     }
12087 #endif
12088 #ifdef TARGET_NR_clock_gettime
12089     case TARGET_NR_clock_gettime:
12090     {
12091         struct timespec ts;
12092         ret = get_errno(clock_gettime(arg1, &ts));
12093         if (!is_error(ret)) {
12094             ret = host_to_target_timespec(arg2, &ts);
12095         }
12096         return ret;
12097     }
12098 #endif
12099 #ifdef TARGET_NR_clock_gettime64
12100     case TARGET_NR_clock_gettime64:
12101     {
12102         struct timespec ts;
12103         ret = get_errno(clock_gettime(arg1, &ts));
12104         if (!is_error(ret)) {
12105             ret = host_to_target_timespec64(arg2, &ts);
12106         }
12107         return ret;
12108     }
12109 #endif
12110 #ifdef TARGET_NR_clock_getres
12111     case TARGET_NR_clock_getres:
12112     {
12113         struct timespec ts;
12114         ret = get_errno(clock_getres(arg1, &ts));
12115         if (!is_error(ret)) {
12116             host_to_target_timespec(arg2, &ts);
12117         }
12118         return ret;
12119     }
12120 #endif
12121 #ifdef TARGET_NR_clock_getres_time64
12122     case TARGET_NR_clock_getres_time64:
12123     {
12124         struct timespec ts;
12125         ret = get_errno(clock_getres(arg1, &ts));
12126         if (!is_error(ret)) {
12127             host_to_target_timespec64(arg2, &ts);
12128         }
12129         return ret;
12130     }
12131 #endif
12132 #ifdef TARGET_NR_clock_nanosleep
12133     case TARGET_NR_clock_nanosleep:
12134     {
12135         struct timespec ts;
12136         if (target_to_host_timespec(&ts, arg3)) {
12137             return -TARGET_EFAULT;
12138         }
12139         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12140                                              &ts, arg4 ? &ts : NULL));
12141         /*
12142          * If the call is interrupted by a signal handler, it fails with
12143          * -TARGET_EINTR. In that case, if arg4 is non-NULL and arg2 is not
12144          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12145          */
12146         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12147             host_to_target_timespec(arg4, &ts)) {
12148               return -TARGET_EFAULT;
12149         }
12150 
12151         return ret;
12152     }
12153 #endif
12154 #ifdef TARGET_NR_clock_nanosleep_time64
12155     case TARGET_NR_clock_nanosleep_time64:
12156     {
12157         struct timespec ts;
12158 
12159         if (target_to_host_timespec64(&ts, arg3)) {
12160             return -TARGET_EFAULT;
12161         }
12162 
12163         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12164                                              &ts, arg4 ? &ts : NULL));
12165 
12166         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12167             host_to_target_timespec64(arg4, &ts)) {
12168             return -TARGET_EFAULT;
12169         }
12170         return ret;
12171     }
12172 #endif
12173 
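          /*
           * set_tid_address() only records an address that the kernel will
           * clear and futex-wake when the thread exits, so the guest pointer
           * can be translated with g2h() and handed straight to the host
           * syscall.
           */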
12174 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12175     case TARGET_NR_set_tid_address:
12176         return get_errno(set_tid_address((int *)g2h(arg1)));
12177 #endif
12178 
12179     case TARGET_NR_tkill:
12180         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12181 
12182     case TARGET_NR_tgkill:
12183         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12184                          target_to_host_signal(arg3)));
12185 
12186 #ifdef TARGET_NR_set_robust_list
12187     case TARGET_NR_set_robust_list:
12188     case TARGET_NR_get_robust_list:
12189         /* The ABI for supporting robust futexes has userspace pass
12190          * the kernel a pointer to a linked list which is updated by
12191          * userspace after the syscall; the list is walked by the kernel
12192          * when the thread exits. Since the linked list in QEMU guest
12193          * memory isn't a valid linked list for the host and we have
12194          * no way to reliably intercept the thread-death event, we can't
12195          * support these. Silently return ENOSYS so that guest userspace
12196          * falls back to a non-robust futex implementation (which should
12197          * be OK except in the corner case of the guest crashing while
12198          * holding a mutex that is shared with another process via
12199          * shared memory).
12200          */
12201         return -TARGET_ENOSYS;
12202 #endif
12203 
12204 #if defined(TARGET_NR_utimensat)
12205     case TARGET_NR_utimensat:
12206         {
12207             struct timespec *tsp, ts[2];
12208             if (!arg3) {
12209                 tsp = NULL;
12210             } else {
12211                 if (target_to_host_timespec(ts, arg3)) {
12212                     return -TARGET_EFAULT;
12213                 }
12214                 if (target_to_host_timespec(ts + 1, arg3 +
12215                                             sizeof(struct target_timespec))) {
12216                     return -TARGET_EFAULT;
12217                 }
12218                 tsp = ts;
12219             }
12220             if (!arg2) {
12221                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12222             } else {
12223                 if (!(p = lock_user_string(arg2))) {
12224                     return -TARGET_EFAULT;
12225                 }
12226                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12227                 unlock_user(p, arg2, 0);
12228             }
12229         }
12230         return ret;
12231 #endif
12232 #ifdef TARGET_NR_utimensat_time64
12233     case TARGET_NR_utimensat_time64:
12234         {
12235             struct timespec *tsp, ts[2];
12236             if (!arg3) {
12237                 tsp = NULL;
12238             } else {
12239                 if (target_to_host_timespec64(ts, arg3)) {
12240                     return -TARGET_EFAULT;
12241                 }
12242                 if (target_to_host_timespec64(ts + 1, arg3 +
12243                                      sizeof(struct target__kernel_timespec))) {
12244                     return -TARGET_EFAULT;
12245                 }
12246                 tsp = ts;
12247             }
12248             if (!arg2) {
12249                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12250             } else {
12251                 p = lock_user_string(arg2);
12252                 if (!p) {
12253                     return -TARGET_EFAULT;
12254                 }
12255                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12256                 unlock_user(p, arg2, 0);
12257             }
12258         }
12259         return ret;
12260 #endif
12261 #ifdef TARGET_NR_futex
12262     case TARGET_NR_futex:
12263         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12264 #endif
12265 #ifdef TARGET_NR_futex_time64
12266     case TARGET_NR_futex_time64:
12267         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12268 #endif
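          /*
           * inotify file descriptors are registered with fd_trans so that the
           * inotify_event records later read from them can be converted to
           * the guest layout.
           */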
12269 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12270     case TARGET_NR_inotify_init:
12271         ret = get_errno(sys_inotify_init());
12272         if (ret >= 0) {
12273             fd_trans_register(ret, &target_inotify_trans);
12274         }
12275         return ret;
12276 #endif
12277 #ifdef CONFIG_INOTIFY1
12278 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12279     case TARGET_NR_inotify_init1:
12280         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12281                                           fcntl_flags_tbl)));
12282         if (ret >= 0) {
12283             fd_trans_register(ret, &target_inotify_trans);
12284         }
12285         return ret;
12286 #endif
12287 #endif
12288 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12289     case TARGET_NR_inotify_add_watch:
12290         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12291         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12292         unlock_user(p, arg2, 0);
12293         return ret;
12294 #endif
12295 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12296     case TARGET_NR_inotify_rm_watch:
12297         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12298 #endif
12299 
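          /*
           * POSIX message queues are forwarded to the host mq_*() functions,
           * translating the open flags through fcntl_flags_tbl and converting
           * struct mq_attr and the timespec arguments between guest and host
           * layouts.
           */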
12300 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12301     case TARGET_NR_mq_open:
12302         {
12303             struct mq_attr posix_mq_attr;
12304             struct mq_attr *pposix_mq_attr;
12305             int host_flags;
12306 
12307             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12308             pposix_mq_attr = NULL;
12309             if (arg4) {
12310                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12311                     return -TARGET_EFAULT;
12312                 }
12313                 pposix_mq_attr = &posix_mq_attr;
12314             }
12315             p = lock_user_string(arg1 - 1);
12316             if (!p) {
12317                 return -TARGET_EFAULT;
12318             }
12319             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12320             unlock_user(p, arg1, 0);
12321         }
12322         return ret;
12323 
12324     case TARGET_NR_mq_unlink:
12325         p = lock_user_string(arg1 - 1);
12326         if (!p) {
12327             return -TARGET_EFAULT;
12328         }
12329         ret = get_errno(mq_unlink(p));
12330         unlock_user(p, arg1, 0);
12331         return ret;
12332 
12333 #ifdef TARGET_NR_mq_timedsend
12334     case TARGET_NR_mq_timedsend:
12335         {
12336             struct timespec ts;
12337 
12338             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12339             if (arg5 != 0) {
12340                 if (target_to_host_timespec(&ts, arg5)) {
12341                     return -TARGET_EFAULT;
12342                 }
12343                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12344                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12345                     return -TARGET_EFAULT;
12346                 }
12347             } else {
12348                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12349             }
12350             unlock_user(p, arg2, arg3);
12351         }
12352         return ret;
12353 #endif
12354 #ifdef TARGET_NR_mq_timedsend_time64
12355     case TARGET_NR_mq_timedsend_time64:
12356         {
12357             struct timespec ts;
12358 
12359             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12360             if (arg5 != 0) {
12361                 if (target_to_host_timespec64(&ts, arg5)) {
12362                     return -TARGET_EFAULT;
12363                 }
12364                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12365                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12366                     return -TARGET_EFAULT;
12367                 }
12368             } else {
12369                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12370             }
12371             unlock_user(p, arg2, arg3);
12372         }
12373         return ret;
12374 #endif
12375 
12376 #ifdef TARGET_NR_mq_timedreceive
12377     case TARGET_NR_mq_timedreceive:
12378         {
12379             struct timespec ts;
12380             unsigned int prio;
12381 
12382             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12383             if (arg5 != 0) {
12384                 if (target_to_host_timespec(&ts, arg5)) {
12385                     return -TARGET_EFAULT;
12386                 }
12387                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12388                                                      &prio, &ts));
12389                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12390                     return -TARGET_EFAULT;
12391                 }
12392             } else {
12393                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12394                                                      &prio, NULL));
12395             }
12396             unlock_user(p, arg2, arg3);
12397             if (arg4 != 0) {
12398                 put_user_u32(prio, arg4);
                  }
12399         }
12400         return ret;
12401 #endif
12402 #ifdef TARGET_NR_mq_timedreceive_time64
12403     case TARGET_NR_mq_timedreceive_time64:
12404         {
12405             struct timespec ts;
12406             unsigned int prio;
12407 
12408             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12409             if (arg5 != 0) {
12410                 if (target_to_host_timespec64(&ts, arg5)) {
12411                     return -TARGET_EFAULT;
12412                 }
12413                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12414                                                      &prio, &ts));
12415                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12416                     return -TARGET_EFAULT;
12417                 }
12418             } else {
12419                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12420                                                      &prio, NULL));
12421             }
12422             unlock_user(p, arg2, arg3);
12423             if (arg4 != 0) {
12424                 put_user_u32(prio, arg4);
12425             }
12426         }
12427         return ret;
12428 #endif
12429 
12430     /* TARGET_NR_mq_notify is not implemented for now. */
12433 
12434     case TARGET_NR_mq_getsetattr:
12435         {
12436             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12437             ret = 0;
12438             if (arg2 != 0) {
12439                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12440                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12441                                            &posix_mq_attr_out));
12442             } else if (arg3 != 0) {
12443                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12444             }
12445             if (ret == 0 && arg3 != 0) {
12446                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12447             }
12448         }
12449         return ret;
12450 #endif
12451 
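          /*
           * For tee/splice/vmsplice only the loff_t offsets and the iovec
           * array need converting; the file descriptors and flag words are
           * passed straight through to the host.
           */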
12452 #ifdef CONFIG_SPLICE
12453 #ifdef TARGET_NR_tee
12454     case TARGET_NR_tee:
12455         {
12456             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12457         }
12458         return ret;
12459 #endif
12460 #ifdef TARGET_NR_splice
12461     case TARGET_NR_splice:
12462         {
12463             loff_t loff_in, loff_out;
12464             loff_t *ploff_in = NULL, *ploff_out = NULL;
12465             if (arg2) {
12466                 if (get_user_u64(loff_in, arg2)) {
12467                     return -TARGET_EFAULT;
12468                 }
12469                 ploff_in = &loff_in;
12470             }
12471             if (arg4) {
12472                 if (get_user_u64(loff_out, arg4)) {
12473                     return -TARGET_EFAULT;
12474                 }
12475                 ploff_out = &loff_out;
12476             }
12477             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12478             if (arg2) {
12479                 if (put_user_u64(loff_in, arg2)) {
12480                     return -TARGET_EFAULT;
12481                 }
12482             }
12483             if (arg4) {
12484                 if (put_user_u64(loff_out, arg4)) {
12485                     return -TARGET_EFAULT;
12486                 }
12487             }
12488         }
12489         return ret;
12490 #endif
12491 #ifdef TARGET_NR_vmsplice
12492     case TARGET_NR_vmsplice:
12493         {
12494             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12495             if (vec != NULL) {
12496                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12497                 unlock_iovec(vec, arg2, arg3, 0);
12498             } else {
12499                 ret = -host_to_target_errno(errno);
12500             }
12501         }
12502         return ret;
12503 #endif
12504 #endif /* CONFIG_SPLICE */
12505 #ifdef CONFIG_EVENTFD
12506 #if defined(TARGET_NR_eventfd)
12507     case TARGET_NR_eventfd:
12508         ret = get_errno(eventfd(arg1, 0));
12509         if (ret >= 0) {
12510             fd_trans_register(ret, &target_eventfd_trans);
12511         }
12512         return ret;
12513 #endif
12514 #if defined(TARGET_NR_eventfd2)
12515     case TARGET_NR_eventfd2:
12516     {
12517         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12518         if (arg2 & TARGET_O_NONBLOCK) {
12519             host_flags |= O_NONBLOCK;
12520         }
12521         if (arg2 & TARGET_O_CLOEXEC) {
12522             host_flags |= O_CLOEXEC;
12523         }
12524         ret = get_errno(eventfd(arg1, host_flags));
12525         if (ret >= 0) {
12526             fd_trans_register(ret, &target_eventfd_trans);
12527         }
12528         return ret;
12529     }
12530 #endif
12531 #endif /* CONFIG_EVENTFD  */
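          /*
           * On 32-bit guest ABIs fallocate's 64-bit offset and length arrive
           * split across two registers each and are reassembled with
           * target_offset64().
           */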
12532 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12533     case TARGET_NR_fallocate:
12534 #if TARGET_ABI_BITS == 32
12535         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12536                                   target_offset64(arg5, arg6)));
12537 #else
12538         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12539 #endif
12540         return ret;
12541 #endif
12542 #if defined(CONFIG_SYNC_FILE_RANGE)
12543 #if defined(TARGET_NR_sync_file_range)
12544     case TARGET_NR_sync_file_range:
12545 #if TARGET_ABI_BITS == 32
12546 #if defined(TARGET_MIPS)
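              /*
               * 32-bit MIPS passes a padding argument so that the 64-bit
               * offset starts in an even register pair, which is why the
               * offsets begin at arg3 here rather than arg2.
               */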
12547         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12548                                         target_offset64(arg5, arg6), arg7));
12549 #else
12550         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12551                                         target_offset64(arg4, arg5), arg6));
12552 #endif /* !TARGET_MIPS */
12553 #else
12554         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12555 #endif
12556         return ret;
12557 #endif
12558 #if defined(TARGET_NR_sync_file_range2) || \
12559     defined(TARGET_NR_arm_sync_file_range)
12560 #if defined(TARGET_NR_sync_file_range2)
12561     case TARGET_NR_sync_file_range2:
12562 #endif
12563 #if defined(TARGET_NR_arm_sync_file_range)
12564     case TARGET_NR_arm_sync_file_range:
12565 #endif
12566         /* This is like sync_file_range but the arguments are reordered */
12567 #if TARGET_ABI_BITS == 32
12568         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12569                                         target_offset64(arg5, arg6), arg2));
12570 #else
12571         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12572 #endif
12573         return ret;
12574 #endif
12575 #endif
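          /* signalfd() is implemented as signalfd4() with no flags. */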
12576 #if defined(TARGET_NR_signalfd4)
12577     case TARGET_NR_signalfd4:
12578         return do_signalfd4(arg1, arg2, arg4);
12579 #endif
12580 #if defined(TARGET_NR_signalfd)
12581     case TARGET_NR_signalfd:
12582         return do_signalfd4(arg1, arg2, 0);
12583 #endif
12584 #if defined(CONFIG_EPOLL)
12585 #if defined(TARGET_NR_epoll_create)
12586     case TARGET_NR_epoll_create:
12587         return get_errno(epoll_create(arg1));
12588 #endif
12589 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12590     case TARGET_NR_epoll_create1:
12591         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12592 #endif
12593 #if defined(TARGET_NR_epoll_ctl)
12594     case TARGET_NR_epoll_ctl:
12595     {
12596         struct epoll_event ep;
12597         struct epoll_event *epp = 0;
12598         if (arg4) {
12599             if (arg2 != EPOLL_CTL_DEL) {
12600                 struct target_epoll_event *target_ep;
12601                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12602                     return -TARGET_EFAULT;
12603                 }
12604                 ep.events = tswap32(target_ep->events);
12605                 /*
12606                  * The epoll_data_t union is just opaque data to the kernel,
12607                  * so we transfer all 64 bits across and need not worry what
12608                  * actual data type it is.
12609                  */
12610                 ep.data.u64 = tswap64(target_ep->data.u64);
12611                 unlock_user_struct(target_ep, arg4, 0);
12612             }
12613             /*
12614              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12615              * non-NULL pointer, even though this argument is ignored.
12616              */
12618             epp = &ep;
12619         }
12620         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12621     }
12622 #endif
12623 
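          /*
           * epoll_wait and epoll_pwait share one implementation: both end up
           * in safe_epoll_pwait(), with epoll_wait passing a NULL sigset.
           * The returned epoll_event array is byte-swapped back into guest
           * memory.
           */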
12624 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12625 #if defined(TARGET_NR_epoll_wait)
12626     case TARGET_NR_epoll_wait:
12627 #endif
12628 #if defined(TARGET_NR_epoll_pwait)
12629     case TARGET_NR_epoll_pwait:
12630 #endif
12631     {
12632         struct target_epoll_event *target_ep;
12633         struct epoll_event *ep;
12634         int epfd = arg1;
12635         int maxevents = arg3;
12636         int timeout = arg4;
12637 
12638         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12639             return -TARGET_EINVAL;
12640         }
12641 
12642         target_ep = lock_user(VERIFY_WRITE, arg2,
12643                               maxevents * sizeof(struct target_epoll_event), 1);
12644         if (!target_ep) {
12645             return -TARGET_EFAULT;
12646         }
12647 
12648         ep = g_try_new(struct epoll_event, maxevents);
12649         if (!ep) {
12650             unlock_user(target_ep, arg2, 0);
12651             return -TARGET_ENOMEM;
12652         }
12653 
12654         switch (num) {
12655 #if defined(TARGET_NR_epoll_pwait)
12656         case TARGET_NR_epoll_pwait:
12657         {
12658             target_sigset_t *target_set;
12659             sigset_t _set, *set = &_set;
12660 
12661             if (arg5) {
12662                 if (arg6 != sizeof(target_sigset_t)) {
12663                     ret = -TARGET_EINVAL;
12664                     break;
12665                 }
12666 
12667                 target_set = lock_user(VERIFY_READ, arg5,
12668                                        sizeof(target_sigset_t), 1);
12669                 if (!target_set) {
12670                     ret = -TARGET_EFAULT;
12671                     break;
12672                 }
12673                 target_to_host_sigset(set, target_set);
12674                 unlock_user(target_set, arg5, 0);
12675             } else {
12676                 set = NULL;
12677             }
12678 
12679             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12680                                              set, SIGSET_T_SIZE));
12681             break;
12682         }
12683 #endif
12684 #if defined(TARGET_NR_epoll_wait)
12685         case TARGET_NR_epoll_wait:
12686             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12687                                              NULL, 0));
12688             break;
12689 #endif
12690         default:
12691             ret = -TARGET_ENOSYS;
12692         }
12693         if (!is_error(ret)) {
12694             int i;
12695             for (i = 0; i < ret; i++) {
12696                 target_ep[i].events = tswap32(ep[i].events);
12697                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12698             }
12699             unlock_user(target_ep, arg2,
12700                         ret * sizeof(struct target_epoll_event));
12701         } else {
12702             unlock_user(target_ep, arg2, 0);
12703         }
12704         g_free(ep);
12705         return ret;
12706     }
12707 #endif
12708 #endif
12709 #ifdef TARGET_NR_prlimit64
12710     case TARGET_NR_prlimit64:
12711     {
12712         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12713         struct target_rlimit64 *target_rnew, *target_rold;
12714         struct host_rlimit64 rnew, rold, *rnewp = 0;
12715         int resource = target_to_host_resource(arg2);
12716 
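              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * forwarded to the host, since applying them would constrain the
               * QEMU process itself; for those resources only the old limit is
               * read back.
               */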
12717         if (arg3 && (resource != RLIMIT_AS &&
12718                      resource != RLIMIT_DATA &&
12719                      resource != RLIMIT_STACK)) {
12720             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12721                 return -TARGET_EFAULT;
12722             }
12723             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12724             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12725             unlock_user_struct(target_rnew, arg3, 0);
12726             rnewp = &rnew;
12727         }
12728 
12729         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12730         if (!is_error(ret) && arg4) {
12731             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12732                 return -TARGET_EFAULT;
12733             }
12734             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12735             target_rold->rlim_max = tswap64(rold.rlim_max);
12736             unlock_user_struct(target_rold, arg4, 1);
12737         }
12738         return ret;
12739     }
12740 #endif
12741 #ifdef TARGET_NR_gethostname
12742     case TARGET_NR_gethostname:
12743     {
12744         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12745         if (name) {
12746             ret = get_errno(gethostname(name, arg2));
12747             unlock_user(name, arg1, arg2);
12748         } else {
12749             ret = -TARGET_EFAULT;
12750         }
12751         return ret;
12752     }
12753 #endif
12754 #ifdef TARGET_NR_atomic_cmpxchg_32
12755     case TARGET_NR_atomic_cmpxchg_32:
12756     {
12757         /* should use start_exclusive from main.c */
12758         abi_ulong mem_value;
12759         if (get_user_u32(mem_value, arg6)) {
12760             target_siginfo_t info;
12761             info.si_signo = SIGSEGV;
12762             info.si_errno = 0;
12763             info.si_code = TARGET_SEGV_MAPERR;
12764             info._sifields._sigfault._addr = arg6;
12765             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12766                          QEMU_SI_FAULT, &info);
12767             return 0xdeadbeef;
12768 
12769         }
12770         if (mem_value == arg2) {
12771             put_user_u32(arg1, arg6);
              }
12772         return mem_value;
12773     }
12774 #endif
12775 #ifdef TARGET_NR_atomic_barrier
12776     case TARGET_NR_atomic_barrier:
12777         /* Like the kernel implementation and the QEMU ARM barrier,
12778            this is treated as a no-op. */
12779         return 0;
12780 #endif
12781 
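          /*
           * Guest timer ids are an index into the g_posix_timers[] table,
           * tagged with TIMER_MAGIC; get_timer_id() validates and strips the
           * tag again, so the host timer_t itself is never exposed to the
           * guest.
           */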
12782 #ifdef TARGET_NR_timer_create
12783     case TARGET_NR_timer_create:
12784     {
12785         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12786 
12787         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12788 
12789         int clkid = arg1;
12790         int timer_index = next_free_host_timer();
12791 
12792         if (timer_index < 0) {
12793             ret = -TARGET_EAGAIN;
12794         } else {
12795             timer_t *phtimer = g_posix_timers + timer_index;
12796 
12797             if (arg2) {
12798                 phost_sevp = &host_sevp;
12799                 ret = target_to_host_sigevent(phost_sevp, arg2);
12800                 if (ret != 0) {
12801                     return ret;
12802                 }
12803             }
12804 
12805             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12806             if (ret) {
12807                 phtimer = NULL;
12808             } else {
12809                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12810                     return -TARGET_EFAULT;
12811                 }
12812             }
12813         }
12814         return ret;
12815     }
12816 #endif
12817 
12818 #ifdef TARGET_NR_timer_settime
12819     case TARGET_NR_timer_settime:
12820     {
12821         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12822          * struct itimerspec *old_value */
12823         target_timer_t timerid = get_timer_id(arg1);
12824 
12825         if (timerid < 0) {
12826             ret = timerid;
12827         } else if (arg3 == 0) {
12828             ret = -TARGET_EINVAL;
12829         } else {
12830             timer_t htimer = g_posix_timers[timerid];
12831             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12832 
12833             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12834                 return -TARGET_EFAULT;
12835             }
12836             ret = get_errno(
12837                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12838             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12839                 return -TARGET_EFAULT;
12840             }
12841         }
12842         return ret;
12843     }
12844 #endif
12845 
12846 #ifdef TARGET_NR_timer_settime64
12847     case TARGET_NR_timer_settime64:
12848     {
12849         target_timer_t timerid = get_timer_id(arg1);
12850 
12851         if (timerid < 0) {
12852             ret = timerid;
12853         } else if (arg3 == 0) {
12854             ret = -TARGET_EINVAL;
12855         } else {
12856             timer_t htimer = g_posix_timers[timerid];
12857             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12858 
12859             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12860                 return -TARGET_EFAULT;
12861             }
12862             ret = get_errno(
12863                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12864             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12865                 return -TARGET_EFAULT;
12866             }
12867         }
12868         return ret;
12869     }
12870 #endif
12871 
12872 #ifdef TARGET_NR_timer_gettime
12873     case TARGET_NR_timer_gettime:
12874     {
12875         /* args: timer_t timerid, struct itimerspec *curr_value */
12876         target_timer_t timerid = get_timer_id(arg1);
12877 
12878         if (timerid < 0) {
12879             ret = timerid;
12880         } else if (!arg2) {
12881             ret = -TARGET_EFAULT;
12882         } else {
12883             timer_t htimer = g_posix_timers[timerid];
12884             struct itimerspec hspec;
12885             ret = get_errno(timer_gettime(htimer, &hspec));
12886 
12887             if (host_to_target_itimerspec(arg2, &hspec)) {
12888                 ret = -TARGET_EFAULT;
12889             }
12890         }
12891         return ret;
12892     }
12893 #endif
12894 
12895 #ifdef TARGET_NR_timer_gettime64
12896     case TARGET_NR_timer_gettime64:
12897     {
12898         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12899         target_timer_t timerid = get_timer_id(arg1);
12900 
12901         if (timerid < 0) {
12902             ret = timerid;
12903         } else if (!arg2) {
12904             ret = -TARGET_EFAULT;
12905         } else {
12906             timer_t htimer = g_posix_timers[timerid];
12907             struct itimerspec hspec;
12908             ret = get_errno(timer_gettime(htimer, &hspec));
12909 
12910             if (host_to_target_itimerspec64(arg2, &hspec)) {
12911                 ret = -TARGET_EFAULT;
12912             }
12913         }
12914         return ret;
12915     }
12916 #endif
12917 
12918 #ifdef TARGET_NR_timer_getoverrun
12919     case TARGET_NR_timer_getoverrun:
12920     {
12921         /* args: timer_t timerid */
12922         target_timer_t timerid = get_timer_id(arg1);
12923 
12924         if (timerid < 0) {
12925             ret = timerid;
12926         } else {
12927             timer_t htimer = g_posix_timers[timerid];
12928             ret = get_errno(timer_getoverrun(htimer));
12929         }
12930         return ret;
12931     }
12932 #endif
12933 
12934 #ifdef TARGET_NR_timer_delete
12935     case TARGET_NR_timer_delete:
12936     {
12937         /* args: timer_t timerid */
12938         target_timer_t timerid = get_timer_id(arg1);
12939 
12940         if (timerid < 0) {
12941             ret = timerid;
12942         } else {
12943             timer_t htimer = g_posix_timers[timerid];
12944             ret = get_errno(timer_delete(htimer));
12945             g_posix_timers[timerid] = 0;
12946         }
12947         return ret;
12948     }
12949 #endif
12950 
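          /*
           * The timerfd syscalls need only itimerspec conversion; the timer
           * file descriptor itself is returned to the guest unchanged.
           */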
12951 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12952     case TARGET_NR_timerfd_create:
12953         return get_errno(timerfd_create(arg1,
12954                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12955 #endif
12956 
12957 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12958     case TARGET_NR_timerfd_gettime:
12959         {
12960             struct itimerspec its_curr;
12961 
12962             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12963 
12964             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12965                 return -TARGET_EFAULT;
12966             }
12967         }
12968         return ret;
12969 #endif
12970 
12971 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12972     case TARGET_NR_timerfd_gettime64:
12973         {
12974             struct itimerspec its_curr;
12975 
12976             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12977 
12978             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12979                 return -TARGET_EFAULT;
12980             }
12981         }
12982         return ret;
12983 #endif
12984 
12985 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12986     case TARGET_NR_timerfd_settime:
12987         {
12988             struct itimerspec its_new, its_old, *p_new;
12989 
12990             if (arg3) {
12991                 if (target_to_host_itimerspec(&its_new, arg3)) {
12992                     return -TARGET_EFAULT;
12993                 }
12994                 p_new = &its_new;
12995             } else {
12996                 p_new = NULL;
12997             }
12998 
12999             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13000 
13001             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13002                 return -TARGET_EFAULT;
13003             }
13004         }
13005         return ret;
13006 #endif
13007 
13008 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13009     case TARGET_NR_timerfd_settime64:
13010         {
13011             struct itimerspec its_new, its_old, *p_new;
13012 
13013             if (arg3) {
13014                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13015                     return -TARGET_EFAULT;
13016                 }
13017                 p_new = &its_new;
13018             } else {
13019                 p_new = NULL;
13020             }
13021 
13022             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13023 
13024             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13025                 return -TARGET_EFAULT;
13026             }
13027         }
13028         return ret;
13029 #endif
13030 
13031 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13032     case TARGET_NR_ioprio_get:
13033         return get_errno(ioprio_get(arg1, arg2));
13034 #endif
13035 
13036 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13037     case TARGET_NR_ioprio_set:
13038         return get_errno(ioprio_set(arg1, arg2, arg3));
13039 #endif
13040 
13041 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13042     case TARGET_NR_setns:
13043         return get_errno(setns(arg1, arg2));
13044 #endif
13045 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13046     case TARGET_NR_unshare:
13047         return get_errno(unshare(arg1));
13048 #endif
13049 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13050     case TARGET_NR_kcmp:
13051         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13052 #endif
13053 #ifdef TARGET_NR_swapcontext
13054     case TARGET_NR_swapcontext:
13055         /* PowerPC specific.  */
13056         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13057 #endif
13058 #ifdef TARGET_NR_memfd_create
13059     case TARGET_NR_memfd_create:
13060         p = lock_user_string(arg1);
13061         if (!p) {
13062             return -TARGET_EFAULT;
13063         }
13064         ret = get_errno(memfd_create(p, arg2));
13065         fd_trans_unregister(ret);
13066         unlock_user(p, arg1, 0);
13067         return ret;
13068 #endif
13069 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13070     case TARGET_NR_membarrier:
13071         return get_errno(membarrier(arg1, arg2));
13072 #endif
13073 
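          /*
           * As with splice, the optional 64-bit in/out offsets are read from
           * guest memory up front and written back afterwards, but only when
           * the call actually copied data.
           */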
13074 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13075     case TARGET_NR_copy_file_range:
13076         {
13077             loff_t inoff, outoff;
13078             loff_t *pinoff = NULL, *poutoff = NULL;
13079 
13080             if (arg2) {
13081                 if (get_user_u64(inoff, arg2)) {
13082                     return -TARGET_EFAULT;
13083                 }
13084                 pinoff = &inoff;
13085             }
13086             if (arg4) {
13087                 if (get_user_u64(outoff, arg4)) {
13088                     return -TARGET_EFAULT;
13089                 }
13090                 poutoff = &outoff;
13091             }
13092             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13093                                                  arg5, arg6));
13094             if (!is_error(ret) && ret > 0) {
13095                 if (arg2) {
13096                     if (put_user_u64(inoff, arg2)) {
13097                         return -TARGET_EFAULT;
13098                     }
13099                 }
13100                 if (arg4) {
13101                     if (put_user_u64(outoff, arg4)) {
13102                         return -TARGET_EFAULT;
13103                     }
13104                 }
13105             }
13106         }
13107         return ret;
13108 #endif
13109 
13110     default:
13111         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13112         return -TARGET_ENOSYS;
13113     }
13114     return ret;
13115 }
13116 
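      /*
       * do_syscall() is the externally visible entry point: it wraps
       * do_syscall1() with the syscall tracing hooks (record_syscall_start /
       * record_syscall_return) and, when -strace logging is enabled, prints
       * the arguments and return value.
       */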
13117 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13118                     abi_long arg2, abi_long arg3, abi_long arg4,
13119                     abi_long arg5, abi_long arg6, abi_long arg7,
13120                     abi_long arg8)
13121 {
13122     CPUState *cpu = env_cpu(cpu_env);
13123     abi_long ret;
13124 
13125 #ifdef DEBUG_ERESTARTSYS
13126     /* Debug-only code for exercising the syscall-restart code paths
13127      * in the per-architecture cpu main loops: restart every syscall
13128      * the guest makes once before letting it through.
13129      */
13130     {
13131         static bool flag;
13132         flag = !flag;
13133         if (flag) {
13134             return -TARGET_ERESTARTSYS;
13135         }
13136     }
13137 #endif
13138 
13139     record_syscall_start(cpu, num, arg1,
13140                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13141 
13142     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13143         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13144     }
13145 
13146     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13147                       arg5, arg6, arg7, arg8);
13148 
13149     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13150         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13151                           arg3, arg4, arg5, arg6);
13152     }
13153 
13154     record_syscall_return(cpu, num, ret);
13155     return ret;
13156 }
13157