xref: /openbmc/qemu/linux-user/syscall.c (revision 3e8f1628)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
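/*
 * Illustration (editor's note, not part of the original source): a typical
 * pthread_create()-style request from guest libc passes something like
 *   flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *           CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *           CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 * which satisfies (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS and
 * leaves (flags & CLONE_INVALID_THREAD_FLAGS) == 0, so do_fork() treats it
 * as thread creation.  A plain fork() sets none of CLONE_THREAD_FLAGS and
 * only bits covered by CSIGNAL, CLONE_OPTIONAL_FORK_FLAGS and
 * CLONE_IGNORED_FLAGS, so it passes the fork-side check instead.
 */
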
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
251 
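/*
 * Expansion example (editor's illustration): together with the aliases
 * defined below, a declaration such as
 *   #define __NR_sys_gettid __NR_gettid
 *   _syscall0(int, sys_gettid)
 * expands to
 *   static int sys_gettid(void) { return syscall(__NR_gettid); }
 * i.e. a thin wrapper that issues the host syscall directly instead of
 * going through the (possibly caching or emulating) libc wrapper.
 */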
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we emulate getdents with getdents if the host has it.
293  * Otherwise we emulate the guest's getdents with the host's getdents, if the host has it.
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
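/*
 * Editor's note on the size issue above: on a 32-bit host, struct
 * linux_dirent carries 32-bit d_ino/d_off fields, while a 64-bit guest
 * expects 64-bit ones, so each record grows during conversion and a host
 * read that filled the buffer could no longer fit after conversion.
 * Going through getdents64, whose fields are 64-bit on both sides, avoids
 * that.
 */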
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
368 static bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
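/*
 * Usage sketch (editor's note): this table feeds the bitmask translation
 * helpers target_to_host_bitmask()/host_to_target_bitmask() used later in
 * this file, e.g. when converting guest open() flags:
 *   host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 * Reading each row as { target_mask, target_bits, host_mask, host_bits }
 * (field naming assumed here), a row contributes its host bits when
 * (target_flags & target_mask) == target_bits; that is how the two
 * O_SYNC/O_DSYNC rows distinguish a guest O_DSYNC-only request from a
 * full O_SYNC one.
 */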
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
412 static int sys_utimensat(int dirfd, const char *pathname,
413                          const struct timespec times[2], int flags)
414 {
415     errno = ENOSYS;
416     return -1;
417 }
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
427 static int sys_renameat2(int oldfd, const char *old,
428                          int newfd, const char *new, int flags)
429 {
430     if (flags == 0) {
431         return renameat(oldfd, old, newfd, new);
432     }
433     errno = ENOSYS;
434     return -1;
435 }
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
444 {
445   return (inotify_init());
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450 {
451   return (inotify_add_watch(fd, pathname, mask));
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd, int32_t wd)
456 {
457   return (inotify_rm_watch(fd, wd));
458 }
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags)
463 {
464   return (inotify_init1(flags));
465 }
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive at runtime without inotify support */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, } ;
495 
496 static inline int next_free_host_timer(void)
497 {
498     int k ;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
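/*
 * Editor's sketch addressing the FIXME above (not original source): if
 * concurrent timer_create() calls turn out to be possible, the unlocked
 * test-and-set could be replaced by a compare-and-swap, e.g. with the
 * GCC/Clang builtin:
 *   timer_t expected = 0;
 *   if (__atomic_compare_exchange_n(&g_posix_timers[k], &expected,
 *                                   (timer_t)1, false,
 *                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
 *       return k;
 *   }
 * This is only one possible answer to the FIXME, not a statement that the
 * lock-free claim is required.
 */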
508 #endif
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 /* target_to_host_errno_table[] is initialized from
513  * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 };
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERFKILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
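/*
 * Usage sketch (editor's illustration): host results are funnelled through
 * get_errno() so that failures are reported with a target errno; a caller
 * typically does
 *   abi_long ret = get_errno(safe_read(fd, buf, count));
 *   if (is_error(ret)) {
 *       return ret;
 *   }
 * and ret already holds -TARGET_Exxx on failure.  is_error() is the
 * "value is a negative errno" predicate from qemu.h used throughout this
 * file.
 */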
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
727 
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 #endif
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 #endif
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746               struct timespec *, tsp, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 #endif
749 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750               int, maxevents, int, timeout, const sigset_t *, sigmask,
751               size_t, sigsetsize)
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754               const struct timespec *,timeout,int *,uaddr2,int,val3)
755 #endif
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 #endif
760 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761 safe_syscall2(int, kill, pid_t, pid, int, sig)
762 safe_syscall2(int, tkill, int, tid, int, sig)
763 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767               unsigned long, pos_l, unsigned long, pos_h)
768 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769               unsigned long, pos_l, unsigned long, pos_h)
770 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771               socklen_t, addrlen)
772 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778 safe_syscall2(int, flock, int, fd, int, operation)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781               const struct timespec *, uts, size_t, sigsetsize)
782 #endif
783 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784               int, flags)
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep, const struct timespec *, req,
787               struct timespec *, rem)
788 #endif
789 #if defined(TARGET_NR_clock_nanosleep) || \
790     defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792               const struct timespec *, req, struct timespec *, rem)
793 #endif
794 #ifdef __NR_ipc
795 #ifdef __s390x__
796 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797               void *, ptr)
798 #else
799 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800               void *, ptr, long, fifth)
801 #endif
802 #endif
803 #ifdef __NR_msgsnd
804 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805               int, flags)
806 #endif
807 #ifdef __NR_msgrcv
808 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809               long, msgtype, int, flags)
810 #endif
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813               unsigned, nsops, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedsend) || \
816     defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818               size_t, len, unsigned, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_mq_timedreceive) || \
821     defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823               size_t, len, unsigned *, prio, const struct timespec *, timeout)
824 #endif
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827               int, outfd, loff_t *, poutoff, size_t, length,
828               unsigned int, flags)
829 #endif
830 
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832  * "third argument might be integer or pointer or not present" behaviour of
833  * the libc function.
834  */
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838  *  use the flock64 struct rather than unsuffixed flock
839  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840  */
841 #ifdef __NR_fcntl64
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843 #else
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845 #endif
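/*
 * Usage sketch (editor's illustration, following the note above): a file
 * lock query is expected to look like
 *   struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 * i.e. always the 64-suffixed command and the flock64 structure, so the
 * same code uses 64-bit file offsets on 32-bit hosts (via __NR_fcntl64)
 * and 64-bit hosts (via __NR_fcntl) alike.
 */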
846 
847 static inline int host_to_target_sock_type(int host_type)
848 {
849     int target_type;
850 
851     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852     case SOCK_DGRAM:
853         target_type = TARGET_SOCK_DGRAM;
854         break;
855     case SOCK_STREAM:
856         target_type = TARGET_SOCK_STREAM;
857         break;
858     default:
859         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860         break;
861     }
862 
863 #if defined(SOCK_CLOEXEC)
864     if (host_type & SOCK_CLOEXEC) {
865         target_type |= TARGET_SOCK_CLOEXEC;
866     }
867 #endif
868 
869 #if defined(SOCK_NONBLOCK)
870     if (host_type & SOCK_NONBLOCK) {
871         target_type |= TARGET_SOCK_NONBLOCK;
872     }
873 #endif
874 
875     return target_type;
876 }
877 
878 static abi_ulong target_brk;
879 static abi_ulong target_original_brk;
880 static abi_ulong brk_page;
881 
882 void target_set_brk(abi_ulong new_brk)
883 {
884     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885     brk_page = HOST_PAGE_ALIGN(target_brk);
886 }
887 
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
890 
891 /* do_brk() must return target values and target errnos. */
892 abi_long do_brk(abi_ulong new_brk)
893 {
894     abi_long mapped_addr;
895     abi_ulong new_alloc_size;
896 
897     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
898 
899     if (!new_brk) {
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
901         return target_brk;
902     }
903     if (new_brk < target_original_brk) {
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
905                    target_brk);
906         return target_brk;
907     }
908 
909     /* If the new brk is less than the highest page reserved to the
910      * target heap allocation, set it and we're almost done...  */
911     if (new_brk <= brk_page) {
912         /* Heap contents are initialized to zero, as for anonymous
913          * mapped pages.  */
914         if (new_brk > target_brk) {
915             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
916         }
917         target_brk = new_brk;
918         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
919         return target_brk;
920     }
921 
922     /* We need to allocate more memory after the brk... Note that
923      * we don't use MAP_FIXED because that will map over the top of
924      * any existing mapping (like the one with the host libc or qemu
925      * itself); instead we treat "mapped but at wrong address" as
926      * a failure and unmap again.
927      */
928     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
929     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
930                                         PROT_READ|PROT_WRITE,
931                                         MAP_ANON|MAP_PRIVATE, 0, 0));
932 
933     if (mapped_addr == brk_page) {
934         /* Heap contents are initialized to zero, as for anonymous
935          * mapped pages.  Technically the new pages are already
936          * initialized to zero since they *are* anonymous mapped
937          * pages, however we have to take care with the contents that
938          * come from the remaining part of the previous page: it may
939      * contain garbage data due to a previous heap usage (grown
940          * then shrunken).  */
941         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
942 
943         target_brk = new_brk;
944         brk_page = HOST_PAGE_ALIGN(target_brk);
945         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
946             target_brk);
947         return target_brk;
948     } else if (mapped_addr != -1) {
949         /* Mapped but at wrong address, meaning there wasn't actually
950          * enough space for this brk.
951          */
952         target_munmap(mapped_addr, new_alloc_size);
953         mapped_addr = -1;
954         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
955     }
956     else {
957         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
958     }
959 
960 #if defined(TARGET_ALPHA)
961     /* We (partially) emulate OSF/1 on Alpha, which requires we
962        return a proper errno, not an unchanged brk value.  */
963     return -TARGET_ENOMEM;
964 #endif
965     /* For everything else, return the previous break. */
966     return target_brk;
967 }
968 
969 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
970     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
971 static inline abi_long copy_from_user_fdset(fd_set *fds,
972                                             abi_ulong target_fds_addr,
973                                             int n)
974 {
975     int i, nw, j, k;
976     abi_ulong b, *target_fds;
977 
978     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
979     if (!(target_fds = lock_user(VERIFY_READ,
980                                  target_fds_addr,
981                                  sizeof(abi_ulong) * nw,
982                                  1)))
983         return -TARGET_EFAULT;
984 
985     FD_ZERO(fds);
986     k = 0;
987     for (i = 0; i < nw; i++) {
988         /* grab the abi_ulong */
989         __get_user(b, &target_fds[i]);
990         for (j = 0; j < TARGET_ABI_BITS; j++) {
991             /* check the bit inside the abi_ulong */
992             if ((b >> j) & 1)
993                 FD_SET(k, fds);
994             k++;
995         }
996     }
997 
998     unlock_user(target_fds, target_fds_addr, 0);
999 
1000     return 0;
1001 }
1002 
1003 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1004                                                  abi_ulong target_fds_addr,
1005                                                  int n)
1006 {
1007     if (target_fds_addr) {
1008         if (copy_from_user_fdset(fds, target_fds_addr, n))
1009             return -TARGET_EFAULT;
1010         *fds_ptr = fds;
1011     } else {
1012         *fds_ptr = NULL;
1013     }
1014     return 0;
1015 }
1016 
1017 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1018                                           const fd_set *fds,
1019                                           int n)
1020 {
1021     int i, nw, j, k;
1022     abi_long v;
1023     abi_ulong *target_fds;
1024 
1025     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1026     if (!(target_fds = lock_user(VERIFY_WRITE,
1027                                  target_fds_addr,
1028                                  sizeof(abi_ulong) * nw,
1029                                  0)))
1030         return -TARGET_EFAULT;
1031 
1032     k = 0;
1033     for (i = 0; i < nw; i++) {
1034         v = 0;
1035         for (j = 0; j < TARGET_ABI_BITS; j++) {
1036             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1037             k++;
1038         }
1039         __put_user(v, &target_fds[i]);
1040     }
1041 
1042     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1043 
1044     return 0;
1045 }
1046 #endif
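/*
 * Worked example (editor's note): for n = 100 descriptors on a 32-bit
 * target ABI, nw = DIV_ROUND_UP(100, 32) = 4 abi_ulongs are locked and
 * copied; bit j of word i corresponds to guest descriptor k = i * 32 + j,
 * which is exactly how the loops above walk FD_SET()/FD_ISSET().
 */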
1047 
1048 #if defined(__alpha__)
1049 #define HOST_HZ 1024
1050 #else
1051 #define HOST_HZ 100
1052 #endif
1053 
1054 static inline abi_long host_to_target_clock_t(long ticks)
1055 {
1056 #if HOST_HZ == TARGET_HZ
1057     return ticks;
1058 #else
1059     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1060 #endif
1061 }
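/*
 * Worked example (editor's note): on an Alpha host (HOST_HZ 1024) with a
 * target whose TARGET_HZ is 100, 2048 host ticks convert to
 * (2048 * 100) / 1024 = 200 target clock ticks.
 */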
1062 
1063 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1064                                              const struct rusage *rusage)
1065 {
1066     struct target_rusage *target_rusage;
1067 
1068     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1069         return -TARGET_EFAULT;
1070     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1071     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1072     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1073     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1074     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1075     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1076     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1077     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1078     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1079     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1080     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1081     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1082     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1083     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1084     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1085     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1086     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1087     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1088     unlock_user_struct(target_rusage, target_addr, 1);
1089 
1090     return 0;
1091 }
1092 
1093 #ifdef TARGET_NR_setrlimit
1094 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1095 {
1096     abi_ulong target_rlim_swap;
1097     rlim_t result;
1098 
1099     target_rlim_swap = tswapal(target_rlim);
1100     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1101         return RLIM_INFINITY;
1102 
1103     result = target_rlim_swap;
1104     if (target_rlim_swap != (rlim_t)result)
1105         return RLIM_INFINITY;
1106 
1107     return result;
1108 }
1109 #endif
1110 
1111 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1112 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1113 {
1114     abi_ulong target_rlim_swap;
1115     abi_ulong result;
1116 
1117     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1118         target_rlim_swap = TARGET_RLIM_INFINITY;
1119     else
1120         target_rlim_swap = rlim;
1121     result = tswapal(target_rlim_swap);
1122 
1123     return result;
1124 }
1125 #endif
1126 
1127 static inline int target_to_host_resource(int code)
1128 {
1129     switch (code) {
1130     case TARGET_RLIMIT_AS:
1131         return RLIMIT_AS;
1132     case TARGET_RLIMIT_CORE:
1133         return RLIMIT_CORE;
1134     case TARGET_RLIMIT_CPU:
1135         return RLIMIT_CPU;
1136     case TARGET_RLIMIT_DATA:
1137         return RLIMIT_DATA;
1138     case TARGET_RLIMIT_FSIZE:
1139         return RLIMIT_FSIZE;
1140     case TARGET_RLIMIT_LOCKS:
1141         return RLIMIT_LOCKS;
1142     case TARGET_RLIMIT_MEMLOCK:
1143         return RLIMIT_MEMLOCK;
1144     case TARGET_RLIMIT_MSGQUEUE:
1145         return RLIMIT_MSGQUEUE;
1146     case TARGET_RLIMIT_NICE:
1147         return RLIMIT_NICE;
1148     case TARGET_RLIMIT_NOFILE:
1149         return RLIMIT_NOFILE;
1150     case TARGET_RLIMIT_NPROC:
1151         return RLIMIT_NPROC;
1152     case TARGET_RLIMIT_RSS:
1153         return RLIMIT_RSS;
1154     case TARGET_RLIMIT_RTPRIO:
1155         return RLIMIT_RTPRIO;
1156     case TARGET_RLIMIT_SIGPENDING:
1157         return RLIMIT_SIGPENDING;
1158     case TARGET_RLIMIT_STACK:
1159         return RLIMIT_STACK;
1160     default:
1161         return code;
1162     }
1163 }
1164 
1165 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1166                                               abi_ulong target_tv_addr)
1167 {
1168     struct target_timeval *target_tv;
1169 
1170     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1171         return -TARGET_EFAULT;
1172     }
1173 
1174     __get_user(tv->tv_sec, &target_tv->tv_sec);
1175     __get_user(tv->tv_usec, &target_tv->tv_usec);
1176 
1177     unlock_user_struct(target_tv, target_tv_addr, 0);
1178 
1179     return 0;
1180 }
1181 
1182 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1183                                             const struct timeval *tv)
1184 {
1185     struct target_timeval *target_tv;
1186 
1187     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1188         return -TARGET_EFAULT;
1189     }
1190 
1191     __put_user(tv->tv_sec, &target_tv->tv_sec);
1192     __put_user(tv->tv_usec, &target_tv->tv_usec);
1193 
1194     unlock_user_struct(target_tv, target_tv_addr, 1);
1195 
1196     return 0;
1197 }
1198 
1199 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1200 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1201                                                 abi_ulong target_tv_addr)
1202 {
1203     struct target__kernel_sock_timeval *target_tv;
1204 
1205     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1206         return -TARGET_EFAULT;
1207     }
1208 
1209     __get_user(tv->tv_sec, &target_tv->tv_sec);
1210     __get_user(tv->tv_usec, &target_tv->tv_usec);
1211 
1212     unlock_user_struct(target_tv, target_tv_addr, 0);
1213 
1214     return 0;
1215 }
1216 #endif
1217 
1218 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1219                                               const struct timeval *tv)
1220 {
1221     struct target__kernel_sock_timeval *target_tv;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __put_user(tv->tv_sec, &target_tv->tv_sec);
1228     __put_user(tv->tv_usec, &target_tv->tv_usec);
1229 
1230     unlock_user_struct(target_tv, target_tv_addr, 1);
1231 
1232     return 0;
1233 }
1234 
1235 #if defined(TARGET_NR_futex) || \
1236     defined(TARGET_NR_rt_sigtimedwait) || \
1237     defined(TARGET_NR_pselect6) || \
1238     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1239     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1240     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1241     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1242     defined(TARGET_NR_timer_settime) || \
1243     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1244 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1245                                                abi_ulong target_addr)
1246 {
1247     struct target_timespec *target_ts;
1248 
1249     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1250         return -TARGET_EFAULT;
1251     }
1252     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1253     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1254     unlock_user_struct(target_ts, target_addr, 0);
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1260     defined(TARGET_NR_timer_settime64) || \
1261     defined(TARGET_NR_mq_timedsend_time64) || \
1262     defined(TARGET_NR_mq_timedreceive_time64) || \
1263     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1264     defined(TARGET_NR_clock_nanosleep_time64) || \
1265     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1266     defined(TARGET_NR_utimensat) || \
1267     defined(TARGET_NR_utimensat_time64) || \
1268     defined(TARGET_NR_semtimedop_time64) || \
1269     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1270 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1271                                                  abi_ulong target_addr)
1272 {
1273     struct target__kernel_timespec *target_ts;
1274 
1275     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1276         return -TARGET_EFAULT;
1277     }
1278     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1279     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1280     /* in 32bit mode, this drops the padding */
1281     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1282     unlock_user_struct(target_ts, target_addr, 0);
1283     return 0;
1284 }
1285 #endif
1286 
1287 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1288                                                struct timespec *host_ts)
1289 {
1290     struct target_timespec *target_ts;
1291 
1292     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1293         return -TARGET_EFAULT;
1294     }
1295     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1296     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1297     unlock_user_struct(target_ts, target_addr, 1);
1298     return 0;
1299 }
1300 
1301 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1302                                                  struct timespec *host_ts)
1303 {
1304     struct target__kernel_timespec *target_ts;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1307         return -TARGET_EFAULT;
1308     }
1309     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1310     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1311     unlock_user_struct(target_ts, target_addr, 1);
1312     return 0;
1313 }
1314 
1315 #if defined(TARGET_NR_gettimeofday)
1316 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1317                                              struct timezone *tz)
1318 {
1319     struct target_timezone *target_tz;
1320 
1321     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1322         return -TARGET_EFAULT;
1323     }
1324 
1325     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1327 
1328     unlock_user_struct(target_tz, target_tz_addr, 1);
1329 
1330     return 0;
1331 }
1332 #endif
1333 
1334 #if defined(TARGET_NR_settimeofday)
1335 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1336                                                abi_ulong target_tz_addr)
1337 {
1338     struct target_timezone *target_tz;
1339 
1340     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1341         return -TARGET_EFAULT;
1342     }
1343 
1344     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1345     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1346 
1347     unlock_user_struct(target_tz, target_tz_addr, 0);
1348 
1349     return 0;
1350 }
1351 #endif
1352 
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1354 #include <mqueue.h>
1355 
1356 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1357                                               abi_ulong target_mq_attr_addr)
1358 {
1359     struct target_mq_attr *target_mq_attr;
1360 
1361     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1362                           target_mq_attr_addr, 1))
1363         return -TARGET_EFAULT;
1364 
1365     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1369 
1370     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1371 
1372     return 0;
1373 }
1374 
1375 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1376                                             const struct mq_attr *attr)
1377 {
1378     struct target_mq_attr *target_mq_attr;
1379 
1380     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1381                           target_mq_attr_addr, 0))
1382         return -TARGET_EFAULT;
1383 
1384     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1385     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1386     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1387     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1388 
1389     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1390 
1391     return 0;
1392 }
1393 #endif
1394 
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1396 /* do_select() must return target values and target errnos. */
1397 static abi_long do_select(int n,
1398                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1399                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1400 {
1401     fd_set rfds, wfds, efds;
1402     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1403     struct timeval tv;
1404     struct timespec ts, *ts_ptr;
1405     abi_long ret;
1406 
1407     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1412     if (ret) {
1413         return ret;
1414     }
1415     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1416     if (ret) {
1417         return ret;
1418     }
1419 
1420     if (target_tv_addr) {
1421         if (copy_from_user_timeval(&tv, target_tv_addr))
1422             return -TARGET_EFAULT;
1423         ts.tv_sec = tv.tv_sec;
1424         ts.tv_nsec = tv.tv_usec * 1000;
1425         ts_ptr = &ts;
1426     } else {
1427         ts_ptr = NULL;
1428     }
1429 
1430     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1431                                   ts_ptr, NULL));
1432 
1433     if (!is_error(ret)) {
1434         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1435             return -TARGET_EFAULT;
1436         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1437             return -TARGET_EFAULT;
1438         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1439             return -TARGET_EFAULT;
1440 
1441         if (target_tv_addr) {
1442             tv.tv_sec = ts.tv_sec;
1443             tv.tv_usec = ts.tv_nsec / 1000;
1444             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1445                 return -TARGET_EFAULT;
1446             }
1447         }
1448     }
1449 
1450     return ret;
1451 }
1452 
1453 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1454 static abi_long do_old_select(abi_ulong arg1)
1455 {
1456     struct target_sel_arg_struct *sel;
1457     abi_ulong inp, outp, exp, tvp;
1458     long nsel;
1459 
1460     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1461         return -TARGET_EFAULT;
1462     }
1463 
1464     nsel = tswapal(sel->n);
1465     inp = tswapal(sel->inp);
1466     outp = tswapal(sel->outp);
1467     exp = tswapal(sel->exp);
1468     tvp = tswapal(sel->tvp);
1469 
1470     unlock_user_struct(sel, arg1, 0);
1471 
1472     return do_select(nsel, inp, outp, exp, tvp);
1473 }
1474 #endif
1475 #endif
1476 
1477 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1478 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1479                             abi_long arg4, abi_long arg5, abi_long arg6,
1480                             bool time64)
1481 {
1482     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1483     fd_set rfds, wfds, efds;
1484     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1485     struct timespec ts, *ts_ptr;
1486     abi_long ret;
1487 
1488     /*
1489      * The 6th arg is actually two args smashed together,
1490      * so we cannot use the C library.
1491      */
1492     sigset_t set;
1493     struct {
1494         sigset_t *set;
1495         size_t size;
1496     } sig, *sig_ptr;
1497 
1498     abi_ulong arg_sigset, arg_sigsize, *arg7;
1499     target_sigset_t *target_sigset;
1500 
1501     n = arg1;
1502     rfd_addr = arg2;
1503     wfd_addr = arg3;
1504     efd_addr = arg4;
1505     ts_addr = arg5;
1506 
1507     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1508     if (ret) {
1509         return ret;
1510     }
1511     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1512     if (ret) {
1513         return ret;
1514     }
1515     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1516     if (ret) {
1517         return ret;
1518     }
1519 
1520     /*
1521      * This takes a timespec, and not a timeval, so we cannot
1522      * use the do_select() helper ...
1523      */
1524     if (ts_addr) {
1525         if (time64) {
1526             if (target_to_host_timespec64(&ts, ts_addr)) {
1527                 return -TARGET_EFAULT;
1528             }
1529         } else {
1530             if (target_to_host_timespec(&ts, ts_addr)) {
1531                 return -TARGET_EFAULT;
1532             }
1533         }
1534         ts_ptr = &ts;
1535     } else {
1536         ts_ptr = NULL;
1537     }
1538 
1539     /* Extract the two packed args for the sigset */
1540     if (arg6) {
1541         sig_ptr = &sig;
1542         sig.size = SIGSET_T_SIZE;
1543 
1544         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1545         if (!arg7) {
1546             return -TARGET_EFAULT;
1547         }
1548         arg_sigset = tswapal(arg7[0]);
1549         arg_sigsize = tswapal(arg7[1]);
1550         unlock_user(arg7, arg6, 0);
1551 
1552         if (arg_sigset) {
1553             sig.set = &set;
1554             if (arg_sigsize != sizeof(*target_sigset)) {
1555                 /* Like the kernel, we enforce correct size sigsets */
1556                 return -TARGET_EINVAL;
1557             }
1558             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1559                                       sizeof(*target_sigset), 1);
1560             if (!target_sigset) {
1561                 return -TARGET_EFAULT;
1562             }
1563             target_to_host_sigset(&set, target_sigset);
1564             unlock_user(target_sigset, arg_sigset, 0);
1565         } else {
1566             sig.set = NULL;
1567         }
1568     } else {
1569         sig_ptr = NULL;
1570     }
1571 
1572     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1573                                   ts_ptr, sig_ptr));
1574 
1575     if (!is_error(ret)) {
1576         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1577             return -TARGET_EFAULT;
1578         }
1579         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1580             return -TARGET_EFAULT;
1581         }
1582         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1583             return -TARGET_EFAULT;
1584         }
1585         if (time64) {
1586             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1587                 return -TARGET_EFAULT;
1588             }
1589         } else {
1590             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1591                 return -TARGET_EFAULT;
1592             }
1593         }
1594     }
1595     return ret;
1596 }
1597 #endif
1598 
1599 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1600     defined(TARGET_NR_ppoll_time64)
1601 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1602                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1603 {
1604     struct target_pollfd *target_pfd;
1605     unsigned int nfds = arg2;
1606     struct pollfd *pfd;
1607     unsigned int i;
1608     abi_long ret;
1609 
1610     pfd = NULL;
1611     target_pfd = NULL;
1612     if (nfds) {
1613         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1614             return -TARGET_EINVAL;
1615         }
1616         target_pfd = lock_user(VERIFY_WRITE, arg1,
1617                                sizeof(struct target_pollfd) * nfds, 1);
1618         if (!target_pfd) {
1619             return -TARGET_EFAULT;
1620         }
1621 
1622         pfd = alloca(sizeof(struct pollfd) * nfds);
1623         for (i = 0; i < nfds; i++) {
1624             pfd[i].fd = tswap32(target_pfd[i].fd);
1625             pfd[i].events = tswap16(target_pfd[i].events);
1626         }
1627     }
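    /*
     * ppoll() takes a timespec timeout and an optional sigmask; plain
     * poll()'s millisecond timeout is converted below and emulated via
     * the same safe_ppoll() host call with no sigmask.
     */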
1628     if (ppoll) {
1629         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1630         target_sigset_t *target_set;
1631         sigset_t _set, *set = &_set;
1632 
1633         if (arg3) {
1634             if (time64) {
1635                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1636                     unlock_user(target_pfd, arg1, 0);
1637                     return -TARGET_EFAULT;
1638                 }
1639             } else {
1640                 if (target_to_host_timespec(timeout_ts, arg3)) {
1641                     unlock_user(target_pfd, arg1, 0);
1642                     return -TARGET_EFAULT;
1643                 }
1644             }
1645         } else {
1646             timeout_ts = NULL;
1647         }
1648 
1649         if (arg4) {
1650             if (arg5 != sizeof(target_sigset_t)) {
1651                 unlock_user(target_pfd, arg1, 0);
1652                 return -TARGET_EINVAL;
1653             }
1654 
1655             target_set = lock_user(VERIFY_READ, arg4,
1656                                    sizeof(target_sigset_t), 1);
1657             if (!target_set) {
1658                 unlock_user(target_pfd, arg1, 0);
1659                 return -TARGET_EFAULT;
1660             }
1661             target_to_host_sigset(set, target_set);
1662         } else {
1663             set = NULL;
1664         }
1665 
1666         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1667                                    set, SIGSET_T_SIZE));
1668 
1669         if (!is_error(ret) && arg3) {
1670             if (time64) {
1671                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1672                     return -TARGET_EFAULT;
1673                 }
1674             } else {
1675                 if (host_to_target_timespec(arg3, timeout_ts)) {
1676                     return -TARGET_EFAULT;
1677                 }
1678             }
1679         }
1680         if (arg4) {
1681             unlock_user(target_set, arg4, 0);
1682         }
1683     } else {
1684         struct timespec ts, *pts;
1685 
1686         if (arg3 >= 0) {
1687             /* Convert ms to secs, ns */
1688             ts.tv_sec = arg3 / 1000;
1689             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1690             pts = &ts;
1691         } else {
1692             /* A negative poll() timeout means "infinite" */
1693             pts = NULL;
1694         }
1695         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1696     }
1697 
1698     if (!is_error(ret)) {
1699         for (i = 0; i < nfds; i++) {
1700             target_pfd[i].revents = tswap16(pfd[i].revents);
1701         }
1702     }
1703     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1704     return ret;
1705 }
1706 #endif
1707 
1708 static abi_long do_pipe2(int host_pipe[], int flags)
1709 {
1710 #ifdef CONFIG_PIPE2
1711     return pipe2(host_pipe, flags);
1712 #else
1713     return -ENOSYS;
1714 #endif
1715 }
1716 
1717 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1718                         int flags, int is_pipe2)
1719 {
1720     int host_pipe[2];
1721     abi_long ret;
1722     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1723 
1724     if (is_error(ret))
1725         return get_errno(ret);
1726 
1727     /* Several targets have special calling conventions for the original
1728        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1729     if (!is_pipe2) {
1730 #if defined(TARGET_ALPHA)
1731         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1732         return host_pipe[0];
1733 #elif defined(TARGET_MIPS)
1734         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1735         return host_pipe[0];
1736 #elif defined(TARGET_SH4)
1737         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1738         return host_pipe[0];
1739 #elif defined(TARGET_SPARC)
1740         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1741         return host_pipe[0];
1742 #endif
1743     }
1744 
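    /* Write both pipe descriptors back into the guest's int[2] array. */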
1745     if (put_user_s32(host_pipe[0], pipedes)
1746         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1747         return -TARGET_EFAULT;
1748     return get_errno(ret);
1749 }
1750 
1751 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1752                                               abi_ulong target_addr,
1753                                               socklen_t len)
1754 {
1755     struct target_ip_mreqn *target_smreqn;
1756 
1757     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1758     if (!target_smreqn)
1759         return -TARGET_EFAULT;
1760     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1761     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
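    /*
     * struct ip_mreq is layout-compatible with the start of ip_mreqn;
     * only fill in the interface index when the caller passed the
     * larger structure.
     */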
1762     if (len == sizeof(struct target_ip_mreqn))
1763         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1764     unlock_user(target_smreqn, target_addr, 0);
1765 
1766     return 0;
1767 }
1768 
1769 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1770                                                abi_ulong target_addr,
1771                                                socklen_t len)
1772 {
1773     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1774     sa_family_t sa_family;
1775     struct target_sockaddr *target_saddr;
1776 
1777     if (fd_trans_target_to_host_addr(fd)) {
1778         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1779     }
1780 
1781     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1782     if (!target_saddr)
1783         return -TARGET_EFAULT;
1784 
1785     sa_family = tswap16(target_saddr->sa_family);
1786 
1787     /* Oops. The caller might send an incomplete sun_path; sun_path
1788      * must be terminated by \0 (see the manual page), but
1789      * unfortunately it is quite common to specify sockaddr_un
1790      * length as "strlen(x->sun_path)" while it should be
1791      * "strlen(...) + 1". We'll fix that here if needed.
1792      * The Linux kernel applies the same fixup.
1793      */
1794 
1795     if (sa_family == AF_UNIX) {
1796         if (len < unix_maxlen && len > 0) {
1797             char *cp = (char *)target_saddr;
1798 
1799             if (cp[len - 1] && !cp[len])
1800                 len++;
1801         }
1802         if (len > unix_maxlen)
1803             len = unix_maxlen;
1804     }
1805 
1806     memcpy(addr, target_saddr, len);
1807     addr->sa_family = sa_family;
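    /*
     * IPv4/IPv6 payloads are already in network byte order and are
     * copied verbatim above; netlink and packet addresses carry
     * host-endian fields that must be byte-swapped.
     */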
1808     if (sa_family == AF_NETLINK) {
1809         struct sockaddr_nl *nladdr;
1810 
1811         nladdr = (struct sockaddr_nl *)addr;
1812         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1813         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1814     } else if (sa_family == AF_PACKET) {
1815         struct target_sockaddr_ll *lladdr;
1816 
1817         lladdr = (struct target_sockaddr_ll *)addr;
1818         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1819         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1820     }
1821     unlock_user(target_saddr, target_addr, 0);
1822 
1823     return 0;
1824 }
1825 
1826 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1827                                                struct sockaddr *addr,
1828                                                socklen_t len)
1829 {
1830     struct target_sockaddr *target_saddr;
1831 
1832     if (len == 0) {
1833         return 0;
1834     }
1835     assert(addr);
1836 
1837     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1838     if (!target_saddr)
1839         return -TARGET_EFAULT;
1840     memcpy(target_saddr, addr, len);
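    /* Only byte-swap sa_family if the (possibly truncated) buffer holds it. */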
1841     if (len >= offsetof(struct target_sockaddr, sa_family) +
1842         sizeof(target_saddr->sa_family)) {
1843         target_saddr->sa_family = tswap16(addr->sa_family);
1844     }
1845     if (addr->sa_family == AF_NETLINK &&
1846         len >= sizeof(struct target_sockaddr_nl)) {
1847         struct target_sockaddr_nl *target_nl =
1848                (struct target_sockaddr_nl *)target_saddr;
1849         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1850         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1851     } else if (addr->sa_family == AF_PACKET) {
1852         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1853         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1854         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1855     } else if (addr->sa_family == AF_INET6 &&
1856                len >= sizeof(struct target_sockaddr_in6)) {
1857         struct target_sockaddr_in6 *target_in6 =
1858                (struct target_sockaddr_in6 *)target_saddr;
1859         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1860     }
1861     unlock_user(target_saddr, target_addr, len);
1862 
1863     return 0;
1864 }
1865 
1866 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1867                                            struct target_msghdr *target_msgh)
1868 {
1869     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1870     abi_long msg_controllen;
1871     abi_ulong target_cmsg_addr;
1872     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1873     socklen_t space = 0;
1874 
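    /*
     * Walk the guest's control buffer and rebuild an equivalent host
     * cmsg chain, byte-swapping the headers and converting the payload
     * types we understand (SCM_RIGHTS fd arrays, SCM_CREDENTIALS).
     */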
1875     msg_controllen = tswapal(target_msgh->msg_controllen);
1876     if (msg_controllen < sizeof (struct target_cmsghdr))
1877         goto the_end;
1878     target_cmsg_addr = tswapal(target_msgh->msg_control);
1879     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1880     target_cmsg_start = target_cmsg;
1881     if (!target_cmsg)
1882         return -TARGET_EFAULT;
1883 
1884     while (cmsg && target_cmsg) {
1885         void *data = CMSG_DATA(cmsg);
1886         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1887 
1888         int len = tswapal(target_cmsg->cmsg_len)
1889             - sizeof(struct target_cmsghdr);
1890 
1891         space += CMSG_SPACE(len);
1892         if (space > msgh->msg_controllen) {
1893             space -= CMSG_SPACE(len);
1894             /* This is a QEMU bug, since we allocated the payload
1895              * area ourselves (unlike overflow in host-to-target
1896              * conversion, which is just the guest giving us a buffer
1897              * that's too small). It can't happen for the payload types
1898              * we currently support; if it becomes an issue in future
1899              * we would need to improve our allocation strategy to
1900              * something more intelligent than "twice the size of the
1901              * target buffer we're reading from".
1902              */
1903             qemu_log_mask(LOG_UNIMP,
1904                           "Unsupported ancillary data %d/%d: "
1905                           "unhandled msg size\n",
1906                           tswap32(target_cmsg->cmsg_level),
1907                           tswap32(target_cmsg->cmsg_type));
1908             break;
1909         }
1910 
1911         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1912             cmsg->cmsg_level = SOL_SOCKET;
1913         } else {
1914             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1915         }
1916         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1917         cmsg->cmsg_len = CMSG_LEN(len);
1918 
1919         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1920             int *fd = (int *)data;
1921             int *target_fd = (int *)target_data;
1922             int i, numfds = len / sizeof(int);
1923 
1924             for (i = 0; i < numfds; i++) {
1925                 __get_user(fd[i], target_fd + i);
1926             }
1927         } else if (cmsg->cmsg_level == SOL_SOCKET
1928                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1929             struct ucred *cred = (struct ucred *)data;
1930             struct target_ucred *target_cred =
1931                 (struct target_ucred *)target_data;
1932 
1933             __get_user(cred->pid, &target_cred->pid);
1934             __get_user(cred->uid, &target_cred->uid);
1935             __get_user(cred->gid, &target_cred->gid);
1936         } else {
1937             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1938                           cmsg->cmsg_level, cmsg->cmsg_type);
1939             memcpy(data, target_data, len);
1940         }
1941 
1942         cmsg = CMSG_NXTHDR(msgh, cmsg);
1943         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1944                                          target_cmsg_start);
1945     }
1946     unlock_user(target_cmsg, target_cmsg_addr, 0);
1947  the_end:
1948     msgh->msg_controllen = space;
1949     return 0;
1950 }
1951 
1952 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1953                                            struct msghdr *msgh)
1954 {
1955     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1956     abi_long msg_controllen;
1957     abi_ulong target_cmsg_addr;
1958     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1959     socklen_t space = 0;
1960 
1961     msg_controllen = tswapal(target_msgh->msg_controllen);
1962     if (msg_controllen < sizeof (struct target_cmsghdr))
1963         goto the_end;
1964     target_cmsg_addr = tswapal(target_msgh->msg_control);
1965     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1966     target_cmsg_start = target_cmsg;
1967     if (!target_cmsg)
1968         return -TARGET_EFAULT;
1969 
1970     while (cmsg && target_cmsg) {
1971         void *data = CMSG_DATA(cmsg);
1972         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1973 
1974         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1975         int tgt_len, tgt_space;
1976 
1977         /* We never copy a half-header but may copy half-data;
1978          * this is Linux's behaviour in put_cmsg(). Note that
1979          * truncation here is a guest problem (which we report
1980          * to the guest via the CTRUNC bit), unlike truncation
1981          * in target_to_host_cmsg, which is a QEMU bug.
1982          */
1983         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1984             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1985             break;
1986         }
1987 
1988         if (cmsg->cmsg_level == SOL_SOCKET) {
1989             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1990         } else {
1991             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1992         }
1993         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1994 
1995         /* Payload types which need a different size of payload on
1996          * the target must adjust tgt_len here.
1997          */
1998         tgt_len = len;
1999         switch (cmsg->cmsg_level) {
2000         case SOL_SOCKET:
2001             switch (cmsg->cmsg_type) {
2002             case SO_TIMESTAMP:
2003                 tgt_len = sizeof(struct target_timeval);
2004                 break;
2005             default:
2006                 break;
2007             }
2008             break;
2009         default:
2010             break;
2011         }
2012 
2013         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2014             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2015             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2016         }
2017 
2018         /* We must now copy-and-convert len bytes of payload
2019          * into tgt_len bytes of destination space. Bear in mind
2020          * that in both source and destination we may be dealing
2021          * with a truncated value!
2022          */
2023         switch (cmsg->cmsg_level) {
2024         case SOL_SOCKET:
2025             switch (cmsg->cmsg_type) {
2026             case SCM_RIGHTS:
2027             {
2028                 int *fd = (int *)data;
2029                 int *target_fd = (int *)target_data;
2030                 int i, numfds = tgt_len / sizeof(int);
2031 
2032                 for (i = 0; i < numfds; i++) {
2033                     __put_user(fd[i], target_fd + i);
2034                 }
2035                 break;
2036             }
2037             case SO_TIMESTAMP:
2038             {
2039                 struct timeval *tv = (struct timeval *)data;
2040                 struct target_timeval *target_tv =
2041                     (struct target_timeval *)target_data;
2042 
2043                 if (len != sizeof(struct timeval) ||
2044                     tgt_len != sizeof(struct target_timeval)) {
2045                     goto unimplemented;
2046                 }
2047 
2048                 /* copy struct timeval to target */
2049                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2050                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2051                 break;
2052             }
2053             case SCM_CREDENTIALS:
2054             {
2055                 struct ucred *cred = (struct ucred *)data;
2056                 struct target_ucred *target_cred =
2057                     (struct target_ucred *)target_data;
2058 
2059                 __put_user(cred->pid, &target_cred->pid);
2060                 __put_user(cred->uid, &target_cred->uid);
2061                 __put_user(cred->gid, &target_cred->gid);
2062                 break;
2063             }
2064             default:
2065                 goto unimplemented;
2066             }
2067             break;
2068 
2069         case SOL_IP:
2070             switch (cmsg->cmsg_type) {
2071             case IP_TTL:
2072             {
2073                 uint32_t *v = (uint32_t *)data;
2074                 uint32_t *t_int = (uint32_t *)target_data;
2075 
2076                 if (len != sizeof(uint32_t) ||
2077                     tgt_len != sizeof(uint32_t)) {
2078                     goto unimplemented;
2079                 }
2080                 __put_user(*v, t_int);
2081                 break;
2082             }
2083             case IP_RECVERR:
2084             {
2085                 struct errhdr_t {
2086                    struct sock_extended_err ee;
2087                    struct sockaddr_in offender;
2088                 };
2089                 struct errhdr_t *errh = (struct errhdr_t *)data;
2090                 struct errhdr_t *target_errh =
2091                     (struct errhdr_t *)target_data;
2092 
2093                 if (len != sizeof(struct errhdr_t) ||
2094                     tgt_len != sizeof(struct errhdr_t)) {
2095                     goto unimplemented;
2096                 }
2097                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2098                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2099                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2100                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2101                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2102                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2103                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2104                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2105                     (void *) &errh->offender, sizeof(errh->offender));
2106                 break;
2107             }
2108             default:
2109                 goto unimplemented;
2110             }
2111             break;
2112 
2113         case SOL_IPV6:
2114             switch (cmsg->cmsg_type) {
2115             case IPV6_HOPLIMIT:
2116             {
2117                 uint32_t *v = (uint32_t *)data;
2118                 uint32_t *t_int = (uint32_t *)target_data;
2119 
2120                 if (len != sizeof(uint32_t) ||
2121                     tgt_len != sizeof(uint32_t)) {
2122                     goto unimplemented;
2123                 }
2124                 __put_user(*v, t_int);
2125                 break;
2126             }
2127             case IPV6_RECVERR:
2128             {
2129                 struct errhdr6_t {
2130                    struct sock_extended_err ee;
2131                    struct sockaddr_in6 offender;
2132                 };
2133                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2134                 struct errhdr6_t *target_errh =
2135                     (struct errhdr6_t *)target_data;
2136 
2137                 if (len != sizeof(struct errhdr6_t) ||
2138                     tgt_len != sizeof(struct errhdr6_t)) {
2139                     goto unimplemented;
2140                 }
2141                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2142                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2143                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2144                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2145                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2146                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2147                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2148                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2149                     (void *) &errh->offender, sizeof(errh->offender));
2150                 break;
2151             }
2152             default:
2153                 goto unimplemented;
2154             }
2155             break;
2156 
2157         default:
2158         unimplemented:
2159             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2160                           cmsg->cmsg_level, cmsg->cmsg_type);
2161             memcpy(target_data, data, MIN(len, tgt_len));
2162             if (tgt_len > len) {
2163                 memset(target_data + len, 0, tgt_len - len);
2164             }
2165         }
2166 
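        /*
         * Record the (possibly truncated) length actually delivered and
         * account for the aligned space it consumes in the guest buffer.
         */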
2167         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2168         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2169         if (msg_controllen < tgt_space) {
2170             tgt_space = msg_controllen;
2171         }
2172         msg_controllen -= tgt_space;
2173         space += tgt_space;
2174         cmsg = CMSG_NXTHDR(msgh, cmsg);
2175         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2176                                          target_cmsg_start);
2177     }
2178     unlock_user(target_cmsg, target_cmsg_addr, space);
2179  the_end:
2180     target_msgh->msg_controllen = tswapal(space);
2181     return 0;
2182 }
2183 
2184 /* do_setsockopt() Must return target values and target errnos. */
2185 static abi_long do_setsockopt(int sockfd, int level, int optname,
2186                               abi_ulong optval_addr, socklen_t optlen)
2187 {
2188     abi_long ret;
2189     int val;
2190     struct ip_mreqn *ip_mreq;
2191     struct ip_mreq_source *ip_mreq_source;
2192 
2193     switch(level) {
2194     case SOL_TCP:
2195     case SOL_UDP:
2196         /* TCP and UDP options all take an 'int' value.  */
2197         if (optlen < sizeof(uint32_t))
2198             return -TARGET_EINVAL;
2199 
2200         if (get_user_u32(val, optval_addr))
2201             return -TARGET_EFAULT;
2202         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2203         break;
2204     case SOL_IP:
2205         switch(optname) {
2206         case IP_TOS:
2207         case IP_TTL:
2208         case IP_HDRINCL:
2209         case IP_ROUTER_ALERT:
2210         case IP_RECVOPTS:
2211         case IP_RETOPTS:
2212         case IP_PKTINFO:
2213         case IP_MTU_DISCOVER:
2214         case IP_RECVERR:
2215         case IP_RECVTTL:
2216         case IP_RECVTOS:
2217 #ifdef IP_FREEBIND
2218         case IP_FREEBIND:
2219 #endif
2220         case IP_MULTICAST_TTL:
2221         case IP_MULTICAST_LOOP:
2222             val = 0;
2223             if (optlen >= sizeof(uint32_t)) {
2224                 if (get_user_u32(val, optval_addr))
2225                     return -TARGET_EFAULT;
2226             } else if (optlen >= 1) {
2227                 if (get_user_u8(val, optval_addr))
2228                     return -TARGET_EFAULT;
2229             }
2230             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2231             break;
2232         case IP_ADD_MEMBERSHIP:
2233         case IP_DROP_MEMBERSHIP:
2234             if (optlen < sizeof (struct target_ip_mreq) ||
2235                 optlen > sizeof (struct target_ip_mreqn))
2236                 return -TARGET_EINVAL;
2237 
2238             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2239             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2240             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2241             break;
2242 
2243         case IP_BLOCK_SOURCE:
2244         case IP_UNBLOCK_SOURCE:
2245         case IP_ADD_SOURCE_MEMBERSHIP:
2246         case IP_DROP_SOURCE_MEMBERSHIP:
2247             if (optlen != sizeof (struct target_ip_mreq_source))
2248                 return -TARGET_EINVAL;
2249 
2250             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2251             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2252             unlock_user(ip_mreq_source, optval_addr, 0);
2253             break;
2254 
2255         default:
2256             goto unimplemented;
2257         }
2258         break;
2259     case SOL_IPV6:
2260         switch (optname) {
2261         case IPV6_MTU_DISCOVER:
2262         case IPV6_MTU:
2263         case IPV6_V6ONLY:
2264         case IPV6_RECVPKTINFO:
2265         case IPV6_UNICAST_HOPS:
2266         case IPV6_MULTICAST_HOPS:
2267         case IPV6_MULTICAST_LOOP:
2268         case IPV6_RECVERR:
2269         case IPV6_RECVHOPLIMIT:
2270         case IPV6_2292HOPLIMIT:
2271         case IPV6_CHECKSUM:
2272         case IPV6_ADDRFORM:
2273         case IPV6_2292PKTINFO:
2274         case IPV6_RECVTCLASS:
2275         case IPV6_RECVRTHDR:
2276         case IPV6_2292RTHDR:
2277         case IPV6_RECVHOPOPTS:
2278         case IPV6_2292HOPOPTS:
2279         case IPV6_RECVDSTOPTS:
2280         case IPV6_2292DSTOPTS:
2281         case IPV6_TCLASS:
2282         case IPV6_ADDR_PREFERENCES:
2283 #ifdef IPV6_RECVPATHMTU
2284         case IPV6_RECVPATHMTU:
2285 #endif
2286 #ifdef IPV6_TRANSPARENT
2287         case IPV6_TRANSPARENT:
2288 #endif
2289 #ifdef IPV6_FREEBIND
2290         case IPV6_FREEBIND:
2291 #endif
2292 #ifdef IPV6_RECVORIGDSTADDR
2293         case IPV6_RECVORIGDSTADDR:
2294 #endif
2295             val = 0;
2296             if (optlen < sizeof(uint32_t)) {
2297                 return -TARGET_EINVAL;
2298             }
2299             if (get_user_u32(val, optval_addr)) {
2300                 return -TARGET_EFAULT;
2301             }
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &val, sizeof(val)));
2304             break;
2305         case IPV6_PKTINFO:
2306         {
2307             struct in6_pktinfo pki;
2308 
2309             if (optlen < sizeof(pki)) {
2310                 return -TARGET_EINVAL;
2311             }
2312 
2313             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2314                 return -TARGET_EFAULT;
2315             }
2316 
2317             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2318 
2319             ret = get_errno(setsockopt(sockfd, level, optname,
2320                                        &pki, sizeof(pki)));
2321             break;
2322         }
2323         case IPV6_ADD_MEMBERSHIP:
2324         case IPV6_DROP_MEMBERSHIP:
2325         {
2326             struct ipv6_mreq ipv6mreq;
2327 
2328             if (optlen < sizeof(ipv6mreq)) {
2329                 return -TARGET_EINVAL;
2330             }
2331 
2332             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2333                 return -TARGET_EFAULT;
2334             }
2335 
2336             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2337 
2338             ret = get_errno(setsockopt(sockfd, level, optname,
2339                                        &ipv6mreq, sizeof(ipv6mreq)));
2340             break;
2341         }
2342         default:
2343             goto unimplemented;
2344         }
2345         break;
2346     case SOL_ICMPV6:
2347         switch (optname) {
2348         case ICMPV6_FILTER:
2349         {
2350             struct icmp6_filter icmp6f;
2351 
2352             if (optlen > sizeof(icmp6f)) {
2353                 optlen = sizeof(icmp6f);
2354             }
2355 
2356             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2357                 return -TARGET_EFAULT;
2358             }
2359 
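            /* The filter is a 256-bit type bitmap held as eight 32-bit words. */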
2360             for (val = 0; val < 8; val++) {
2361                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2362             }
2363 
2364             ret = get_errno(setsockopt(sockfd, level, optname,
2365                                        &icmp6f, optlen));
2366             break;
2367         }
2368         default:
2369             goto unimplemented;
2370         }
2371         break;
2372     case SOL_RAW:
2373         switch (optname) {
2374         case ICMP_FILTER:
2375         case IPV6_CHECKSUM:
2376             /* These take a u32 value. */
2377             if (optlen < sizeof(uint32_t)) {
2378                 return -TARGET_EINVAL;
2379             }
2380 
2381             if (get_user_u32(val, optval_addr)) {
2382                 return -TARGET_EFAULT;
2383             }
2384             ret = get_errno(setsockopt(sockfd, level, optname,
2385                                        &val, sizeof(val)));
2386             break;
2387 
2388         default:
2389             goto unimplemented;
2390         }
2391         break;
2392 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2393     case SOL_ALG:
2394         switch (optname) {
2395         case ALG_SET_KEY:
2396         {
2397             char *alg_key = g_malloc(optlen);
2398 
2399             if (!alg_key) {
2400                 return -TARGET_ENOMEM;
2401             }
2402             if (copy_from_user(alg_key, optval_addr, optlen)) {
2403                 g_free(alg_key);
2404                 return -TARGET_EFAULT;
2405             }
2406             ret = get_errno(setsockopt(sockfd, level, optname,
2407                                        alg_key, optlen));
2408             g_free(alg_key);
2409             break;
2410         }
2411         case ALG_SET_AEAD_AUTHSIZE:
2412         {
2413             ret = get_errno(setsockopt(sockfd, level, optname,
2414                                        NULL, optlen));
2415             break;
2416         }
2417         default:
2418             goto unimplemented;
2419         }
2420         break;
2421 #endif
2422     case TARGET_SOL_SOCKET:
2423         switch (optname) {
2424         case TARGET_SO_RCVTIMEO:
2425         {
2426                 struct timeval tv;
2427 
2428                 optname = SO_RCVTIMEO;
2429 
2430 set_timeout:
2431                 if (optlen != sizeof(struct target_timeval)) {
2432                     return -TARGET_EINVAL;
2433                 }
2434 
2435                 if (copy_from_user_timeval(&tv, optval_addr)) {
2436                     return -TARGET_EFAULT;
2437                 }
2438 
2439                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2440                                 &tv, sizeof(tv)));
2441                 return ret;
2442         }
2443         case TARGET_SO_SNDTIMEO:
2444                 optname = SO_SNDTIMEO;
2445                 goto set_timeout;
2446         case TARGET_SO_ATTACH_FILTER:
2447         {
2448                 struct target_sock_fprog *tfprog;
2449                 struct target_sock_filter *tfilter;
2450                 struct sock_fprog fprog;
2451                 struct sock_filter *filter;
2452                 int i;
2453 
2454                 if (optlen != sizeof(*tfprog)) {
2455                     return -TARGET_EINVAL;
2456                 }
2457                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2458                     return -TARGET_EFAULT;
2459                 }
2460                 if (!lock_user_struct(VERIFY_READ, tfilter,
2461                                       tswapal(tfprog->filter), 0)) {
2462                     unlock_user_struct(tfprog, optval_addr, 1);
2463                     return -TARGET_EFAULT;
2464                 }
2465 
2466                 fprog.len = tswap16(tfprog->len);
2467                 filter = g_try_new(struct sock_filter, fprog.len);
2468                 if (filter == NULL) {
2469                     unlock_user_struct(tfilter, tfprog->filter, 1);
2470                     unlock_user_struct(tfprog, optval_addr, 1);
2471                     return -TARGET_ENOMEM;
2472                 }
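                /*
                 * Convert each classic-BPF insn: code and k are
                 * multi-byte and need swapping, jt/jf are single bytes.
                 */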
2473                 for (i = 0; i < fprog.len; i++) {
2474                     filter[i].code = tswap16(tfilter[i].code);
2475                     filter[i].jt = tfilter[i].jt;
2476                     filter[i].jf = tfilter[i].jf;
2477                     filter[i].k = tswap32(tfilter[i].k);
2478                 }
2479                 fprog.filter = filter;
2480 
2481                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2482                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2483                 g_free(filter);
2484 
2485                 unlock_user_struct(tfilter, tfprog->filter, 1);
2486                 unlock_user_struct(tfprog, optval_addr, 1);
2487                 return ret;
2488         }
2489         case TARGET_SO_BINDTODEVICE:
2490         {
2491                 char *dev_ifname, *addr_ifname;
2492 
2493                 if (optlen > IFNAMSIZ - 1) {
2494                     optlen = IFNAMSIZ - 1;
2495                 }
2496                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2497                 if (!dev_ifname) {
2498                     return -TARGET_EFAULT;
2499                 }
2500                 optname = SO_BINDTODEVICE;
2501                 addr_ifname = alloca(IFNAMSIZ);
2502                 memcpy(addr_ifname, dev_ifname, optlen);
2503                 addr_ifname[optlen] = 0;
2504                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2505                                            addr_ifname, optlen));
2506                 unlock_user(dev_ifname, optval_addr, 0);
2507                 return ret;
2508         }
2509         case TARGET_SO_LINGER:
2510         {
2511                 struct linger lg;
2512                 struct target_linger *tlg;
2513 
2514                 if (optlen != sizeof(struct target_linger)) {
2515                     return -TARGET_EINVAL;
2516                 }
2517                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2518                     return -TARGET_EFAULT;
2519                 }
2520                 __get_user(lg.l_onoff, &tlg->l_onoff);
2521                 __get_user(lg.l_linger, &tlg->l_linger);
2522                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2523                                 &lg, sizeof(lg)));
2524                 unlock_user_struct(tlg, optval_addr, 0);
2525                 return ret;
2526         }
2527             /* Options with 'int' argument.  */
2528         case TARGET_SO_DEBUG:
2529                 optname = SO_DEBUG;
2530                 break;
2531         case TARGET_SO_REUSEADDR:
2532                 optname = SO_REUSEADDR;
2533                 break;
2534 #ifdef SO_REUSEPORT
2535         case TARGET_SO_REUSEPORT:
2536                 optname = SO_REUSEPORT;
2537                 break;
2538 #endif
2539         case TARGET_SO_TYPE:
2540                 optname = SO_TYPE;
2541                 break;
2542         case TARGET_SO_ERROR:
2543                 optname = SO_ERROR;
2544                 break;
2545         case TARGET_SO_DONTROUTE:
2546                 optname = SO_DONTROUTE;
2547                 break;
2548         case TARGET_SO_BROADCAST:
2549                 optname = SO_BROADCAST;
2550                 break;
2551         case TARGET_SO_SNDBUF:
2552                 optname = SO_SNDBUF;
2553                 break;
2554         case TARGET_SO_SNDBUFFORCE:
2555                 optname = SO_SNDBUFFORCE;
2556                 break;
2557         case TARGET_SO_RCVBUF:
2558                 optname = SO_RCVBUF;
2559                 break;
2560         case TARGET_SO_RCVBUFFORCE:
2561                 optname = SO_RCVBUFFORCE;
2562                 break;
2563         case TARGET_SO_KEEPALIVE:
2564                 optname = SO_KEEPALIVE;
2565                 break;
2566         case TARGET_SO_OOBINLINE:
2567                 optname = SO_OOBINLINE;
2568                 break;
2569         case TARGET_SO_NO_CHECK:
2570                 optname = SO_NO_CHECK;
2571                 break;
2572         case TARGET_SO_PRIORITY:
2573                 optname = SO_PRIORITY;
2574                 break;
2575 #ifdef SO_BSDCOMPAT
2576         case TARGET_SO_BSDCOMPAT:
2577                 optname = SO_BSDCOMPAT;
2578                 break;
2579 #endif
2580         case TARGET_SO_PASSCRED:
2581                 optname = SO_PASSCRED;
2582                 break;
2583         case TARGET_SO_PASSSEC:
2584                 optname = SO_PASSSEC;
2585                 break;
2586         case TARGET_SO_TIMESTAMP:
2587                 optname = SO_TIMESTAMP;
2588                 break;
2589         case TARGET_SO_RCVLOWAT:
2590                 optname = SO_RCVLOWAT;
2591                 break;
2592         default:
2593             goto unimplemented;
2594         }
2595         if (optlen < sizeof(uint32_t))
2596             return -TARGET_EINVAL;
2597 
2598         if (get_user_u32(val, optval_addr))
2599             return -TARGET_EFAULT;
2600         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2601         break;
2602 #ifdef SOL_NETLINK
2603     case SOL_NETLINK:
2604         switch (optname) {
2605         case NETLINK_PKTINFO:
2606         case NETLINK_ADD_MEMBERSHIP:
2607         case NETLINK_DROP_MEMBERSHIP:
2608         case NETLINK_BROADCAST_ERROR:
2609         case NETLINK_NO_ENOBUFS:
2610 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2611         case NETLINK_LISTEN_ALL_NSID:
2612         case NETLINK_CAP_ACK:
2613 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2614 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2615         case NETLINK_EXT_ACK:
2616 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2618         case NETLINK_GET_STRICT_CHK:
2619 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2620             break;
2621         default:
2622             goto unimplemented;
2623         }
2624         val = 0;
2625         if (optlen < sizeof(uint32_t)) {
2626             return -TARGET_EINVAL;
2627         }
2628         if (get_user_u32(val, optval_addr)) {
2629             return -TARGET_EFAULT;
2630         }
2631         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2632                                    sizeof(val)));
2633         break;
2634 #endif /* SOL_NETLINK */
2635     default:
2636     unimplemented:
2637         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2638                       level, optname);
2639         ret = -TARGET_ENOPROTOOPT;
2640     }
2641     return ret;
2642 }
2643 
2644 /* do_getsockopt() Must return target values and target errnos. */
2645 static abi_long do_getsockopt(int sockfd, int level, int optname,
2646                               abi_ulong optval_addr, abi_ulong optlen)
2647 {
2648     abi_long ret;
2649     int len, val;
2650     socklen_t lv;
2651 
2652     switch(level) {
2653     case TARGET_SOL_SOCKET:
2654         level = SOL_SOCKET;
2655         switch (optname) {
2656         /* These don't just return a single integer */
2657         case TARGET_SO_PEERNAME:
2658             goto unimplemented;
2659         case TARGET_SO_RCVTIMEO: {
2660             struct timeval tv;
2661             socklen_t tvlen;
2662 
2663             optname = SO_RCVTIMEO;
2664 
2665 get_timeout:
2666             if (get_user_u32(len, optlen)) {
2667                 return -TARGET_EFAULT;
2668             }
2669             if (len < 0) {
2670                 return -TARGET_EINVAL;
2671             }
2672 
2673             tvlen = sizeof(tv);
2674             ret = get_errno(getsockopt(sockfd, level, optname,
2675                                        &tv, &tvlen));
2676             if (ret < 0) {
2677                 return ret;
2678             }
2679             if (len > sizeof(struct target_timeval)) {
2680                 len = sizeof(struct target_timeval);
2681             }
2682             if (copy_to_user_timeval(optval_addr, &tv)) {
2683                 return -TARGET_EFAULT;
2684             }
2685             if (put_user_u32(len, optlen)) {
2686                 return -TARGET_EFAULT;
2687             }
2688             break;
2689         }
2690         case TARGET_SO_SNDTIMEO:
2691             optname = SO_SNDTIMEO;
2692             goto get_timeout;
2693         case TARGET_SO_PEERCRED: {
2694             struct ucred cr;
2695             socklen_t crlen;
2696             struct target_ucred *tcr;
2697 
2698             if (get_user_u32(len, optlen)) {
2699                 return -TARGET_EFAULT;
2700             }
2701             if (len < 0) {
2702                 return -TARGET_EINVAL;
2703             }
2704 
2705             crlen = sizeof(cr);
2706             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2707                                        &cr, &crlen));
2708             if (ret < 0) {
2709                 return ret;
2710             }
2711             if (len > crlen) {
2712                 len = crlen;
2713             }
2714             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2715                 return -TARGET_EFAULT;
2716             }
2717             __put_user(cr.pid, &tcr->pid);
2718             __put_user(cr.uid, &tcr->uid);
2719             __put_user(cr.gid, &tcr->gid);
2720             unlock_user_struct(tcr, optval_addr, 1);
2721             if (put_user_u32(len, optlen)) {
2722                 return -TARGET_EFAULT;
2723             }
2724             break;
2725         }
2726         case TARGET_SO_PEERSEC: {
2727             char *name;
2728 
2729             if (get_user_u32(len, optlen)) {
2730                 return -TARGET_EFAULT;
2731             }
2732             if (len < 0) {
2733                 return -TARGET_EINVAL;
2734             }
2735             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2736             if (!name) {
2737                 return -TARGET_EFAULT;
2738             }
2739             lv = len;
2740             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2741                                        name, &lv));
2742             if (put_user_u32(lv, optlen)) {
2743                 ret = -TARGET_EFAULT;
2744             }
2745             unlock_user(name, optval_addr, lv);
2746             break;
2747         }
2748         case TARGET_SO_LINGER:
2749         {
2750             struct linger lg;
2751             socklen_t lglen;
2752             struct target_linger *tlg;
2753 
2754             if (get_user_u32(len, optlen)) {
2755                 return -TARGET_EFAULT;
2756             }
2757             if (len < 0) {
2758                 return -TARGET_EINVAL;
2759             }
2760 
2761             lglen = sizeof(lg);
2762             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2763                                        &lg, &lglen));
2764             if (ret < 0) {
2765                 return ret;
2766             }
2767             if (len > lglen) {
2768                 len = lglen;
2769             }
2770             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2771                 return -TARGET_EFAULT;
2772             }
2773             __put_user(lg.l_onoff, &tlg->l_onoff);
2774             __put_user(lg.l_linger, &tlg->l_linger);
2775             unlock_user_struct(tlg, optval_addr, 1);
2776             if (put_user_u32(len, optlen)) {
2777                 return -TARGET_EFAULT;
2778             }
2779             break;
2780         }
2781         /* Options with 'int' argument.  */
2782         case TARGET_SO_DEBUG:
2783             optname = SO_DEBUG;
2784             goto int_case;
2785         case TARGET_SO_REUSEADDR:
2786             optname = SO_REUSEADDR;
2787             goto int_case;
2788 #ifdef SO_REUSEPORT
2789         case TARGET_SO_REUSEPORT:
2790             optname = SO_REUSEPORT;
2791             goto int_case;
2792 #endif
2793         case TARGET_SO_TYPE:
2794             optname = SO_TYPE;
2795             goto int_case;
2796         case TARGET_SO_ERROR:
2797             optname = SO_ERROR;
2798             goto int_case;
2799         case TARGET_SO_DONTROUTE:
2800             optname = SO_DONTROUTE;
2801             goto int_case;
2802         case TARGET_SO_BROADCAST:
2803             optname = SO_BROADCAST;
2804             goto int_case;
2805         case TARGET_SO_SNDBUF:
2806             optname = SO_SNDBUF;
2807             goto int_case;
2808         case TARGET_SO_RCVBUF:
2809             optname = SO_RCVBUF;
2810             goto int_case;
2811         case TARGET_SO_KEEPALIVE:
2812             optname = SO_KEEPALIVE;
2813             goto int_case;
2814         case TARGET_SO_OOBINLINE:
2815             optname = SO_OOBINLINE;
2816             goto int_case;
2817         case TARGET_SO_NO_CHECK:
2818             optname = SO_NO_CHECK;
2819             goto int_case;
2820         case TARGET_SO_PRIORITY:
2821             optname = SO_PRIORITY;
2822             goto int_case;
2823 #ifdef SO_BSDCOMPAT
2824         case TARGET_SO_BSDCOMPAT:
2825             optname = SO_BSDCOMPAT;
2826             goto int_case;
2827 #endif
2828         case TARGET_SO_PASSCRED:
2829             optname = SO_PASSCRED;
2830             goto int_case;
2831         case TARGET_SO_TIMESTAMP:
2832             optname = SO_TIMESTAMP;
2833             goto int_case;
2834         case TARGET_SO_RCVLOWAT:
2835             optname = SO_RCVLOWAT;
2836             goto int_case;
2837         case TARGET_SO_ACCEPTCONN:
2838             optname = SO_ACCEPTCONN;
2839             goto int_case;
2840         case TARGET_SO_PROTOCOL:
2841             optname = SO_PROTOCOL;
2842             goto int_case;
2843         case TARGET_SO_DOMAIN:
2844             optname = SO_DOMAIN;
2845             goto int_case;
2846         default:
2847             goto int_case;
2848         }
2849         break;
2850     case SOL_TCP:
2851     case SOL_UDP:
2852         /* TCP and UDP options all take an 'int' value.  */
2853     int_case:
2854         if (get_user_u32(len, optlen))
2855             return -TARGET_EFAULT;
2856         if (len < 0)
2857             return -TARGET_EINVAL;
2858         lv = sizeof(lv);
2859         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2860         if (ret < 0)
2861             return ret;
2862         if (optname == SO_TYPE) {
2863             val = host_to_target_sock_type(val);
2864         }
2865         if (len > lv)
2866             len = lv;
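        /*
         * Write back a full 32-bit value, or a single byte when the
         * guest asked for less.
         */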
2867         if (len == 4) {
2868             if (put_user_u32(val, optval_addr))
2869                 return -TARGET_EFAULT;
2870         } else {
2871             if (put_user_u8(val, optval_addr))
2872                 return -TARGET_EFAULT;
2873         }
2874         if (put_user_u32(len, optlen))
2875             return -TARGET_EFAULT;
2876         break;
2877     case SOL_IP:
2878         switch(optname) {
2879         case IP_TOS:
2880         case IP_TTL:
2881         case IP_HDRINCL:
2882         case IP_ROUTER_ALERT:
2883         case IP_RECVOPTS:
2884         case IP_RETOPTS:
2885         case IP_PKTINFO:
2886         case IP_MTU_DISCOVER:
2887         case IP_RECVERR:
2888         case IP_RECVTOS:
2889 #ifdef IP_FREEBIND
2890         case IP_FREEBIND:
2891 #endif
2892         case IP_MULTICAST_TTL:
2893         case IP_MULTICAST_LOOP:
2894             if (get_user_u32(len, optlen))
2895                 return -TARGET_EFAULT;
2896             if (len < 0)
2897                 return -TARGET_EINVAL;
2898             lv = sizeof(lv);
2899             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2900             if (ret < 0)
2901                 return ret;
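            /*
             * Return a single byte when the guest supplied a short
             * buffer and the value fits, otherwise a full int.
             */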
2902             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2903                 len = 1;
2904                 if (put_user_u32(len, optlen)
2905                     || put_user_u8(val, optval_addr))
2906                     return -TARGET_EFAULT;
2907             } else {
2908                 if (len > sizeof(int))
2909                     len = sizeof(int);
2910                 if (put_user_u32(len, optlen)
2911                     || put_user_u32(val, optval_addr))
2912                     return -TARGET_EFAULT;
2913             }
2914             break;
2915         default:
2916             ret = -TARGET_ENOPROTOOPT;
2917             break;
2918         }
2919         break;
2920     case SOL_IPV6:
2921         switch (optname) {
2922         case IPV6_MTU_DISCOVER:
2923         case IPV6_MTU:
2924         case IPV6_V6ONLY:
2925         case IPV6_RECVPKTINFO:
2926         case IPV6_UNICAST_HOPS:
2927         case IPV6_MULTICAST_HOPS:
2928         case IPV6_MULTICAST_LOOP:
2929         case IPV6_RECVERR:
2930         case IPV6_RECVHOPLIMIT:
2931         case IPV6_2292HOPLIMIT:
2932         case IPV6_CHECKSUM:
2933         case IPV6_ADDRFORM:
2934         case IPV6_2292PKTINFO:
2935         case IPV6_RECVTCLASS:
2936         case IPV6_RECVRTHDR:
2937         case IPV6_2292RTHDR:
2938         case IPV6_RECVHOPOPTS:
2939         case IPV6_2292HOPOPTS:
2940         case IPV6_RECVDSTOPTS:
2941         case IPV6_2292DSTOPTS:
2942         case IPV6_TCLASS:
2943         case IPV6_ADDR_PREFERENCES:
2944 #ifdef IPV6_RECVPATHMTU
2945         case IPV6_RECVPATHMTU:
2946 #endif
2947 #ifdef IPV6_TRANSPARENT
2948         case IPV6_TRANSPARENT:
2949 #endif
2950 #ifdef IPV6_FREEBIND
2951         case IPV6_FREEBIND:
2952 #endif
2953 #ifdef IPV6_RECVORIGDSTADDR
2954         case IPV6_RECVORIGDSTADDR:
2955 #endif
2956             if (get_user_u32(len, optlen))
2957                 return -TARGET_EFAULT;
2958             if (len < 0)
2959                 return -TARGET_EINVAL;
2960             lv = sizeof(lv);
2961             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2962             if (ret < 0)
2963                 return ret;
2964             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2965                 len = 1;
2966                 if (put_user_u32(len, optlen)
2967                     || put_user_u8(val, optval_addr))
2968                     return -TARGET_EFAULT;
2969             } else {
2970                 if (len > sizeof(int))
2971                     len = sizeof(int);
2972                 if (put_user_u32(len, optlen)
2973                     || put_user_u32(val, optval_addr))
2974                     return -TARGET_EFAULT;
2975             }
2976             break;
2977         default:
2978             ret = -TARGET_ENOPROTOOPT;
2979             break;
2980         }
2981         break;
2982 #ifdef SOL_NETLINK
2983     case SOL_NETLINK:
2984         switch (optname) {
2985         case NETLINK_PKTINFO:
2986         case NETLINK_BROADCAST_ERROR:
2987         case NETLINK_NO_ENOBUFS:
2988 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2989         case NETLINK_LISTEN_ALL_NSID:
2990         case NETLINK_CAP_ACK:
2991 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2992 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2993         case NETLINK_EXT_ACK:
2994 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2995 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2996         case NETLINK_GET_STRICT_CHK:
2997 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2998             if (get_user_u32(len, optlen)) {
2999                 return -TARGET_EFAULT;
3000             }
3001             if (len != sizeof(val)) {
3002                 return -TARGET_EINVAL;
3003             }
3004             lv = len;
3005             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3006             if (ret < 0) {
3007                 return ret;
3008             }
3009             if (put_user_u32(lv, optlen)
3010                 || put_user_u32(val, optval_addr)) {
3011                 return -TARGET_EFAULT;
3012             }
3013             break;
3014 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3015         case NETLINK_LIST_MEMBERSHIPS:
3016         {
3017             uint32_t *results;
3018             int i;
3019             if (get_user_u32(len, optlen)) {
3020                 return -TARGET_EFAULT;
3021             }
3022             if (len < 0) {
3023                 return -TARGET_EINVAL;
3024             }
3025             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3026             if (!results) {
3027                 return -TARGET_EFAULT;
3028             }
3029             lv = len;
3030             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3031             if (ret < 0) {
3032                 unlock_user(results, optval_addr, 0);
3033                 return ret;
3034             }
3035             /* Swap host endianness to target endianness. */
3036             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3037                 results[i] = tswap32(results[i]);
3038             }
3039             if (put_user_u32(lv, optlen)) {
3040                 return -TARGET_EFAULT;
3041             }
3042             unlock_user(results, optval_addr, 0);
3043             break;
3044         }
3045 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3046         default:
3047             goto unimplemented;
3048         }
3049         break;
3050 #endif /* SOL_NETLINK */
3051     default:
3052     unimplemented:
3053         qemu_log_mask(LOG_UNIMP,
3054                       "getsockopt level=%d optname=%d not yet supported\n",
3055                       level, optname);
3056         ret = -TARGET_EOPNOTSUPP;
3057         break;
3058     }
3059     return ret;
3060 }
3061 
3062 /* Convert target low/high pair representing file offset into the host
3063  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3064  * as the kernel doesn't handle them either.
3065  */
3066 static void target_to_host_low_high(abi_ulong tlow,
3067                                     abi_ulong thigh,
3068                                     unsigned long *hlow,
3069                                     unsigned long *hhigh)
3070 {
3071     uint64_t off = tlow |
3072         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3073         TARGET_LONG_BITS / 2;
3074 
3075     *hlow = off;
3076     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3077 }
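/*
 * A worked example of the conversion above (illustrative only): on a 32-bit
 * target, tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef.  A 64-bit host then gets
 * *hlow = 0x0123456789abcdef and *hhigh = 0, while a 32-bit host gets
 * *hlow = 0x89abcdef and *hhigh = 0x01234567.  Each shift is split into two
 * half-width shifts so that no single shift count equals the word width,
 * which would be undefined behaviour in C.
 */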
3078 
3079 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3080                                 abi_ulong count, int copy)
3081 {
3082     struct target_iovec *target_vec;
3083     struct iovec *vec;
3084     abi_ulong total_len, max_len;
3085     int i;
3086     int err = 0;
3087     bool bad_address = false;
3088 
3089     if (count == 0) {
3090         errno = 0;
3091         return NULL;
3092     }
3093     if (count > IOV_MAX) {
3094         errno = EINVAL;
3095         return NULL;
3096     }
3097 
3098     vec = g_try_new0(struct iovec, count);
3099     if (vec == NULL) {
3100         errno = ENOMEM;
3101         return NULL;
3102     }
3103 
3104     target_vec = lock_user(VERIFY_READ, target_addr,
3105                            count * sizeof(struct target_iovec), 1);
3106     if (target_vec == NULL) {
3107         err = EFAULT;
3108         goto fail2;
3109     }
3110 
3111     /* ??? If host page size > target page size, this will result in a
3112        value larger than what we can actually support.  */
3113     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3114     total_len = 0;
3115 
3116     for (i = 0; i < count; i++) {
3117         abi_ulong base = tswapal(target_vec[i].iov_base);
3118         abi_long len = tswapal(target_vec[i].iov_len);
3119 
3120         if (len < 0) {
3121             err = EINVAL;
3122             goto fail;
3123         } else if (len == 0) {
3124             /* Zero length pointer is ignored.  */
3125             vec[i].iov_base = 0;
3126         } else {
3127             vec[i].iov_base = lock_user(type, base, len, copy);
3128             /* If the first buffer pointer is bad, this is a fault.  But
3129              * subsequent bad buffers will result in a partial write; this
3130              * is realized by filling the vector with null pointers and
3131              * zero lengths. */
3132             if (!vec[i].iov_base) {
3133                 if (i == 0) {
3134                     err = EFAULT;
3135                     goto fail;
3136                 } else {
3137                     bad_address = true;
3138                 }
3139             }
3140             if (bad_address) {
3141                 len = 0;
3142             }
3143             if (len > max_len - total_len) {
3144                 len = max_len - total_len;
3145             }
3146         }
3147         vec[i].iov_len = len;
3148         total_len += len;
3149     }
3150 
3151     unlock_user(target_vec, target_addr, 0);
3152     return vec;
3153 
3154  fail:
3155     while (--i >= 0) {
3156         if (tswapal(target_vec[i].iov_len) > 0) {
3157             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3158         }
3159     }
3160     unlock_user(target_vec, target_addr, 0);
3161  fail2:
3162     g_free(vec);
3163     errno = err;
3164     return NULL;
3165 }
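/*
 * Example of the partial-transfer behaviour implemented above (illustrative
 * only): for a guest iovec of three entries where entry 0 is mapped, entry 1
 * points at an unmapped page and entry 2 is mapped again, lock_iovec()
 * produces
 *     vec[0] = { host_ptr0, len0 }
 *     vec[1] = { NULL,      0    }
 *     vec[2] = { host_ptr2, 0    }
 * so the subsequent host readv()/writev() only transfers the bytes before
 * the first bad buffer, matching the kernel's partial-transfer semantics.
 * A bad pointer in entry 0 is reported as EFAULT instead.
 */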
3166 
3167 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3168                          abi_ulong count, int copy)
3169 {
3170     struct target_iovec *target_vec;
3171     int i;
3172 
3173     target_vec = lock_user(VERIFY_READ, target_addr,
3174                            count * sizeof(struct target_iovec), 1);
3175     if (target_vec) {
3176         for (i = 0; i < count; i++) {
3177             abi_ulong base = tswapal(target_vec[i].iov_base);
3178             abi_long len = tswapal(target_vec[i].iov_len);
3179             if (len < 0) {
3180                 break;
3181             }
3182             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3183         }
3184         unlock_user(target_vec, target_addr, 0);
3185     }
3186 
3187     g_free(vec);
3188 }
3189 
3190 static inline int target_to_host_sock_type(int *type)
3191 {
3192     int host_type = 0;
3193     int target_type = *type;
3194 
3195     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3196     case TARGET_SOCK_DGRAM:
3197         host_type = SOCK_DGRAM;
3198         break;
3199     case TARGET_SOCK_STREAM:
3200         host_type = SOCK_STREAM;
3201         break;
3202     default:
3203         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3204         break;
3205     }
3206     if (target_type & TARGET_SOCK_CLOEXEC) {
3207 #if defined(SOCK_CLOEXEC)
3208         host_type |= SOCK_CLOEXEC;
3209 #else
3210         return -TARGET_EINVAL;
3211 #endif
3212     }
3213     if (target_type & TARGET_SOCK_NONBLOCK) {
3214 #if defined(SOCK_NONBLOCK)
3215         host_type |= SOCK_NONBLOCK;
3216 #elif !defined(O_NONBLOCK)
3217         return -TARGET_EINVAL;
3218 #endif
3219     }
3220     *type = host_type;
3221     return 0;
3222 }
3223 
3224 /* Try to emulate socket type flags after socket creation.  */
3225 static int sock_flags_fixup(int fd, int target_type)
3226 {
3227 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3228     if (target_type & TARGET_SOCK_NONBLOCK) {
3229         int flags = fcntl(fd, F_GETFL);
3230         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3231             close(fd);
3232             return -TARGET_EINVAL;
3233         }
3234     }
3235 #endif
3236     return fd;
3237 }
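/*
 * A minimal sketch (not part of the build) of what the NONBLOCK emulation
 * above amounts to on a host that lacks SOCK_NONBLOCK but has O_NONBLOCK:
 * the flag is stripped by target_to_host_sock_type() and re-applied once the
 * socket exists, i.e. roughly
 *
 *     int fd = socket(AF_INET, SOCK_STREAM, 0);   // flag already stripped
 *     int flags = fcntl(fd, F_GETFL);
 *     fcntl(fd, F_SETFL, O_NONBLOCK | flags);     // sock_flags_fixup()
 */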
3238 
3239 /* do_socket() Must return target values and target errnos. */
3240 static abi_long do_socket(int domain, int type, int protocol)
3241 {
3242     int target_type = type;
3243     int ret;
3244 
3245     ret = target_to_host_sock_type(&type);
3246     if (ret) {
3247         return ret;
3248     }
3249 
3250     if (domain == PF_NETLINK && !(
3251 #ifdef CONFIG_RTNETLINK
3252          protocol == NETLINK_ROUTE ||
3253 #endif
3254          protocol == NETLINK_KOBJECT_UEVENT ||
3255          protocol == NETLINK_AUDIT)) {
3256         return -TARGET_EPROTONOSUPPORT;
3257     }
3258 
3259     if (domain == AF_PACKET ||
3260         (domain == AF_INET && type == SOCK_PACKET)) {
3261         protocol = tswap16(protocol);
3262     }
3263 
3264     ret = get_errno(socket(domain, type, protocol));
3265     if (ret >= 0) {
3266         ret = sock_flags_fixup(ret, target_type);
3267         if (type == SOCK_PACKET) {
3268             /* Handle an obsolete case:
3269              * if the socket type is SOCK_PACKET, it is bound by name
3270              */
3271             fd_trans_register(ret, &target_packet_trans);
3272         } else if (domain == PF_NETLINK) {
3273             switch (protocol) {
3274 #ifdef CONFIG_RTNETLINK
3275             case NETLINK_ROUTE:
3276                 fd_trans_register(ret, &target_netlink_route_trans);
3277                 break;
3278 #endif
3279             case NETLINK_KOBJECT_UEVENT:
3280                 /* nothing to do: messages are strings */
3281                 break;
3282             case NETLINK_AUDIT:
3283                 fd_trans_register(ret, &target_netlink_audit_trans);
3284                 break;
3285             default:
3286                 g_assert_not_reached();
3287             }
3288         }
3289     }
3290     return ret;
3291 }
3292 
3293 /* do_bind() Must return target values and target errnos. */
3294 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3295                         socklen_t addrlen)
3296 {
3297     void *addr;
3298     abi_long ret;
3299 
3300     if ((int)addrlen < 0) {
3301         return -TARGET_EINVAL;
3302     }
3303 
3304     addr = alloca(addrlen+1);
3305 
3306     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3307     if (ret)
3308         return ret;
3309 
3310     return get_errno(bind(sockfd, addr, addrlen));
3311 }
3312 
3313 /* do_connect() Must return target values and target errnos. */
3314 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3315                            socklen_t addrlen)
3316 {
3317     void *addr;
3318     abi_long ret;
3319 
3320     if ((int)addrlen < 0) {
3321         return -TARGET_EINVAL;
3322     }
3323 
3324     addr = alloca(addrlen+1);
3325 
3326     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3327     if (ret)
3328         return ret;
3329 
3330     return get_errno(safe_connect(sockfd, addr, addrlen));
3331 }
3332 
3333 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3334 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3335                                       int flags, int send)
3336 {
3337     abi_long ret, len;
3338     struct msghdr msg;
3339     abi_ulong count;
3340     struct iovec *vec;
3341     abi_ulong target_vec;
3342 
3343     if (msgp->msg_name) {
3344         msg.msg_namelen = tswap32(msgp->msg_namelen);
3345         msg.msg_name = alloca(msg.msg_namelen+1);
3346         ret = target_to_host_sockaddr(fd, msg.msg_name,
3347                                       tswapal(msgp->msg_name),
3348                                       msg.msg_namelen);
3349         if (ret == -TARGET_EFAULT) {
3350             /* For connected sockets msg_name and msg_namelen must
3351              * be ignored, so returning EFAULT immediately is wrong.
3352              * Instead, pass a bad msg_name to the host kernel, and
3353              * let it decide whether to return EFAULT or not.
3354              */
3355             msg.msg_name = (void *)-1;
3356         } else if (ret) {
3357             goto out2;
3358         }
3359     } else {
3360         msg.msg_name = NULL;
3361         msg.msg_namelen = 0;
3362     }
3363     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3364     msg.msg_control = alloca(msg.msg_controllen);
3365     memset(msg.msg_control, 0, msg.msg_controllen);
3366 
3367     msg.msg_flags = tswap32(msgp->msg_flags);
3368 
3369     count = tswapal(msgp->msg_iovlen);
3370     target_vec = tswapal(msgp->msg_iov);
3371 
3372     if (count > IOV_MAX) {
3373         /* sendmsg/recvmsg return a different errno for this condition than
3374          * readv/writev, so we must catch it here before lock_iovec() does.
3375          */
3376         ret = -TARGET_EMSGSIZE;
3377         goto out2;
3378     }
3379 
3380     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3381                      target_vec, count, send);
3382     if (vec == NULL) {
3383         ret = -host_to_target_errno(errno);
3384         goto out2;
3385     }
3386     msg.msg_iovlen = count;
3387     msg.msg_iov = vec;
3388 
3389     if (send) {
3390         if (fd_trans_target_to_host_data(fd)) {
3391             void *host_msg;
3392 
3393             host_msg = g_malloc(msg.msg_iov->iov_len);
3394             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3395             ret = fd_trans_target_to_host_data(fd)(host_msg,
3396                                                    msg.msg_iov->iov_len);
3397             if (ret >= 0) {
3398                 msg.msg_iov->iov_base = host_msg;
3399                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3400             }
3401             g_free(host_msg);
3402         } else {
3403             ret = target_to_host_cmsg(&msg, msgp);
3404             if (ret == 0) {
3405                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3406             }
3407         }
3408     } else {
3409         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3410         if (!is_error(ret)) {
3411             len = ret;
3412             if (fd_trans_host_to_target_data(fd)) {
3413                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3414                                                MIN(msg.msg_iov->iov_len, len));
3415             } else {
3416                 ret = host_to_target_cmsg(msgp, &msg);
3417             }
3418             if (!is_error(ret)) {
3419                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3420                 msgp->msg_flags = tswap32(msg.msg_flags);
3421                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3422                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3423                                     msg.msg_name, msg.msg_namelen);
3424                     if (ret) {
3425                         goto out;
3426                     }
3427                 }
3428 
3429                 ret = len;
3430             }
3431         }
3432     }
3433 
3434 out:
3435     unlock_iovec(vec, target_vec, count, !send);
3436 out2:
3437     return ret;
3438 }
3439 
3440 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3441                                int flags, int send)
3442 {
3443     abi_long ret;
3444     struct target_msghdr *msgp;
3445 
3446     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3447                           msgp,
3448                           target_msg,
3449                           send ? 1 : 0)) {
3450         return -TARGET_EFAULT;
3451     }
3452     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3453     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3454     return ret;
3455 }
3456 
3457 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3458  * so it might not have this *mmsg-specific flag either.
3459  */
3460 #ifndef MSG_WAITFORONE
3461 #define MSG_WAITFORONE 0x10000
3462 #endif
3463 
3464 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3465                                 unsigned int vlen, unsigned int flags,
3466                                 int send)
3467 {
3468     struct target_mmsghdr *mmsgp;
3469     abi_long ret = 0;
3470     int i;
3471 
3472     if (vlen > UIO_MAXIOV) {
3473         vlen = UIO_MAXIOV;
3474     }
3475 
3476     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3477     if (!mmsgp) {
3478         return -TARGET_EFAULT;
3479     }
3480 
3481     for (i = 0; i < vlen; i++) {
3482         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3483         if (is_error(ret)) {
3484             break;
3485         }
3486         mmsgp[i].msg_len = tswap32(ret);
3487         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3488         if (flags & MSG_WAITFORONE) {
3489             flags |= MSG_DONTWAIT;
3490         }
3491     }
3492 
3493     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3494 
3495     /* Return number of datagrams sent if we sent any at all;
3496      * otherwise return the error.
3497      */
3498     if (i) {
3499         return i;
3500     }
3501     return ret;
3502 }
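/*
 * Example of the MSG_WAITFORONE handling above (illustrative only): for a
 * guest recvmmsg(fd, msgvec, 3, MSG_WAITFORONE, NULL), the first iteration
 * may block until one datagram arrives; the flag then turns on MSG_DONTWAIT,
 * so the remaining iterations only pick up datagrams that are already
 * queued.  If a later receive fails with EAGAIN, the loop stops and the
 * number of datagrams received so far is returned instead of the error.
 */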
3503 
3504 /* do_accept4() Must return target values and target errnos. */
3505 static abi_long do_accept4(int fd, abi_ulong target_addr,
3506                            abi_ulong target_addrlen_addr, int flags)
3507 {
3508     socklen_t addrlen, ret_addrlen;
3509     void *addr;
3510     abi_long ret;
3511     int host_flags;
3512 
3513     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3514 
3515     if (target_addr == 0) {
3516         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3517     }
3518 
3519     /* Linux returns EFAULT if the addrlen pointer is invalid */
3520     if (get_user_u32(addrlen, target_addrlen_addr))
3521         return -TARGET_EFAULT;
3522 
3523     if ((int)addrlen < 0) {
3524         return -TARGET_EINVAL;
3525     }
3526 
3527     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3528         return -TARGET_EFAULT;
3529 
3530     addr = alloca(addrlen);
3531 
3532     ret_addrlen = addrlen;
3533     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3534     if (!is_error(ret)) {
3535         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3536         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3537             ret = -TARGET_EFAULT;
3538         }
3539     }
3540     return ret;
3541 }
3542 
3543 /* do_getpeername() Must return target values and target errnos. */
3544 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3545                                abi_ulong target_addrlen_addr)
3546 {
3547     socklen_t addrlen, ret_addrlen;
3548     void *addr;
3549     abi_long ret;
3550 
3551     if (get_user_u32(addrlen, target_addrlen_addr))
3552         return -TARGET_EFAULT;
3553 
3554     if ((int)addrlen < 0) {
3555         return -TARGET_EINVAL;
3556     }
3557 
3558     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3559         return -TARGET_EFAULT;
3560 
3561     addr = alloca(addrlen);
3562 
3563     ret_addrlen = addrlen;
3564     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3565     if (!is_error(ret)) {
3566         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3567         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3568             ret = -TARGET_EFAULT;
3569         }
3570     }
3571     return ret;
3572 }
3573 
3574 /* do_getsockname() Must return target values and target errnos. */
3575 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3576                                abi_ulong target_addrlen_addr)
3577 {
3578     socklen_t addrlen, ret_addrlen;
3579     void *addr;
3580     abi_long ret;
3581 
3582     if (get_user_u32(addrlen, target_addrlen_addr))
3583         return -TARGET_EFAULT;
3584 
3585     if ((int)addrlen < 0) {
3586         return -TARGET_EINVAL;
3587     }
3588 
3589     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3590         return -TARGET_EFAULT;
3591 
3592     addr = alloca(addrlen);
3593 
3594     ret_addrlen = addrlen;
3595     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3596     if (!is_error(ret)) {
3597         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3598         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3599             ret = -TARGET_EFAULT;
3600         }
3601     }
3602     return ret;
3603 }
3604 
3605 /* do_socketpair() Must return target values and target errnos. */
3606 static abi_long do_socketpair(int domain, int type, int protocol,
3607                               abi_ulong target_tab_addr)
3608 {
3609     int tab[2];
3610     abi_long ret;
3611 
3612     target_to_host_sock_type(&type);
3613 
3614     ret = get_errno(socketpair(domain, type, protocol, tab));
3615     if (!is_error(ret)) {
3616         if (put_user_s32(tab[0], target_tab_addr)
3617             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3618             ret = -TARGET_EFAULT;
3619     }
3620     return ret;
3621 }
3622 
3623 /* do_sendto() Must return target values and target errnos. */
3624 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3625                           abi_ulong target_addr, socklen_t addrlen)
3626 {
3627     void *addr;
3628     void *host_msg;
3629     void *copy_msg = NULL;
3630     abi_long ret;
3631 
3632     if ((int)addrlen < 0) {
3633         return -TARGET_EINVAL;
3634     }
3635 
3636     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3637     if (!host_msg)
3638         return -TARGET_EFAULT;
3639     if (fd_trans_target_to_host_data(fd)) {
3640         copy_msg = host_msg;
3641         host_msg = g_malloc(len);
3642         memcpy(host_msg, copy_msg, len);
3643         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3644         if (ret < 0) {
3645             goto fail;
3646         }
3647     }
3648     if (target_addr) {
3649         addr = alloca(addrlen+1);
3650         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3651         if (ret) {
3652             goto fail;
3653         }
3654         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3655     } else {
3656         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3657     }
3658 fail:
3659     if (copy_msg) {
3660         g_free(host_msg);
3661         host_msg = copy_msg;
3662     }
3663     unlock_user(host_msg, msg, 0);
3664     return ret;
3665 }
3666 
3667 /* do_recvfrom() Must return target values and target errnos. */
3668 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3669                             abi_ulong target_addr,
3670                             abi_ulong target_addrlen)
3671 {
3672     socklen_t addrlen, ret_addrlen;
3673     void *addr;
3674     void *host_msg;
3675     abi_long ret;
3676 
3677     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3678     if (!host_msg)
3679         return -TARGET_EFAULT;
3680     if (target_addr) {
3681         if (get_user_u32(addrlen, target_addrlen)) {
3682             ret = -TARGET_EFAULT;
3683             goto fail;
3684         }
3685         if ((int)addrlen < 0) {
3686             ret = -TARGET_EINVAL;
3687             goto fail;
3688         }
3689         addr = alloca(addrlen);
3690         ret_addrlen = addrlen;
3691         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3692                                       addr, &ret_addrlen));
3693     } else {
3694         addr = NULL; /* To keep compiler quiet.  */
3695         addrlen = 0; /* To keep compiler quiet.  */
3696         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3697     }
3698     if (!is_error(ret)) {
3699         if (fd_trans_host_to_target_data(fd)) {
3700             abi_long trans;
3701             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3702             if (is_error(trans)) {
3703                 ret = trans;
3704                 goto fail;
3705             }
3706         }
3707         if (target_addr) {
3708             host_to_target_sockaddr(target_addr, addr,
3709                                     MIN(addrlen, ret_addrlen));
3710             if (put_user_u32(ret_addrlen, target_addrlen)) {
3711                 ret = -TARGET_EFAULT;
3712                 goto fail;
3713             }
3714         }
3715         unlock_user(host_msg, msg, len);
3716     } else {
3717 fail:
3718         unlock_user(host_msg, msg, 0);
3719     }
3720     return ret;
3721 }
3722 
3723 #ifdef TARGET_NR_socketcall
3724 /* do_socketcall() must return target values and target errnos. */
3725 static abi_long do_socketcall(int num, abi_ulong vptr)
3726 {
3727     static const unsigned nargs[] = { /* number of arguments per operation */
3728         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3729         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3730         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3731         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3732         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3733         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3734         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3735         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3736         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3737         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3738         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3739         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3740         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3741         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3742         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3743         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3744         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3745         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3746         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3747         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3748     };
3749     abi_long a[6]; /* max 6 args */
3750     unsigned i;
3751 
3752     /* check the range of the first argument num */
3753     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3754     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3755         return -TARGET_EINVAL;
3756     }
3757     /* ensure we have space for args */
3758     if (nargs[num] > ARRAY_SIZE(a)) {
3759         return -TARGET_EINVAL;
3760     }
3761     /* collect the arguments in a[] according to nargs[] */
3762     for (i = 0; i < nargs[num]; ++i) {
3763         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3764             return -TARGET_EFAULT;
3765         }
3766     }
3767     /* now when we have the args, invoke the appropriate underlying function */
3768     switch (num) {
3769     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3770         return do_socket(a[0], a[1], a[2]);
3771     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3772         return do_bind(a[0], a[1], a[2]);
3773     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3774         return do_connect(a[0], a[1], a[2]);
3775     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3776         return get_errno(listen(a[0], a[1]));
3777     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3778         return do_accept4(a[0], a[1], a[2], 0);
3779     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3780         return do_getsockname(a[0], a[1], a[2]);
3781     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3782         return do_getpeername(a[0], a[1], a[2]);
3783     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3784         return do_socketpair(a[0], a[1], a[2], a[3]);
3785     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3786         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3787     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3788         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3789     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3790         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3791     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3792         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3793     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3794         return get_errno(shutdown(a[0], a[1]));
3795     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3796         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3797     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3798         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3799     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3800         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3801     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3802         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3803     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3804         return do_accept4(a[0], a[1], a[2], a[3]);
3805     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3806         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3807     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3808         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3809     default:
3810         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3811         return -TARGET_EINVAL;
3812     }
3813 }
3814 #endif
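/*
 * Example (illustrative only): a guest socketcall(TARGET_SYS_SOCKET, vptr)
 * with the three abi_longs { AF_INET, SOCK_DGRAM, 0 } stored at vptr is
 * handled by reading nargs[TARGET_SYS_SOCKET] == 3 words with get_user_ual()
 * and dispatching them as do_socket(AF_INET, SOCK_DGRAM, 0).
 */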
3815 
3816 #define N_SHM_REGIONS	32
3817 
3818 static struct shm_region {
3819     abi_ulong start;
3820     abi_ulong size;
3821     bool in_use;
3822 } shm_regions[N_SHM_REGIONS];
3823 
3824 #ifndef TARGET_SEMID64_DS
3825 /* asm-generic version of this struct */
3826 struct target_semid64_ds
3827 {
3828   struct target_ipc_perm sem_perm;
3829   abi_ulong sem_otime;
3830 #if TARGET_ABI_BITS == 32
3831   abi_ulong __unused1;
3832 #endif
3833   abi_ulong sem_ctime;
3834 #if TARGET_ABI_BITS == 32
3835   abi_ulong __unused2;
3836 #endif
3837   abi_ulong sem_nsems;
3838   abi_ulong __unused3;
3839   abi_ulong __unused4;
3840 };
3841 #endif
3842 
3843 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3844                                                abi_ulong target_addr)
3845 {
3846     struct target_ipc_perm *target_ip;
3847     struct target_semid64_ds *target_sd;
3848 
3849     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3850         return -TARGET_EFAULT;
3851     target_ip = &(target_sd->sem_perm);
3852     host_ip->__key = tswap32(target_ip->__key);
3853     host_ip->uid = tswap32(target_ip->uid);
3854     host_ip->gid = tswap32(target_ip->gid);
3855     host_ip->cuid = tswap32(target_ip->cuid);
3856     host_ip->cgid = tswap32(target_ip->cgid);
3857 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3858     host_ip->mode = tswap32(target_ip->mode);
3859 #else
3860     host_ip->mode = tswap16(target_ip->mode);
3861 #endif
3862 #if defined(TARGET_PPC)
3863     host_ip->__seq = tswap32(target_ip->__seq);
3864 #else
3865     host_ip->__seq = tswap16(target_ip->__seq);
3866 #endif
3867     unlock_user_struct(target_sd, target_addr, 0);
3868     return 0;
3869 }
3870 
3871 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3872                                                struct ipc_perm *host_ip)
3873 {
3874     struct target_ipc_perm *target_ip;
3875     struct target_semid64_ds *target_sd;
3876 
3877     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3878         return -TARGET_EFAULT;
3879     target_ip = &(target_sd->sem_perm);
3880     target_ip->__key = tswap32(host_ip->__key);
3881     target_ip->uid = tswap32(host_ip->uid);
3882     target_ip->gid = tswap32(host_ip->gid);
3883     target_ip->cuid = tswap32(host_ip->cuid);
3884     target_ip->cgid = tswap32(host_ip->cgid);
3885 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3886     target_ip->mode = tswap32(host_ip->mode);
3887 #else
3888     target_ip->mode = tswap16(host_ip->mode);
3889 #endif
3890 #if defined(TARGET_PPC)
3891     target_ip->__seq = tswap32(host_ip->__seq);
3892 #else
3893     target_ip->__seq = tswap16(host_ip->__seq);
3894 #endif
3895     unlock_user_struct(target_sd, target_addr, 1);
3896     return 0;
3897 }
3898 
3899 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3900                                                abi_ulong target_addr)
3901 {
3902     struct target_semid64_ds *target_sd;
3903 
3904     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3905         return -TARGET_EFAULT;
3906     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3907         return -TARGET_EFAULT;
3908     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3909     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3910     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3911     unlock_user_struct(target_sd, target_addr, 0);
3912     return 0;
3913 }
3914 
3915 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3916                                                struct semid_ds *host_sd)
3917 {
3918     struct target_semid64_ds *target_sd;
3919 
3920     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3921         return -TARGET_EFAULT;
3922     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3923         return -TARGET_EFAULT;
3924     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3925     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3926     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3927     unlock_user_struct(target_sd, target_addr, 1);
3928     return 0;
3929 }
3930 
3931 struct target_seminfo {
3932     int semmap;
3933     int semmni;
3934     int semmns;
3935     int semmnu;
3936     int semmsl;
3937     int semopm;
3938     int semume;
3939     int semusz;
3940     int semvmx;
3941     int semaem;
3942 };
3943 
3944 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3945                                               struct seminfo *host_seminfo)
3946 {
3947     struct target_seminfo *target_seminfo;
3948     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3949         return -TARGET_EFAULT;
3950     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3951     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3952     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3953     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3954     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3955     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3956     __put_user(host_seminfo->semume, &target_seminfo->semume);
3957     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3958     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3959     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3960     unlock_user_struct(target_seminfo, target_addr, 1);
3961     return 0;
3962 }
3963 
3964 union semun {
3965 	int val;
3966 	struct semid_ds *buf;
3967 	unsigned short *array;
3968 	struct seminfo *__buf;
3969 };
3970 
3971 union target_semun {
3972 	int val;
3973 	abi_ulong buf;
3974 	abi_ulong array;
3975 	abi_ulong __buf;
3976 };
3977 
3978 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3979                                                abi_ulong target_addr)
3980 {
3981     int nsems;
3982     unsigned short *array;
3983     union semun semun;
3984     struct semid_ds semid_ds;
3985     int i, ret;
3986 
3987     semun.buf = &semid_ds;
3988 
3989     ret = semctl(semid, 0, IPC_STAT, semun);
3990     if (ret == -1)
3991         return get_errno(ret);
3992 
3993     nsems = semid_ds.sem_nsems;
3994 
3995     *host_array = g_try_new(unsigned short, nsems);
3996     if (!*host_array) {
3997         return -TARGET_ENOMEM;
3998     }
3999     array = lock_user(VERIFY_READ, target_addr,
4000                       nsems*sizeof(unsigned short), 1);
4001     if (!array) {
4002         g_free(*host_array);
4003         return -TARGET_EFAULT;
4004     }
4005 
4006     for(i=0; i<nsems; i++) {
4007         __get_user((*host_array)[i], &array[i]);
4008     }
4009     unlock_user(array, target_addr, 0);
4010 
4011     return 0;
4012 }
4013 
4014 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4015                                                unsigned short **host_array)
4016 {
4017     int nsems;
4018     unsigned short *array;
4019     union semun semun;
4020     struct semid_ds semid_ds;
4021     int i, ret;
4022 
4023     semun.buf = &semid_ds;
4024 
4025     ret = semctl(semid, 0, IPC_STAT, semun);
4026     if (ret == -1)
4027         return get_errno(ret);
4028 
4029     nsems = semid_ds.sem_nsems;
4030 
4031     array = lock_user(VERIFY_WRITE, target_addr,
4032                       nsems*sizeof(unsigned short), 0);
4033     if (!array)
4034         return -TARGET_EFAULT;
4035 
4036     for(i=0; i<nsems; i++) {
4037         __put_user((*host_array)[i], &array[i]);
4038     }
4039     g_free(*host_array);
4040     unlock_user(array, target_addr, 1);
4041 
4042     return 0;
4043 }
4044 
4045 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4046                                  abi_ulong target_arg)
4047 {
4048     union target_semun target_su = { .buf = target_arg };
4049     union semun arg;
4050     struct semid_ds dsarg;
4051     unsigned short *array = NULL;
4052     struct seminfo seminfo;
4053     abi_long ret = -TARGET_EINVAL;
4054     abi_long err;
4055     cmd &= 0xff;
4056 
4057     switch( cmd ) {
4058 	case GETVAL:
4059 	case SETVAL:
4060             /* In 64 bit cross-endian situations, we will erroneously pick up
4061              * the wrong half of the union for the "val" element.  To rectify
4062              * this, the entire 8-byte structure is byteswapped, followed by
4063              * a swap of the 4 byte val field. In other cases, the data is
4064              * already in proper host byte order. */
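            /*
             * Worked example (illustrative only): a big-endian 64-bit guest
             * on a little-endian host passes SETVAL with val = 1 via the
             * ipc() path.  The guest's 8-byte union holds the bytes
             * 00 00 00 01 00 00 00 00; the generic 64-bit argument fetch
             * byteswaps this into 0x0000000100000000, so reading .val
             * directly on the host would yield 0.  The tswapal() below
             * restores the guest byte order inside the union, .val then
             * reads 0x01000000, and the final tswap32() recovers the
             * intended value 1.
             */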
4065 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4066 		target_su.buf = tswapal(target_su.buf);
4067 		arg.val = tswap32(target_su.val);
4068 	    } else {
4069 		arg.val = target_su.val;
4070 	    }
4071             ret = get_errno(semctl(semid, semnum, cmd, arg));
4072             break;
4073 	case GETALL:
4074 	case SETALL:
4075             err = target_to_host_semarray(semid, &array, target_su.array);
4076             if (err)
4077                 return err;
4078             arg.array = array;
4079             ret = get_errno(semctl(semid, semnum, cmd, arg));
4080             err = host_to_target_semarray(semid, target_su.array, &array);
4081             if (err)
4082                 return err;
4083             break;
4084 	case IPC_STAT:
4085 	case IPC_SET:
4086 	case SEM_STAT:
4087             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4088             if (err)
4089                 return err;
4090             arg.buf = &dsarg;
4091             ret = get_errno(semctl(semid, semnum, cmd, arg));
4092             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4093             if (err)
4094                 return err;
4095             break;
4096 	case IPC_INFO:
4097 	case SEM_INFO:
4098             arg.__buf = &seminfo;
4099             ret = get_errno(semctl(semid, semnum, cmd, arg));
4100             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4101             if (err)
4102                 return err;
4103             break;
4104 	case IPC_RMID:
4105 	case GETPID:
4106 	case GETNCNT:
4107 	case GETZCNT:
4108             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4109             break;
4110     }
4111 
4112     return ret;
4113 }
4114 
4115 struct target_sembuf {
4116     unsigned short sem_num;
4117     short sem_op;
4118     short sem_flg;
4119 };
4120 
4121 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4122                                              abi_ulong target_addr,
4123                                              unsigned nsops)
4124 {
4125     struct target_sembuf *target_sembuf;
4126     int i;
4127 
4128     target_sembuf = lock_user(VERIFY_READ, target_addr,
4129                               nsops*sizeof(struct target_sembuf), 1);
4130     if (!target_sembuf)
4131         return -TARGET_EFAULT;
4132 
4133     for(i=0; i<nsops; i++) {
4134         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4135         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4136         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4137     }
4138 
4139     unlock_user(target_sembuf, target_addr, 0);
4140 
4141     return 0;
4142 }
4143 
4144 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4145     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4146 
4147 /*
4148  * This macro is required to handle the s390 variant, which passes the
4149  * arguments in a different order than the default.
4150  */
4151 #ifdef __s390x__
4152 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4153   (__nsops), (__timeout), (__sops)
4154 #else
4155 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4156   (__nsops), 0, (__sops), (__timeout)
4157 #endif
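/*
 * For reference (illustrative only), with these macros the safe_ipc() call
 * below expands to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 * on the default ABI, and to the five-argument form
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 * on s390x, where the timeout is passed ahead of the sops pointer.
 */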
4158 
4159 static inline abi_long do_semtimedop(int semid,
4160                                      abi_long ptr,
4161                                      unsigned nsops,
4162                                      abi_long timeout, bool time64)
4163 {
4164     struct sembuf *sops;
4165     struct timespec ts, *pts = NULL;
4166     abi_long ret;
4167 
4168     if (timeout) {
4169         pts = &ts;
4170         if (time64) {
4171             if (target_to_host_timespec64(pts, timeout)) {
4172                 return -TARGET_EFAULT;
4173             }
4174         } else {
4175             if (target_to_host_timespec(pts, timeout)) {
4176                 return -TARGET_EFAULT;
4177             }
4178         }
4179     }
4180 
4181     if (nsops > TARGET_SEMOPM) {
4182         return -TARGET_E2BIG;
4183     }
4184 
4185     sops = g_new(struct sembuf, nsops);
4186 
4187     if (target_to_host_sembuf(sops, ptr, nsops)) {
4188         g_free(sops);
4189         return -TARGET_EFAULT;
4190     }
4191 
4192     ret = -TARGET_ENOSYS;
4193 #ifdef __NR_semtimedop
4194     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4195 #endif
4196 #ifdef __NR_ipc
4197     if (ret == -TARGET_ENOSYS) {
4198         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4199                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4200     }
4201 #endif
4202     g_free(sops);
4203     return ret;
4204 }
4205 #endif
4206 
4207 struct target_msqid_ds
4208 {
4209     struct target_ipc_perm msg_perm;
4210     abi_ulong msg_stime;
4211 #if TARGET_ABI_BITS == 32
4212     abi_ulong __unused1;
4213 #endif
4214     abi_ulong msg_rtime;
4215 #if TARGET_ABI_BITS == 32
4216     abi_ulong __unused2;
4217 #endif
4218     abi_ulong msg_ctime;
4219 #if TARGET_ABI_BITS == 32
4220     abi_ulong __unused3;
4221 #endif
4222     abi_ulong __msg_cbytes;
4223     abi_ulong msg_qnum;
4224     abi_ulong msg_qbytes;
4225     abi_ulong msg_lspid;
4226     abi_ulong msg_lrpid;
4227     abi_ulong __unused4;
4228     abi_ulong __unused5;
4229 };
4230 
4231 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4232                                                abi_ulong target_addr)
4233 {
4234     struct target_msqid_ds *target_md;
4235 
4236     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4237         return -TARGET_EFAULT;
4238     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4239         return -TARGET_EFAULT;
4240     host_md->msg_stime = tswapal(target_md->msg_stime);
4241     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4242     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4243     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4244     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4245     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4246     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4247     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4248     unlock_user_struct(target_md, target_addr, 0);
4249     return 0;
4250 }
4251 
4252 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4253                                                struct msqid_ds *host_md)
4254 {
4255     struct target_msqid_ds *target_md;
4256 
4257     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4258         return -TARGET_EFAULT;
4259     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4260         return -TARGET_EFAULT;
4261     target_md->msg_stime = tswapal(host_md->msg_stime);
4262     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4263     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4264     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4265     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4266     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4267     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4268     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4269     unlock_user_struct(target_md, target_addr, 1);
4270     return 0;
4271 }
4272 
4273 struct target_msginfo {
4274     int msgpool;
4275     int msgmap;
4276     int msgmax;
4277     int msgmnb;
4278     int msgmni;
4279     int msgssz;
4280     int msgtql;
4281     unsigned short int msgseg;
4282 };
4283 
4284 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4285                                               struct msginfo *host_msginfo)
4286 {
4287     struct target_msginfo *target_msginfo;
4288     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4289         return -TARGET_EFAULT;
4290     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4291     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4292     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4293     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4294     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4295     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4296     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4297     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4298     unlock_user_struct(target_msginfo, target_addr, 1);
4299     return 0;
4300 }
4301 
4302 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4303 {
4304     struct msqid_ds dsarg;
4305     struct msginfo msginfo;
4306     abi_long ret = -TARGET_EINVAL;
4307 
4308     cmd &= 0xff;
4309 
4310     switch (cmd) {
4311     case IPC_STAT:
4312     case IPC_SET:
4313     case MSG_STAT:
4314         if (target_to_host_msqid_ds(&dsarg,ptr))
4315             return -TARGET_EFAULT;
4316         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4317         if (host_to_target_msqid_ds(ptr,&dsarg))
4318             return -TARGET_EFAULT;
4319         break;
4320     case IPC_RMID:
4321         ret = get_errno(msgctl(msgid, cmd, NULL));
4322         break;
4323     case IPC_INFO:
4324     case MSG_INFO:
4325         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4326         if (host_to_target_msginfo(ptr, &msginfo))
4327             return -TARGET_EFAULT;
4328         break;
4329     }
4330 
4331     return ret;
4332 }
4333 
4334 struct target_msgbuf {
4335     abi_long mtype;
4336     char	mtext[1];
4337 };
4338 
4339 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4340                                  ssize_t msgsz, int msgflg)
4341 {
4342     struct target_msgbuf *target_mb;
4343     struct msgbuf *host_mb;
4344     abi_long ret = 0;
4345 
4346     if (msgsz < 0) {
4347         return -TARGET_EINVAL;
4348     }
4349 
4350     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4351         return -TARGET_EFAULT;
4352     host_mb = g_try_malloc(msgsz + sizeof(long));
4353     if (!host_mb) {
4354         unlock_user_struct(target_mb, msgp, 0);
4355         return -TARGET_ENOMEM;
4356     }
4357     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4358     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4359     ret = -TARGET_ENOSYS;
4360 #ifdef __NR_msgsnd
4361     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4362 #endif
4363 #ifdef __NR_ipc
4364     if (ret == -TARGET_ENOSYS) {
4365 #ifdef __s390x__
4366         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4367                                  host_mb));
4368 #else
4369         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4370                                  host_mb, 0));
4371 #endif
4372     }
4373 #endif
4374     g_free(host_mb);
4375     unlock_user_struct(target_mb, msgp, 0);
4376 
4377     return ret;
4378 }
4379 
4380 #ifdef __NR_ipc
4381 #if defined(__sparc__)
4382 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4383 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4384 #elif defined(__s390x__)
4385 /* The s390 sys_ipc variant has only five parameters.  */
4386 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4387     ((long int[]){(long int)__msgp, __msgtyp})
4388 #else
4389 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4390     ((long int[]){(long int)__msgp, __msgtyp}), 0
4391 #endif
4392 #endif
4393 
4394 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4395                                  ssize_t msgsz, abi_long msgtyp,
4396                                  int msgflg)
4397 {
4398     struct target_msgbuf *target_mb;
4399     char *target_mtext;
4400     struct msgbuf *host_mb;
4401     abi_long ret = 0;
4402 
4403     if (msgsz < 0) {
4404         return -TARGET_EINVAL;
4405     }
4406 
4407     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4408         return -TARGET_EFAULT;
4409 
4410     host_mb = g_try_malloc(msgsz + sizeof(long));
4411     if (!host_mb) {
4412         ret = -TARGET_ENOMEM;
4413         goto end;
4414     }
4415     ret = -TARGET_ENOSYS;
4416 #ifdef __NR_msgrcv
4417     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4418 #endif
4419 #ifdef __NR_ipc
4420     if (ret == -TARGET_ENOSYS) {
4421         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4422                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4423     }
4424 #endif
4425 
4426     if (ret > 0) {
4427         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4428         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4429         if (!target_mtext) {
4430             ret = -TARGET_EFAULT;
4431             goto end;
4432         }
4433         memcpy(target_mb->mtext, host_mb->mtext, ret);
4434         unlock_user(target_mtext, target_mtext_addr, ret);
4435     }
4436 
4437     target_mb->mtype = tswapal(host_mb->mtype);
4438 
4439 end:
4440     if (target_mb)
4441         unlock_user_struct(target_mb, msgp, 1);
4442     g_free(host_mb);
4443     return ret;
4444 }
4445 
4446 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4447                                                abi_ulong target_addr)
4448 {
4449     struct target_shmid_ds *target_sd;
4450 
4451     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4452         return -TARGET_EFAULT;
4453     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4454         return -TARGET_EFAULT;
4455     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4456     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4457     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4458     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4459     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4460     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4461     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4462     unlock_user_struct(target_sd, target_addr, 0);
4463     return 0;
4464 }
4465 
4466 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4467                                                struct shmid_ds *host_sd)
4468 {
4469     struct target_shmid_ds *target_sd;
4470 
4471     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4472         return -TARGET_EFAULT;
4473     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4474         return -TARGET_EFAULT;
4475     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4476     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4477     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4478     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4479     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4480     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4481     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4482     unlock_user_struct(target_sd, target_addr, 1);
4483     return 0;
4484 }
4485 
4486 struct  target_shminfo {
4487     abi_ulong shmmax;
4488     abi_ulong shmmin;
4489     abi_ulong shmmni;
4490     abi_ulong shmseg;
4491     abi_ulong shmall;
4492 };
4493 
4494 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4495                                               struct shminfo *host_shminfo)
4496 {
4497     struct target_shminfo *target_shminfo;
4498     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4499         return -TARGET_EFAULT;
4500     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4501     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4502     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4503     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4504     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4505     unlock_user_struct(target_shminfo, target_addr, 1);
4506     return 0;
4507 }
4508 
4509 struct target_shm_info {
4510     int used_ids;
4511     abi_ulong shm_tot;
4512     abi_ulong shm_rss;
4513     abi_ulong shm_swp;
4514     abi_ulong swap_attempts;
4515     abi_ulong swap_successes;
4516 };
4517 
4518 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4519                                                struct shm_info *host_shm_info)
4520 {
4521     struct target_shm_info *target_shm_info;
4522     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4523         return -TARGET_EFAULT;
4524     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4525     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4526     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4527     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4528     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4529     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4530     unlock_user_struct(target_shm_info, target_addr, 1);
4531     return 0;
4532 }
4533 
4534 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4535 {
4536     struct shmid_ds dsarg;
4537     struct shminfo shminfo;
4538     struct shm_info shm_info;
4539     abi_long ret = -TARGET_EINVAL;
4540 
4541     cmd &= 0xff;
4542 
4543     switch(cmd) {
4544     case IPC_STAT:
4545     case IPC_SET:
4546     case SHM_STAT:
4547         if (target_to_host_shmid_ds(&dsarg, buf))
4548             return -TARGET_EFAULT;
4549         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4550         if (host_to_target_shmid_ds(buf, &dsarg))
4551             return -TARGET_EFAULT;
4552         break;
4553     case IPC_INFO:
4554         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4555         if (host_to_target_shminfo(buf, &shminfo))
4556             return -TARGET_EFAULT;
4557         break;
4558     case SHM_INFO:
4559         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4560         if (host_to_target_shm_info(buf, &shm_info))
4561             return -TARGET_EFAULT;
4562         break;
4563     case IPC_RMID:
4564     case SHM_LOCK:
4565     case SHM_UNLOCK:
4566         ret = get_errno(shmctl(shmid, cmd, NULL));
4567         break;
4568     }
4569 
4570     return ret;
4571 }
4572 
4573 #ifndef TARGET_FORCE_SHMLBA
4574 /* For most architectures, SHMLBA is the same as the page size;
4575  * some architectures have larger values, in which case they should
4576  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4577  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4578  * and defining its own value for SHMLBA.
4579  *
4580  * The kernel also permits SHMLBA to be set by the architecture to a
4581  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4582  * this means that addresses are rounded to the large size if
4583  * SHM_RND is set but addresses not aligned to that size are not rejected
4584  * as long as they are at least page-aligned. Since the only architecture
4585  * which uses this is ia64, this code doesn't provide for that oddity.
4586  */
4587 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4588 {
4589     return TARGET_PAGE_SIZE;
4590 }
4591 #endif
4592 
4593 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4594                                  int shmid, abi_ulong shmaddr, int shmflg)
4595 {
4596     abi_long raddr;
4597     void *host_raddr;
4598     struct shmid_ds shm_info;
4599     int i,ret;
4600     abi_ulong shmlba;
4601 
4602     /* find out the length of the shared memory segment */
4603     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4604     if (is_error(ret)) {
4605         /* can't get length, bail out */
4606         return ret;
4607     }
4608 
4609     shmlba = target_shmlba(cpu_env);
4610 
4611     if (shmaddr & (shmlba - 1)) {
4612         if (shmflg & SHM_RND) {
4613             shmaddr &= ~(shmlba - 1);
4614         } else {
4615             return -TARGET_EINVAL;
4616         }
4617     }
4618     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4619         return -TARGET_EINVAL;
4620     }
4621 
4622     mmap_lock();
4623 
4624     if (shmaddr)
4625         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4626     else {
4627         abi_ulong mmap_start;
4628 
4629         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4630         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4631 
4632         if (mmap_start == -1) {
4633             errno = ENOMEM;
4634             host_raddr = (void *)-1;
4635         } else
4636             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4637                                shmflg | SHM_REMAP);
4638     }
4639 
4640     if (host_raddr == (void *)-1) {
4641         mmap_unlock();
4642         return get_errno((long)host_raddr);
4643     }
4644     raddr = h2g((unsigned long)host_raddr);
4645 
4646     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4647                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4648                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4649 
4650     for (i = 0; i < N_SHM_REGIONS; i++) {
4651         if (!shm_regions[i].in_use) {
4652             shm_regions[i].in_use = true;
4653             shm_regions[i].start = raddr;
4654             shm_regions[i].size = shm_info.shm_segsz;
4655             break;
4656         }
4657     }
4658 
4659     mmap_unlock();
4660     return raddr;
4661 
4662 }
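
/*
 * Guest-side view (illustrative sketch only, not QEMU code): a guest
 * program using the standard SysV API ends up in do_shmat() above.  With
 * shmaddr == NULL, QEMU picks a suitable guest address itself via
 * mmap_find_vma(); with a non-NULL address, alignment is checked against
 * target_shmlba() as described above.
 *
 *     int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *     void *p = shmat(id, NULL, 0);   // shmaddr == 0: QEMU chooses the vma
 *     ...
 *     shmdt(p);                       // handled by do_shmdt() below
 */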
4663 
4664 static inline abi_long do_shmdt(abi_ulong shmaddr)
4665 {
4666     int i;
4667     abi_long rv;
4668 
4669     mmap_lock();
4670 
4671     for (i = 0; i < N_SHM_REGIONS; ++i) {
4672         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4673             shm_regions[i].in_use = false;
4674             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4675             break;
4676         }
4677     }
4678     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4679 
4680     mmap_unlock();
4681 
4682     return rv;
4683 }
4684 
4685 #ifdef TARGET_NR_ipc
4686 /* ??? This only works with linear mappings.  */
4687 /* do_ipc() must return target values and target errnos. */
4688 static abi_long do_ipc(CPUArchState *cpu_env,
4689                        unsigned int call, abi_long first,
4690                        abi_long second, abi_long third,
4691                        abi_long ptr, abi_long fifth)
4692 {
4693     int version;
4694     abi_long ret = 0;
4695 
4696     version = call >> 16;
4697     call &= 0xffff;
4698 
4699     switch (call) {
4700     case IPCOP_semop:
4701         ret = do_semtimedop(first, ptr, second, 0, false);
4702         break;
4703     case IPCOP_semtimedop:
4704     /*
4705      * The s390 sys_ipc variant has only five parameters instead of six
4706      * (as in the generic variant); the only difference is the handling
4707      * of SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4708      * to a struct timespec while the generic variant uses the fifth parameter.
4709      */
4710 #if defined(TARGET_S390X)
4711         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4712 #else
4713         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4714 #endif
4715         break;
4716 
4717     case IPCOP_semget:
4718         ret = get_errno(semget(first, second, third));
4719         break;
4720 
4721     case IPCOP_semctl: {
4722         /* The semun argument to semctl is passed by value, so dereference the
4723          * ptr argument. */
4724         abi_ulong atptr;
4725         get_user_ual(atptr, ptr);
4726         ret = do_semctl(first, second, third, atptr);
4727         break;
4728     }
4729 
4730     case IPCOP_msgget:
4731         ret = get_errno(msgget(first, second));
4732         break;
4733 
4734     case IPCOP_msgsnd:
4735         ret = do_msgsnd(first, ptr, second, third);
4736         break;
4737 
4738     case IPCOP_msgctl:
4739         ret = do_msgctl(first, second, ptr);
4740         break;
4741 
4742     case IPCOP_msgrcv:
4743         switch (version) {
4744         case 0:
4745             {
4746                 struct target_ipc_kludge {
4747                     abi_long msgp;
4748                     abi_long msgtyp;
4749                 } *tmp;
4750 
4751                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4752                     ret = -TARGET_EFAULT;
4753                     break;
4754                 }
4755 
4756                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4757 
4758                 unlock_user_struct(tmp, ptr, 0);
4759                 break;
4760             }
4761         default:
4762             ret = do_msgrcv(first, ptr, second, fifth, third);
4763         }
4764         break;
4765 
4766     case IPCOP_shmat:
4767         switch (version) {
4768         default:
4769         {
4770             abi_ulong raddr;
4771             raddr = do_shmat(cpu_env, first, ptr, second);
4772             if (is_error(raddr))
4773                 return get_errno(raddr);
4774             if (put_user_ual(raddr, third))
4775                 return -TARGET_EFAULT;
4776             break;
4777         }
4778         case 1:
4779             ret = -TARGET_EINVAL;
4780             break;
4781         }
4782         break;
4783     case IPCOP_shmdt:
4784         ret = do_shmdt(ptr);
4785         break;
4786 
4787     case IPCOP_shmget:
4788         /* IPC_* flag values are the same on all Linux platforms */
4789         ret = get_errno(shmget(first, second, third));
4790         break;
4791 
4792         /* IPC_* and SHM_* command values are the same on all Linux platforms */
4793     case IPCOP_shmctl:
4794         ret = do_shmctl(first, second, ptr);
4795         break;
4796     default:
4797         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4798                       call, version);
4799         ret = -TARGET_ENOSYS;
4800         break;
4801     }
4802     return ret;
4803 }
4804 #endif
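
/*
 * Illustrative note (not part of the original source): for targets that
 * use the multiplexed ipc(2) syscall, the guest packs an IPC interface
 * version into the upper 16 bits of the call number, which do_ipc()
 * above unpacks as:
 *
 *     version = call >> 16;     // IPC interface version from the guest
 *     call   &= 0xffff;         // the actual IPCOP_* operation
 *
 * Targets that provide separate shmat/semop/msgrcv syscalls reach
 * do_shmat(), do_semtimedop() etc. directly and never use this multiplexer.
 */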
4805 
4806 /* kernel structure types definitions */
4807 
4808 #define STRUCT(name, ...) STRUCT_ ## name,
4809 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4810 enum {
4811 #include "syscall_types.h"
4812 STRUCT_MAX
4813 };
4814 #undef STRUCT
4815 #undef STRUCT_SPECIAL
4816 
4817 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4818 #define STRUCT_SPECIAL(name)
4819 #include "syscall_types.h"
4820 #undef STRUCT
4821 #undef STRUCT_SPECIAL
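
/*
 * Illustrative note (not part of the original source): syscall_types.h is
 * included twice above with two different definitions of the STRUCT()
 * macro.  The first pass turns every entry into an enum value
 * (STRUCT_foo), the second into an argtype array describing the layout
 * for the thunking code.  A hypothetical entry of the form
 *
 *     STRUCT(foo, TYPE_INT, TYPE_SHORT)
 *
 * would therefore expand to an enumerator STRUCT_foo and to
 *
 *     static const argtype struct_foo_def[] = { TYPE_INT, TYPE_SHORT, TYPE_NULL };
 */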
4822 
4823 #define MAX_STRUCT_SIZE 4096
4824 
4825 #ifdef CONFIG_FIEMAP
4826 /* So fiemap access checks don't overflow on 32 bit systems.
4827  * This is very slightly smaller than the limit imposed by
4828  * the underlying kernel.
4829  */
4830 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4831                             / sizeof(struct fiemap_extent))
4832 
4833 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4834                                        int fd, int cmd, abi_long arg)
4835 {
4836     /* The parameter for this ioctl is a struct fiemap followed
4837      * by an array of struct fiemap_extent whose size is set
4838      * in fiemap->fm_extent_count. The array is filled in by the
4839      * ioctl.
4840      */
4841     int target_size_in, target_size_out;
4842     struct fiemap *fm;
4843     const argtype *arg_type = ie->arg_type;
4844     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4845     void *argptr, *p;
4846     abi_long ret;
4847     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4848     uint32_t outbufsz;
4849     int free_fm = 0;
4850 
4851     assert(arg_type[0] == TYPE_PTR);
4852     assert(ie->access == IOC_RW);
4853     arg_type++;
4854     target_size_in = thunk_type_size(arg_type, 0);
4855     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4856     if (!argptr) {
4857         return -TARGET_EFAULT;
4858     }
4859     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4860     unlock_user(argptr, arg, 0);
4861     fm = (struct fiemap *)buf_temp;
4862     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4863         return -TARGET_EINVAL;
4864     }
4865 
4866     outbufsz = sizeof (*fm) +
4867         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4868 
4869     if (outbufsz > MAX_STRUCT_SIZE) {
4870         /* We can't fit all the extents into the fixed size buffer.
4871          * Allocate one that is large enough and use it instead.
4872          */
4873         fm = g_try_malloc(outbufsz);
4874         if (!fm) {
4875             return -TARGET_ENOMEM;
4876         }
4877         memcpy(fm, buf_temp, sizeof(struct fiemap));
4878         free_fm = 1;
4879     }
4880     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4881     if (!is_error(ret)) {
4882         target_size_out = target_size_in;
4883         /* An extent_count of 0 means we were only counting the extents
4884          * so there are no structs to copy
4885          */
4886         if (fm->fm_extent_count != 0) {
4887             target_size_out += fm->fm_mapped_extents * extent_size;
4888         }
4889         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4890         if (!argptr) {
4891             ret = -TARGET_EFAULT;
4892         } else {
4893             /* Convert the struct fiemap */
4894             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4895             if (fm->fm_extent_count != 0) {
4896                 p = argptr + target_size_in;
4897                 /* ...and then all the struct fiemap_extents */
4898                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4899                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4900                                   THUNK_TARGET);
4901                     p += extent_size;
4902                 }
4903             }
4904             unlock_user(argptr, arg, target_size_out);
4905         }
4906     }
4907     if (free_fm) {
4908         g_free(fm);
4909     }
4910     return ret;
4911 }
4912 #endif
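
/*
 * Guest-side view (illustrative sketch, not QEMU code): the variable-length
 * layout handled above is what a guest builds when it issues the fiemap
 * ioctl, e.g.:
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                   n * sizeof(struct fiemap_extent));
 *     fm->fm_length = FIEMAP_MAX_OFFSET;
 *     fm->fm_extent_count = n;            // 0 means "just count extents"
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * which is why the handler sizes its bounce buffer from fm_extent_count
 * and copies back fm_mapped_extents extent records.
 */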
4913 
4914 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4915                                 int fd, int cmd, abi_long arg)
4916 {
4917     const argtype *arg_type = ie->arg_type;
4918     int target_size;
4919     void *argptr;
4920     int ret;
4921     struct ifconf *host_ifconf;
4922     uint32_t outbufsz;
4923     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4924     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4925     int target_ifreq_size;
4926     int nb_ifreq;
4927     int free_buf = 0;
4928     int i;
4929     int target_ifc_len;
4930     abi_long target_ifc_buf;
4931     int host_ifc_len;
4932     char *host_ifc_buf;
4933 
4934     assert(arg_type[0] == TYPE_PTR);
4935     assert(ie->access == IOC_RW);
4936 
4937     arg_type++;
4938     target_size = thunk_type_size(arg_type, 0);
4939 
4940     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4941     if (!argptr)
4942         return -TARGET_EFAULT;
4943     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4944     unlock_user(argptr, arg, 0);
4945 
4946     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4947     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4948     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4949 
4950     if (target_ifc_buf != 0) {
4951         target_ifc_len = host_ifconf->ifc_len;
4952         nb_ifreq = target_ifc_len / target_ifreq_size;
4953         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4954 
4955         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4956         if (outbufsz > MAX_STRUCT_SIZE) {
4957             /*
4958              * We can't fit all the ifreq entries into the fixed size buffer.
4959              * Allocate one that is large enough and use it instead.
4960              */
4961             host_ifconf = malloc(outbufsz);
4962             if (!host_ifconf) {
4963                 return -TARGET_ENOMEM;
4964             }
4965             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4966             free_buf = 1;
4967         }
4968         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4969 
4970         host_ifconf->ifc_len = host_ifc_len;
4971     } else {
4972         host_ifc_buf = NULL;
4973     }
4974     host_ifconf->ifc_buf = host_ifc_buf;
4975 
4976     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4977     if (!is_error(ret)) {
4978         /* convert host ifc_len to target ifc_len */
4979 
4980         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4981         target_ifc_len = nb_ifreq * target_ifreq_size;
4982         host_ifconf->ifc_len = target_ifc_len;
4983 
4984         /* restore target ifc_buf */
4985 
4986         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4987 
4988         /* copy struct ifconf to target user */
4989 
4990         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4991         if (!argptr)
4992             return -TARGET_EFAULT;
4993         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4994         unlock_user(argptr, arg, target_size);
4995 
4996         if (target_ifc_buf != 0) {
4997             /* copy ifreq[] to target user */
4998             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4999             for (i = 0; i < nb_ifreq ; i++) {
5000                 thunk_convert(argptr + i * target_ifreq_size,
5001                               host_ifc_buf + i * sizeof(struct ifreq),
5002                               ifreq_arg_type, THUNK_TARGET);
5003             }
5004             unlock_user(argptr, target_ifc_buf, target_ifc_len);
5005         }
5006     }
5007 
5008     if (free_buf) {
5009         free(host_ifconf);
5010     }
5011 
5012     return ret;
5013 }
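
/*
 * Guest-side view (illustrative sketch, not QEMU code): SIOCGIFCONF takes
 * a struct ifconf whose ifc_buf points at a caller-supplied array of
 * struct ifreq, which is why ifc_buf and ifc_len above need the extra
 * pointer and length fix-ups before and after the host ioctl:
 *
 *     struct ifreq reqs[8];
 *     struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
 *     ioctl(sock, SIOCGIFCONF, &ifc);
 *     // on return, ifc.ifc_len holds the number of bytes actually filled
 */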
5014 
5015 #if defined(CONFIG_USBFS)
5016 #if HOST_LONG_BITS > 64
5017 #error USBDEVFS thunks do not support >64 bit hosts yet.
5018 #endif
5019 struct live_urb {
5020     uint64_t target_urb_adr;
5021     uint64_t target_buf_adr;
5022     char *target_buf_ptr;
5023     struct usbdevfs_urb host_urb;
5024 };
5025 
5026 static GHashTable *usbdevfs_urb_hashtable(void)
5027 {
5028     static GHashTable *urb_hashtable;
5029 
5030     if (!urb_hashtable) {
5031         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5032     }
5033     return urb_hashtable;
5034 }
5035 
5036 static void urb_hashtable_insert(struct live_urb *urb)
5037 {
5038     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5039     g_hash_table_insert(urb_hashtable, urb, urb);
5040 }
5041 
5042 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5043 {
5044     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5045     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5046 }
5047 
5048 static void urb_hashtable_remove(struct live_urb *urb)
5049 {
5050     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5051     g_hash_table_remove(urb_hashtable, urb);
5052 }
5053 
5054 static abi_long
5055 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5056                           int fd, int cmd, abi_long arg)
5057 {
5058     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5059     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5060     struct live_urb *lurb;
5061     void *argptr;
5062     uint64_t hurb;
5063     int target_size;
5064     uintptr_t target_urb_adr;
5065     abi_long ret;
5066 
5067     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5068 
5069     memset(buf_temp, 0, sizeof(uint64_t));
5070     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5071     if (is_error(ret)) {
5072         return ret;
5073     }
5074 
5075     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5076     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5077     if (!lurb->target_urb_adr) {
5078         return -TARGET_EFAULT;
5079     }
5080     urb_hashtable_remove(lurb);
5081     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5082         lurb->host_urb.buffer_length);
5083     lurb->target_buf_ptr = NULL;
5084 
5085     /* restore the guest buffer pointer */
5086     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5087 
5088     /* update the guest urb struct */
5089     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5090     if (!argptr) {
5091         g_free(lurb);
5092         return -TARGET_EFAULT;
5093     }
5094     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5095     unlock_user(argptr, lurb->target_urb_adr, target_size);
5096 
5097     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5098     /* write back the urb handle */
5099     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5100     if (!argptr) {
5101         g_free(lurb);
5102         return -TARGET_EFAULT;
5103     }
5104 
5105     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5106     target_urb_adr = lurb->target_urb_adr;
5107     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5108     unlock_user(argptr, arg, target_size);
5109 
5110     g_free(lurb);
5111     return ret;
5112 }
5113 
5114 static abi_long
5115 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5116                              uint8_t *buf_temp __attribute__((unused)),
5117                              int fd, int cmd, abi_long arg)
5118 {
5119     struct live_urb *lurb;
5120 
5121     /* map target address back to host URB with metadata. */
5122     lurb = urb_hashtable_lookup(arg);
5123     if (!lurb) {
5124         return -TARGET_EFAULT;
5125     }
5126     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5127 }
5128 
5129 static abi_long
5130 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5131                             int fd, int cmd, abi_long arg)
5132 {
5133     const argtype *arg_type = ie->arg_type;
5134     int target_size;
5135     abi_long ret;
5136     void *argptr;
5137     int rw_dir;
5138     struct live_urb *lurb;
5139 
5140     /*
5141      * Each submitted URB needs to map to a unique ID for the
5142      * kernel, and that unique ID needs to be a pointer to
5143      * host memory.  Hence, we need to malloc for each URB.
5144      * Isochronous transfers have a variable-length struct.
5145      */
5146     arg_type++;
5147     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5148 
5149     /* construct host copy of urb and metadata */
5150     lurb = g_try_malloc0(sizeof(struct live_urb));
5151     if (!lurb) {
5152         return -TARGET_ENOMEM;
5153     }
5154 
5155     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5156     if (!argptr) {
5157         g_free(lurb);
5158         return -TARGET_EFAULT;
5159     }
5160     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5161     unlock_user(argptr, arg, 0);
5162 
5163     lurb->target_urb_adr = arg;
5164     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5165 
5166     /* buffer space used depends on endpoint type so lock the entire buffer */
5167     /* control type urbs should check the buffer contents for true direction */
5168     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5169     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5170         lurb->host_urb.buffer_length, 1);
5171     if (lurb->target_buf_ptr == NULL) {
5172         g_free(lurb);
5173         return -TARGET_EFAULT;
5174     }
5175 
5176     /* update buffer pointer in host copy */
5177     lurb->host_urb.buffer = lurb->target_buf_ptr;
5178 
5179     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5180     if (is_error(ret)) {
5181         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5182         g_free(lurb);
5183     } else {
5184         urb_hashtable_insert(lurb);
5185     }
5186 
5187     return ret;
5188 }
5189 #endif /* CONFIG_USBFS */
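
/*
 * Illustrative note on the USBDEVFS URB handling above (not part of the
 * original source): SUBMITURB allocates a struct live_urb per guest URB
 * and keys it in a hash table by the guest address of the URB.  The host
 * kernel is handed &lurb->host_urb, so REAPURB can recover the wrapper
 * with the usual container-of arithmetic:
 *
 *     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 * DISCARDURB instead looks the wrapper up by the guest URB address it was
 * submitted with.
 */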
5190 
5191 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5192                             int cmd, abi_long arg)
5193 {
5194     void *argptr;
5195     struct dm_ioctl *host_dm;
5196     abi_long guest_data;
5197     uint32_t guest_data_size;
5198     int target_size;
5199     const argtype *arg_type = ie->arg_type;
5200     abi_long ret;
5201     void *big_buf = NULL;
5202     char *host_data;
5203 
5204     arg_type++;
5205     target_size = thunk_type_size(arg_type, 0);
5206     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5207     if (!argptr) {
5208         ret = -TARGET_EFAULT;
5209         goto out;
5210     }
5211     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5212     unlock_user(argptr, arg, 0);
5213 
5214     /* buf_temp is too small, so fetch things into a bigger buffer */
5215     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5216     memcpy(big_buf, buf_temp, target_size);
5217     buf_temp = big_buf;
5218     host_dm = big_buf;
5219 
5220     guest_data = arg + host_dm->data_start;
5221     if ((guest_data - arg) < 0) {
5222         ret = -TARGET_EINVAL;
5223         goto out;
5224     }
5225     guest_data_size = host_dm->data_size - host_dm->data_start;
5226     host_data = (char*)host_dm + host_dm->data_start;
5227 
5228     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5229     if (!argptr) {
5230         ret = -TARGET_EFAULT;
5231         goto out;
5232     }
5233 
5234     switch (ie->host_cmd) {
5235     case DM_REMOVE_ALL:
5236     case DM_LIST_DEVICES:
5237     case DM_DEV_CREATE:
5238     case DM_DEV_REMOVE:
5239     case DM_DEV_SUSPEND:
5240     case DM_DEV_STATUS:
5241     case DM_DEV_WAIT:
5242     case DM_TABLE_STATUS:
5243     case DM_TABLE_CLEAR:
5244     case DM_TABLE_DEPS:
5245     case DM_LIST_VERSIONS:
5246         /* no input data */
5247         break;
5248     case DM_DEV_RENAME:
5249     case DM_DEV_SET_GEOMETRY:
5250         /* data contains only strings */
5251         memcpy(host_data, argptr, guest_data_size);
5252         break;
5253     case DM_TARGET_MSG:
5254         memcpy(host_data, argptr, guest_data_size);
5255         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5256         break;
5257     case DM_TABLE_LOAD:
5258     {
5259         void *gspec = argptr;
5260         void *cur_data = host_data;
5261         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5262         int spec_size = thunk_type_size(arg_type, 0);
5263         int i;
5264 
5265         for (i = 0; i < host_dm->target_count; i++) {
5266             struct dm_target_spec *spec = cur_data;
5267             uint32_t next;
5268             int slen;
5269 
5270             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5271             slen = strlen((char*)gspec + spec_size) + 1;
5272             next = spec->next;
5273             spec->next = sizeof(*spec) + slen;
5274             strcpy((char*)&spec[1], gspec + spec_size);
5275             gspec += next;
5276             cur_data += spec->next;
5277         }
5278         break;
5279     }
5280     default:
5281         ret = -TARGET_EINVAL;
5282         unlock_user(argptr, guest_data, 0);
5283         goto out;
5284     }
5285     unlock_user(argptr, guest_data, 0);
5286 
5287     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5288     if (!is_error(ret)) {
5289         guest_data = arg + host_dm->data_start;
5290         guest_data_size = host_dm->data_size - host_dm->data_start;
5291         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5292         switch (ie->host_cmd) {
5293         case DM_REMOVE_ALL:
5294         case DM_DEV_CREATE:
5295         case DM_DEV_REMOVE:
5296         case DM_DEV_RENAME:
5297         case DM_DEV_SUSPEND:
5298         case DM_DEV_STATUS:
5299         case DM_TABLE_LOAD:
5300         case DM_TABLE_CLEAR:
5301         case DM_TARGET_MSG:
5302         case DM_DEV_SET_GEOMETRY:
5303             /* no return data */
5304             break;
5305         case DM_LIST_DEVICES:
5306         {
5307             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5308             uint32_t remaining_data = guest_data_size;
5309             void *cur_data = argptr;
5310             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5311             int nl_size = 12; /* can't use thunk_size due to alignment */
5312 
5313             while (1) {
5314                 uint32_t next = nl->next;
5315                 if (next) {
5316                     nl->next = nl_size + (strlen(nl->name) + 1);
5317                 }
5318                 if (remaining_data < nl->next) {
5319                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5320                     break;
5321                 }
5322                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5323                 strcpy(cur_data + nl_size, nl->name);
5324                 cur_data += nl->next;
5325                 remaining_data -= nl->next;
5326                 if (!next) {
5327                     break;
5328                 }
5329                 nl = (void*)nl + next;
5330             }
5331             break;
5332         }
5333         case DM_DEV_WAIT:
5334         case DM_TABLE_STATUS:
5335         {
5336             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5337             void *cur_data = argptr;
5338             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5339             int spec_size = thunk_type_size(arg_type, 0);
5340             int i;
5341 
5342             for (i = 0; i < host_dm->target_count; i++) {
5343                 uint32_t next = spec->next;
5344                 int slen = strlen((char*)&spec[1]) + 1;
5345                 spec->next = (cur_data - argptr) + spec_size + slen;
5346                 if (guest_data_size < spec->next) {
5347                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5348                     break;
5349                 }
5350                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5351                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5352                 cur_data = argptr + spec->next;
5353                 spec = (void*)host_dm + host_dm->data_start + next;
5354             }
5355             break;
5356         }
5357         case DM_TABLE_DEPS:
5358         {
5359             void *hdata = (void*)host_dm + host_dm->data_start;
5360             int count = *(uint32_t*)hdata;
5361             uint64_t *hdev = hdata + 8;
5362             uint64_t *gdev = argptr + 8;
5363             int i;
5364 
5365             *(uint32_t*)argptr = tswap32(count);
5366             for (i = 0; i < count; i++) {
5367                 *gdev = tswap64(*hdev);
5368                 gdev++;
5369                 hdev++;
5370             }
5371             break;
5372         }
5373         case DM_LIST_VERSIONS:
5374         {
5375             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5376             uint32_t remaining_data = guest_data_size;
5377             void *cur_data = argptr;
5378             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5379             int vers_size = thunk_type_size(arg_type, 0);
5380 
5381             while (1) {
5382                 uint32_t next = vers->next;
5383                 if (next) {
5384                     vers->next = vers_size + (strlen(vers->name) + 1);
5385                 }
5386                 if (remaining_data < vers->next) {
5387                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5388                     break;
5389                 }
5390                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5391                 strcpy(cur_data + vers_size, vers->name);
5392                 cur_data += vers->next;
5393                 remaining_data -= vers->next;
5394                 if (!next) {
5395                     break;
5396                 }
5397                 vers = (void*)vers + next;
5398             }
5399             break;
5400         }
5401         default:
5402             unlock_user(argptr, guest_data, 0);
5403             ret = -TARGET_EINVAL;
5404             goto out;
5405         }
5406         unlock_user(argptr, guest_data, guest_data_size);
5407 
5408         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5409         if (!argptr) {
5410             ret = -TARGET_EFAULT;
5411             goto out;
5412         }
5413         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5414         unlock_user(argptr, arg, target_size);
5415     }
5416 out:
5417     g_free(big_buf);
5418     return ret;
5419 }
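
/*
 * Illustrative note (not part of the original source): every dm_ioctl
 * request is a single buffer with a fixed struct dm_ioctl header followed
 * by command-specific payload; data_start is the offset of that payload
 * from the start of the header and data_size is the total buffer size.
 * That is why the handler above converts the header through the thunk
 * layer but walks the payload (target specs, name lists, version lists)
 * by hand, fixing up the 'next' offsets because target and host string
 * layouts differ.
 */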
5420 
5421 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5422                                int cmd, abi_long arg)
5423 {
5424     void *argptr;
5425     int target_size;
5426     const argtype *arg_type = ie->arg_type;
5427     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5428     abi_long ret;
5429 
5430     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5431     struct blkpg_partition host_part;
5432 
5433     /* Read and convert blkpg */
5434     arg_type++;
5435     target_size = thunk_type_size(arg_type, 0);
5436     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5437     if (!argptr) {
5438         ret = -TARGET_EFAULT;
5439         goto out;
5440     }
5441     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5442     unlock_user(argptr, arg, 0);
5443 
5444     switch (host_blkpg->op) {
5445     case BLKPG_ADD_PARTITION:
5446     case BLKPG_DEL_PARTITION:
5447         /* payload is struct blkpg_partition */
5448         break;
5449     default:
5450         /* Unknown opcode */
5451         ret = -TARGET_EINVAL;
5452         goto out;
5453     }
5454 
5455     /* Read and convert blkpg->data */
5456     arg = (abi_long)(uintptr_t)host_blkpg->data;
5457     target_size = thunk_type_size(part_arg_type, 0);
5458     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5459     if (!argptr) {
5460         ret = -TARGET_EFAULT;
5461         goto out;
5462     }
5463     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5464     unlock_user(argptr, arg, 0);
5465 
5466     /* Swizzle the data pointer to our local copy and call! */
5467     host_blkpg->data = &host_part;
5468     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5469 
5470 out:
5471     return ret;
5472 }
5473 
5474 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5475                                 int fd, int cmd, abi_long arg)
5476 {
5477     const argtype *arg_type = ie->arg_type;
5478     const StructEntry *se;
5479     const argtype *field_types;
5480     const int *dst_offsets, *src_offsets;
5481     int target_size;
5482     void *argptr;
5483     abi_ulong *target_rt_dev_ptr = NULL;
5484     unsigned long *host_rt_dev_ptr = NULL;
5485     abi_long ret;
5486     int i;
5487 
5488     assert(ie->access == IOC_W);
5489     assert(*arg_type == TYPE_PTR);
5490     arg_type++;
5491     assert(*arg_type == TYPE_STRUCT);
5492     target_size = thunk_type_size(arg_type, 0);
5493     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5494     if (!argptr) {
5495         return -TARGET_EFAULT;
5496     }
5497     arg_type++;
5498     assert(*arg_type == (int)STRUCT_rtentry);
5499     se = struct_entries + *arg_type++;
5500     assert(se->convert[0] == NULL);
5501     /* convert struct here to be able to catch rt_dev string */
5502     field_types = se->field_types;
5503     dst_offsets = se->field_offsets[THUNK_HOST];
5504     src_offsets = se->field_offsets[THUNK_TARGET];
5505     for (i = 0; i < se->nb_fields; i++) {
5506         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5507             assert(*field_types == TYPE_PTRVOID);
5508             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5509             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5510             if (*target_rt_dev_ptr != 0) {
5511                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5512                                                   tswapal(*target_rt_dev_ptr));
5513                 if (!*host_rt_dev_ptr) {
5514                     unlock_user(argptr, arg, 0);
5515                     return -TARGET_EFAULT;
5516                 }
5517             } else {
5518                 *host_rt_dev_ptr = 0;
5519             }
5520             field_types++;
5521             continue;
5522         }
5523         field_types = thunk_convert(buf_temp + dst_offsets[i],
5524                                     argptr + src_offsets[i],
5525                                     field_types, THUNK_HOST);
5526     }
5527     unlock_user(argptr, arg, 0);
5528 
5529     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5530 
5531     assert(host_rt_dev_ptr != NULL);
5532     assert(target_rt_dev_ptr != NULL);
5533     if (*host_rt_dev_ptr != 0) {
5534         unlock_user((void *)*host_rt_dev_ptr,
5535                     *target_rt_dev_ptr, 0);
5536     }
5537     return ret;
5538 }
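
/*
 * Guest-side view (illustrative sketch, not QEMU code): struct rtentry
 * embeds a pointer to a device name, which is why the handler above
 * cannot use the generic thunk path and locks the rt_dev string
 * separately:
 *
 *     struct rtentry rt = { 0 };
 *     struct sockaddr_in *dst = (struct sockaddr_in *)&rt.rt_dst;
 *     dst->sin_family = AF_INET;
 *     rt.rt_flags = RTF_UP;
 *     rt.rt_dev = "eth0";                 // pointer inside the struct
 *     ioctl(sock, SIOCADDRT, &rt);
 */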
5539 
5540 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5541                                      int fd, int cmd, abi_long arg)
5542 {
5543     int sig = target_to_host_signal(arg);
5544     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5545 }
5546 
5547 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5548                                     int fd, int cmd, abi_long arg)
5549 {
5550     struct timeval tv;
5551     abi_long ret;
5552 
5553     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5554     if (is_error(ret)) {
5555         return ret;
5556     }
5557 
5558     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5559         if (copy_to_user_timeval(arg, &tv)) {
5560             return -TARGET_EFAULT;
5561         }
5562     } else {
5563         if (copy_to_user_timeval64(arg, &tv)) {
5564             return -TARGET_EFAULT;
5565         }
5566     }
5567 
5568     return ret;
5569 }
5570 
5571 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5572                                       int fd, int cmd, abi_long arg)
5573 {
5574     struct timespec ts;
5575     abi_long ret;
5576 
5577     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5578     if (is_error(ret)) {
5579         return ret;
5580     }
5581 
5582     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5583         if (host_to_target_timespec(arg, &ts)) {
5584             return -TARGET_EFAULT;
5585         }
5586     } else {
5587         if (host_to_target_timespec64(arg, &ts)) {
5588             return -TARGET_EFAULT;
5589         }
5590     }
5591 
5592     return ret;
5593 }
5594 
5595 #ifdef TIOCGPTPEER
5596 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5597                                      int fd, int cmd, abi_long arg)
5598 {
5599     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5600     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5601 }
5602 #endif
5603 
5604 #ifdef HAVE_DRM_H
5605 
5606 static void unlock_drm_version(struct drm_version *host_ver,
5607                                struct target_drm_version *target_ver,
5608                                bool copy)
5609 {
5610     unlock_user(host_ver->name, target_ver->name,
5611                                 copy ? host_ver->name_len : 0);
5612     unlock_user(host_ver->date, target_ver->date,
5613                                 copy ? host_ver->date_len : 0);
5614     unlock_user(host_ver->desc, target_ver->desc,
5615                                 copy ? host_ver->desc_len : 0);
5616 }
5617 
5618 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5619                                           struct target_drm_version *target_ver)
5620 {
5621     memset(host_ver, 0, sizeof(*host_ver));
5622 
5623     __get_user(host_ver->name_len, &target_ver->name_len);
5624     if (host_ver->name_len) {
5625         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5626                                    target_ver->name_len, 0);
5627         if (!host_ver->name) {
5628             return -EFAULT;
5629         }
5630     }
5631 
5632     __get_user(host_ver->date_len, &target_ver->date_len);
5633     if (host_ver->date_len) {
5634         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5635                                    target_ver->date_len, 0);
5636         if (!host_ver->date) {
5637             goto err;
5638         }
5639     }
5640 
5641     __get_user(host_ver->desc_len, &target_ver->desc_len);
5642     if (host_ver->desc_len) {
5643         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5644                                    target_ver->desc_len, 0);
5645         if (!host_ver->desc) {
5646             goto err;
5647         }
5648     }
5649 
5650     return 0;
5651 err:
5652     unlock_drm_version(host_ver, target_ver, false);
5653     return -EFAULT;
5654 }
5655 
5656 static inline void host_to_target_drmversion(
5657                                           struct target_drm_version *target_ver,
5658                                           struct drm_version *host_ver)
5659 {
5660     __put_user(host_ver->version_major, &target_ver->version_major);
5661     __put_user(host_ver->version_minor, &target_ver->version_minor);
5662     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5663     __put_user(host_ver->name_len, &target_ver->name_len);
5664     __put_user(host_ver->date_len, &target_ver->date_len);
5665     __put_user(host_ver->desc_len, &target_ver->desc_len);
5666     unlock_drm_version(host_ver, target_ver, true);
5667 }
5668 
5669 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5670                              int fd, int cmd, abi_long arg)
5671 {
5672     struct drm_version *ver;
5673     struct target_drm_version *target_ver;
5674     abi_long ret;
5675 
5676     switch (ie->host_cmd) {
5677     case DRM_IOCTL_VERSION:
5678         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5679             return -TARGET_EFAULT;
5680         }
5681         ver = (struct drm_version *)buf_temp;
5682         ret = target_to_host_drmversion(ver, target_ver);
5683         if (!is_error(ret)) {
5684             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5685             if (is_error(ret)) {
5686                 unlock_drm_version(ver, target_ver, false);
5687             } else {
5688                 host_to_target_drmversion(target_ver, ver);
5689             }
5690         }
5691         unlock_user_struct(target_ver, arg, 0);
5692         return ret;
5693     }
5694     return -TARGET_ENOSYS;
5695 }
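
/*
 * Guest-side view (illustrative sketch, not QEMU code): DRM_IOCTL_VERSION
 * is typically issued twice, which is what the lock/unlock dance above
 * has to support: a first call with NULL buffers just reports the string
 * lengths, then the caller allocates and repeats the ioctl:
 *
 *     struct drm_version v = { 0 };
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);       // fills *_len only
 *     v.name = malloc(v.name_len + 1);
 *     v.date = malloc(v.date_len + 1);
 *     v.desc = malloc(v.desc_len + 1);
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);       // fills the buffers
 */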
5696 
5697 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5698                                            struct drm_i915_getparam *gparam,
5699                                            int fd, abi_long arg)
5700 {
5701     abi_long ret;
5702     int value;
5703     struct target_drm_i915_getparam *target_gparam;
5704 
5705     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5706         return -TARGET_EFAULT;
5707     }
5708 
5709     __get_user(gparam->param, &target_gparam->param);
5710     gparam->value = &value;
5711     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5712     put_user_s32(value, target_gparam->value);
5713 
5714     unlock_user_struct(target_gparam, arg, 0);
5715     return ret;
5716 }
5717 
5718 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5719                                   int fd, int cmd, abi_long arg)
5720 {
5721     switch (ie->host_cmd) {
5722     case DRM_IOCTL_I915_GETPARAM:
5723         return do_ioctl_drm_i915_getparam(ie,
5724                                           (struct drm_i915_getparam *)buf_temp,
5725                                           fd, arg);
5726     default:
5727         return -TARGET_ENOSYS;
5728     }
5729 }
5730 
5731 #endif
5732 
5733 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5734                                         int fd, int cmd, abi_long arg)
5735 {
5736     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5737     struct tun_filter *target_filter;
5738     char *target_addr;
5739 
5740     assert(ie->access == IOC_W);
5741 
5742     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5743     if (!target_filter) {
5744         return -TARGET_EFAULT;
5745     }
5746     filter->flags = tswap16(target_filter->flags);
5747     filter->count = tswap16(target_filter->count);
5748     unlock_user(target_filter, arg, 0);
5749 
5750     if (filter->count) {
5751         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5752             MAX_STRUCT_SIZE) {
5753             return -TARGET_EFAULT;
5754         }
5755 
5756         target_addr = lock_user(VERIFY_READ,
5757                                 arg + offsetof(struct tun_filter, addr),
5758                                 filter->count * ETH_ALEN, 1);
5759         if (!target_addr) {
5760             return -TARGET_EFAULT;
5761         }
5762         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5763         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5764     }
5765 
5766     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5767 }
5768 
5769 IOCTLEntry ioctl_entries[] = {
5770 #define IOCTL(cmd, access, ...) \
5771     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5772 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5773     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5774 #define IOCTL_IGNORE(cmd) \
5775     { TARGET_ ## cmd, 0, #cmd },
5776 #include "ioctls.h"
5777     { 0, 0, },
5778 };
5779 
5780 /* ??? Implement proper locking for ioctls.  */
5781 /* do_ioctl() must return target values and target errnos. */
5782 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5783 {
5784     const IOCTLEntry *ie;
5785     const argtype *arg_type;
5786     abi_long ret;
5787     uint8_t buf_temp[MAX_STRUCT_SIZE];
5788     int target_size;
5789     void *argptr;
5790 
5791     ie = ioctl_entries;
5792     for(;;) {
5793         if (ie->target_cmd == 0) {
5794             qemu_log_mask(
5795                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5796             return -TARGET_ENOSYS;
5797         }
5798         if (ie->target_cmd == cmd)
5799             break;
5800         ie++;
5801     }
5802     arg_type = ie->arg_type;
5803     if (ie->do_ioctl) {
5804         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5805     } else if (!ie->host_cmd) {
5806         /* Some architectures define BSD ioctls in their headers
5807            that are not implemented in Linux.  */
5808         return -TARGET_ENOSYS;
5809     }
5810 
5811     switch(arg_type[0]) {
5812     case TYPE_NULL:
5813         /* no argument */
5814         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5815         break;
5816     case TYPE_PTRVOID:
5817     case TYPE_INT:
5818     case TYPE_LONG:
5819     case TYPE_ULONG:
5820         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5821         break;
5822     case TYPE_PTR:
5823         arg_type++;
5824         target_size = thunk_type_size(arg_type, 0);
5825         switch(ie->access) {
5826         case IOC_R:
5827             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5828             if (!is_error(ret)) {
5829                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5830                 if (!argptr)
5831                     return -TARGET_EFAULT;
5832                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5833                 unlock_user(argptr, arg, target_size);
5834             }
5835             break;
5836         case IOC_W:
5837             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5838             if (!argptr)
5839                 return -TARGET_EFAULT;
5840             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5841             unlock_user(argptr, arg, 0);
5842             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5843             break;
5844         default:
5845         case IOC_RW:
5846             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5847             if (!argptr)
5848                 return -TARGET_EFAULT;
5849             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5850             unlock_user(argptr, arg, 0);
5851             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5852             if (!is_error(ret)) {
5853                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5854                 if (!argptr)
5855                     return -TARGET_EFAULT;
5856                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5857                 unlock_user(argptr, arg, target_size);
5858             }
5859             break;
5860         }
5861         break;
5862     default:
5863         qemu_log_mask(LOG_UNIMP,
5864                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5865                       (long)cmd, arg_type[0]);
5866         ret = -TARGET_ENOSYS;
5867         break;
5868     }
5869     return ret;
5870 }
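
/*
 * Illustrative note (not part of the original source): ioctl_entries[] is
 * generated from ioctls.h via the IOCTL*() macros above.  A simple
 * read-only ioctl whose argument is a pointer to a struct would be
 * declared along the lines of
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * and is then handled entirely by the generic TYPE_PTR/IOC_R path in
 * do_ioctl(), with thunk_convert() copying the struct back to the guest
 * after the host ioctl succeeds.
 */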
5871 
5872 static const bitmask_transtbl iflag_tbl[] = {
5873         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5874         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5875         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5876         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5877         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5878         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5879         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5880         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5881         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5882         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5883         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5884         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5885         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5886         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5887         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5888         { 0, 0, 0, 0 }
5889 };
5890 
5891 static const bitmask_transtbl oflag_tbl[] = {
5892         { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5893         { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5894         { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5895         { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5896         { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5897         { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5898         { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5899         { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5900         { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5901         { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5902         { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5903         { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5904         { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5905         { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5906         { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5907         { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5908         { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5909         { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5910         { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5911         { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5912         { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5913         { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5914         { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5915         { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5916         { 0, 0, 0, 0 }
5917 };
5918 
5919 static const bitmask_transtbl cflag_tbl[] = {
5920         { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5921         { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5922         { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5923         { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5924         { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5925         { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5926         { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5927         { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5928         { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5929         { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5930         { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5931         { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5932         { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5933         { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5934         { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5935         { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5936         { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5937         { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5938         { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5939         { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5940         { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5941         { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5942         { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5943         { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5944         { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5945         { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5946         { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5947         { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5948         { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5949         { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5950         { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5951         { 0, 0, 0, 0 }
5952 };
5953 
5954 static const bitmask_transtbl lflag_tbl[] = {
5955         { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5956         { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5957         { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5958         { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5959         { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5960         { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5961         { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5962         { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5963         { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5964         { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5965         { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5966         { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5967         { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5968         { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5969         { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5970         { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
5971         { 0, 0, 0, 0 }
5972 };
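
/*
 * Illustrative note (not part of the original source): each row of these
 * tables is { target_mask, target_bits, host_mask, host_bits }.
 * target_to_host_bitmask() checks whether the masked target flags equal
 * target_bits and, if so, ORs in host_bits; host_to_target_bitmask() does
 * the reverse.  For a simple one-to-one flag such as ICANON the four
 * entries are identical, while multi-bit fields such as CSIZE/CS5..CS8
 * need one row per possible value.
 */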
5973 
5974 static void target_to_host_termios (void *dst, const void *src)
5975 {
5976     struct host_termios *host = dst;
5977     const struct target_termios *target = src;
5978 
5979     host->c_iflag =
5980         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5981     host->c_oflag =
5982         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5983     host->c_cflag =
5984         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5985     host->c_lflag =
5986         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5987     host->c_line = target->c_line;
5988 
5989     memset(host->c_cc, 0, sizeof(host->c_cc));
5990     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5991     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5992     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5993     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5994     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5995     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5996     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5997     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5998     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5999     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6000     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6001     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6002     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6003     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6004     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6005     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6006     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6007 }
6008 
6009 static void host_to_target_termios (void *dst, const void *src)
6010 {
6011     struct target_termios *target = dst;
6012     const struct host_termios *host = src;
6013 
6014     target->c_iflag =
6015         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6016     target->c_oflag =
6017         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6018     target->c_cflag =
6019         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6020     target->c_lflag =
6021         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6022     target->c_line = host->c_line;
6023 
6024     memset(target->c_cc, 0, sizeof(target->c_cc));
6025     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6026     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6027     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6028     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6029     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6030     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6031     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6032     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6033     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6034     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6035     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6036     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6037     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6038     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6039     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6040     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6041     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6042 }
6043 
6044 static const StructEntry struct_termios_def = {
6045     .convert = { host_to_target_termios, target_to_host_termios },
6046     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6047     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6048     .print = print_termios,
6049 };
6050 
6051 static bitmask_transtbl mmap_flags_tbl[] = {
6052     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6053     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6054     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6055     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6056       MAP_ANONYMOUS, MAP_ANONYMOUS },
6057     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6058       MAP_GROWSDOWN, MAP_GROWSDOWN },
6059     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6060       MAP_DENYWRITE, MAP_DENYWRITE },
6061     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6062       MAP_EXECUTABLE, MAP_EXECUTABLE },
6063     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6064     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6065       MAP_NORESERVE, MAP_NORESERVE },
6066     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6067     /* MAP_STACK had been ignored by the kernel for quite some time.
6068        Recognize it for the target insofar as we do not want to pass
6069        it through to the host.  */
6070     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6071     { 0, 0, 0, 0 }
6072 };
6073 
6074 /*
6075  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6076  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6077  */
6078 #if defined(TARGET_I386)
6079 
6080 /* NOTE: there is really one LDT for all the threads */
6081 static uint8_t *ldt_table;
6082 
6083 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6084 {
6085     int size;
6086     void *p;
6087 
6088     if (!ldt_table)
6089         return 0;
6090     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6091     if (size > bytecount)
6092         size = bytecount;
6093     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6094     if (!p)
6095         return -TARGET_EFAULT;
6096     /* ??? Should this by byteswapped?  */
6097     memcpy(p, ldt_table, size);
6098     unlock_user(p, ptr, size);
6099     return size;
6100 }
6101 
6102 /* XXX: add locking support */
6103 static abi_long write_ldt(CPUX86State *env,
6104                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6105 {
6106     struct target_modify_ldt_ldt_s ldt_info;
6107     struct target_modify_ldt_ldt_s *target_ldt_info;
6108     int seg_32bit, contents, read_exec_only, limit_in_pages;
6109     int seg_not_present, useable, lm;
6110     uint32_t *lp, entry_1, entry_2;
6111 
6112     if (bytecount != sizeof(ldt_info))
6113         return -TARGET_EINVAL;
6114     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6115         return -TARGET_EFAULT;
6116     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6117     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6118     ldt_info.limit = tswap32(target_ldt_info->limit);
6119     ldt_info.flags = tswap32(target_ldt_info->flags);
6120     unlock_user_struct(target_ldt_info, ptr, 0);
6121 
6122     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6123         return -TARGET_EINVAL;
6124     seg_32bit = ldt_info.flags & 1;
6125     contents = (ldt_info.flags >> 1) & 3;
6126     read_exec_only = (ldt_info.flags >> 3) & 1;
6127     limit_in_pages = (ldt_info.flags >> 4) & 1;
6128     seg_not_present = (ldt_info.flags >> 5) & 1;
6129     useable = (ldt_info.flags >> 6) & 1;
6130 #ifdef TARGET_ABI32
6131     lm = 0;
6132 #else
6133     lm = (ldt_info.flags >> 7) & 1;
6134 #endif
6135     if (contents == 3) {
6136         if (oldmode)
6137             return -TARGET_EINVAL;
6138         if (seg_not_present == 0)
6139             return -TARGET_EINVAL;
6140     }
6141     /* allocate the LDT */
6142     if (!ldt_table) {
6143         env->ldt.base = target_mmap(0,
6144                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6145                                     PROT_READ|PROT_WRITE,
6146                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6147         if (env->ldt.base == -1)
6148             return -TARGET_ENOMEM;
6149         memset(g2h_untagged(env->ldt.base), 0,
6150                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6151         env->ldt.limit = 0xffff;
6152         ldt_table = g2h_untagged(env->ldt.base);
6153     }
6154 
6155     /* NOTE: same code as Linux kernel */
6156     /* Allow LDTs to be cleared by the user. */
6157     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6158         if (oldmode ||
6159             (contents == 0		&&
6160              read_exec_only == 1	&&
6161              seg_32bit == 0		&&
6162              limit_in_pages == 0	&&
6163              seg_not_present == 1	&&
6164              useable == 0 )) {
6165             entry_1 = 0;
6166             entry_2 = 0;
6167             goto install;
6168         }
6169     }
6170 
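    /*
     * Pack the request into the two 32-bit words of an x86 segment
     * descriptor: entry_1 holds base[15:0] and limit[15:0]; entry_2 holds
     * base[31:24], base[23:16], limit[19:16] and the access/flag bits.
     * The constant 0x7000 sets DPL=3 and the S (code/data segment) bit.
     */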
6171     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6172         (ldt_info.limit & 0x0ffff);
6173     entry_2 = (ldt_info.base_addr & 0xff000000) |
6174         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6175         (ldt_info.limit & 0xf0000) |
6176         ((read_exec_only ^ 1) << 9) |
6177         (contents << 10) |
6178         ((seg_not_present ^ 1) << 15) |
6179         (seg_32bit << 22) |
6180         (limit_in_pages << 23) |
6181         (lm << 21) |
6182         0x7000;
6183     if (!oldmode)
6184         entry_2 |= (useable << 20);
6185 
6186     /* Install the new entry ...  */
6187 install:
6188     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6189     lp[0] = tswap32(entry_1);
6190     lp[1] = tswap32(entry_2);
6191     return 0;
6192 }
6193 
6194 /* i386-specific and somewhat unusual syscalls */
6195 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6196                               unsigned long bytecount)
6197 {
6198     abi_long ret;
6199 
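    /*
     * func selects the modify_ldt(2) sub-operation: 0 reads the LDT,
     * 1 writes an entry in the legacy format, 0x11 writes in the modern
     * format.  Anything else is rejected with ENOSYS.
     */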
6200     switch (func) {
6201     case 0:
6202         ret = read_ldt(ptr, bytecount);
6203         break;
6204     case 1:
6205         ret = write_ldt(env, ptr, bytecount, 1);
6206         break;
6207     case 0x11:
6208         ret = write_ldt(env, ptr, bytecount, 0);
6209         break;
6210     default:
6211         ret = -TARGET_ENOSYS;
6212         break;
6213     }
6214     return ret;
6215 }
6216 
6217 #if defined(TARGET_ABI32)
6218 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6219 {
6220     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6221     struct target_modify_ldt_ldt_s ldt_info;
6222     struct target_modify_ldt_ldt_s *target_ldt_info;
6223     int seg_32bit, contents, read_exec_only, limit_in_pages;
6224     int seg_not_present, useable, lm;
6225     uint32_t *lp, entry_1, entry_2;
6226     int i;
6227 
6228     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6229     if (!target_ldt_info)
6230         return -TARGET_EFAULT;
6231     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6232     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6233     ldt_info.limit = tswap32(target_ldt_info->limit);
6234     ldt_info.flags = tswap32(target_ldt_info->flags);
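    /*
     * An entry_number of -1 asks us to allocate a free TLS slot in the
     * GDT; the chosen index is written back to the guest structure.
     */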
6235     if (ldt_info.entry_number == -1) {
6236         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6237             if (gdt_table[i] == 0) {
6238                 ldt_info.entry_number = i;
6239                 target_ldt_info->entry_number = tswap32(i);
6240                 break;
6241             }
6242         }
6243     }
6244     unlock_user_struct(target_ldt_info, ptr, 1);
6245 
6246     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6247         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6248         return -TARGET_EINVAL;
6249     seg_32bit = ldt_info.flags & 1;
6250     contents = (ldt_info.flags >> 1) & 3;
6251     read_exec_only = (ldt_info.flags >> 3) & 1;
6252     limit_in_pages = (ldt_info.flags >> 4) & 1;
6253     seg_not_present = (ldt_info.flags >> 5) & 1;
6254     useable = (ldt_info.flags >> 6) & 1;
6255 #ifdef TARGET_ABI32
6256     lm = 0;
6257 #else
6258     lm = (ldt_info.flags >> 7) & 1;
6259 #endif
6260 
6261     if (contents == 3) {
6262         if (seg_not_present == 0)
6263             return -TARGET_EINVAL;
6264     }
6265 
6266     /* NOTE: same code as Linux kernel */
6267     /* Allow LDTs to be cleared by the user. */
6268     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6269         if ((contents == 0             &&
6270              read_exec_only == 1       &&
6271              seg_32bit == 0            &&
6272              limit_in_pages == 0       &&
6273              seg_not_present == 1      &&
6274              useable == 0 )) {
6275             entry_1 = 0;
6276             entry_2 = 0;
6277             goto install;
6278         }
6279     }
6280 
6281     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6282         (ldt_info.limit & 0x0ffff);
6283     entry_2 = (ldt_info.base_addr & 0xff000000) |
6284         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6285         (ldt_info.limit & 0xf0000) |
6286         ((read_exec_only ^ 1) << 9) |
6287         (contents << 10) |
6288         ((seg_not_present ^ 1) << 15) |
6289         (seg_32bit << 22) |
6290         (limit_in_pages << 23) |
6291         (useable << 20) |
6292         (lm << 21) |
6293         0x7000;
6294 
6295     /* Install the new entry ...  */
6296 install:
6297     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6298     lp[0] = tswap32(entry_1);
6299     lp[1] = tswap32(entry_2);
6300     return 0;
6301 }
6302 
6303 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6304 {
6305     struct target_modify_ldt_ldt_s *target_ldt_info;
6306     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6307     uint32_t base_addr, limit, flags;
6308     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6309     int seg_not_present, useable, lm;
6310     uint32_t *lp, entry_1, entry_2;
6311 
6312     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6313     if (!target_ldt_info)
6314         return -TARGET_EFAULT;
6315     idx = tswap32(target_ldt_info->entry_number);
6316     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6317         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6318         unlock_user_struct(target_ldt_info, ptr, 1);
6319         return -TARGET_EINVAL;
6320     }
6321     lp = (uint32_t *)(gdt_table + idx);
6322     entry_1 = tswap32(lp[0]);
6323     entry_2 = tswap32(lp[1]);
6324 
6325     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6326     contents = (entry_2 >> 10) & 3;
6327     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6328     seg_32bit = (entry_2 >> 22) & 1;
6329     limit_in_pages = (entry_2 >> 23) & 1;
6330     useable = (entry_2 >> 20) & 1;
6331 #ifdef TARGET_ABI32
6332     lm = 0;
6333 #else
6334     lm = (entry_2 >> 21) & 1;
6335 #endif
6336     flags = (seg_32bit << 0) | (contents << 1) |
6337         (read_exec_only << 3) | (limit_in_pages << 4) |
6338         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6339     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6340     base_addr = (entry_1 >> 16) |
6341         (entry_2 & 0xff000000) |
6342         ((entry_2 & 0xff) << 16);
6343     target_ldt_info->base_addr = tswapal(base_addr);
6344     target_ldt_info->limit = tswap32(limit);
6345     target_ldt_info->flags = tswap32(flags);
6346     unlock_user_struct(target_ldt_info, ptr, 1);
6347     return 0;
6348 }
6349 
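/*
 * The TARGET_ABI32 build does not implement arch_prctl(); the stub below
 * simply fails with ENOSYS.
 */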
6350 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6351 {
6352     return -TARGET_ENOSYS;
6353 }
6354 #else
6355 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6356 {
6357     abi_long ret = 0;
6358     abi_ulong val;
6359     int idx;
6360 
6361     switch(code) {
6362     case TARGET_ARCH_SET_GS:
6363     case TARGET_ARCH_SET_FS:
6364         if (code == TARGET_ARCH_SET_GS)
6365             idx = R_GS;
6366         else
6367             idx = R_FS;
6368         cpu_x86_load_seg(env, idx, 0);
6369         env->segs[idx].base = addr;
6370         break;
6371     case TARGET_ARCH_GET_GS:
6372     case TARGET_ARCH_GET_FS:
6373         if (code == TARGET_ARCH_GET_GS)
6374             idx = R_GS;
6375         else
6376             idx = R_FS;
6377         val = env->segs[idx].base;
6378         if (put_user(val, addr, abi_ulong))
6379             ret = -TARGET_EFAULT;
6380         break;
6381     default:
6382         ret = -TARGET_EINVAL;
6383         break;
6384     }
6385     return ret;
6386 }
6387 #endif /* defined(TARGET_ABI32) */
6388 
6389 #endif /* defined(TARGET_I386) */
6390 
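/* Host stack size (256 KiB) for threads created in the CLONE_VM path of
   do_fork() below. */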
6391 #define NEW_STACK_SIZE 0x40000
6392 
6393 
6394 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6395 typedef struct {
6396     CPUArchState *env;
6397     pthread_mutex_t mutex;
6398     pthread_cond_t cond;
6399     pthread_t thread;
6400     uint32_t tid;
6401     abi_ulong child_tidptr;
6402     abi_ulong parent_tidptr;
6403     sigset_t sigmask;
6404 } new_thread_info;
6405 
6406 static void *clone_func(void *arg)
6407 {
6408     new_thread_info *info = arg;
6409     CPUArchState *env;
6410     CPUState *cpu;
6411     TaskState *ts;
6412 
6413     rcu_register_thread();
6414     tcg_register_thread();
6415     env = info->env;
6416     cpu = env_cpu(env);
6417     thread_cpu = cpu;
6418     ts = (TaskState *)cpu->opaque;
6419     info->tid = sys_gettid();
6420     task_settid(ts);
6421     if (info->child_tidptr)
6422         put_user_u32(info->tid, info->child_tidptr);
6423     if (info->parent_tidptr)
6424         put_user_u32(info->tid, info->parent_tidptr);
6425     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6426     /* Enable signals.  */
6427     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6428     /* Signal to the parent that we're ready.  */
6429     pthread_mutex_lock(&info->mutex);
6430     pthread_cond_broadcast(&info->cond);
6431     pthread_mutex_unlock(&info->mutex);
6432     /* Wait until the parent has finished initializing the tls state.  */
6433     pthread_mutex_lock(&clone_lock);
6434     pthread_mutex_unlock(&clone_lock);
6435     cpu_loop(env);
6436     /* never exits */
6437     return NULL;
6438 }
6439 
6440 /* do_fork() must return host values and target errnos (unlike most
6441    other do_*() functions). */
6442 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6443                    abi_ulong parent_tidptr, target_ulong newtls,
6444                    abi_ulong child_tidptr)
6445 {
6446     CPUState *cpu = env_cpu(env);
6447     int ret;
6448     TaskState *ts;
6449     CPUState *new_cpu;
6450     CPUArchState *new_env;
6451     sigset_t sigmask;
6452 
6453     flags &= ~CLONE_IGNORED_FLAGS;
6454 
6455     /* Emulate vfork() with fork() */
6456     if (flags & CLONE_VFORK)
6457         flags &= ~(CLONE_VFORK | CLONE_VM);
6458 
6459     if (flags & CLONE_VM) {
6460         TaskState *parent_ts = (TaskState *)cpu->opaque;
6461         new_thread_info info;
6462         pthread_attr_t attr;
6463 
6464         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6465             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6466             return -TARGET_EINVAL;
6467         }
6468 
6469         ts = g_new0(TaskState, 1);
6470         init_task_state(ts);
6471 
6472         /* Grab a mutex so that thread setup appears atomic.  */
6473         pthread_mutex_lock(&clone_lock);
6474 
6475         /* we create a new CPU instance. */
6476         new_env = cpu_copy(env);
6477         /* Init regs that differ from the parent.  */
6478         cpu_clone_regs_child(new_env, newsp, flags);
6479         cpu_clone_regs_parent(env, flags);
6480         new_cpu = env_cpu(new_env);
6481         new_cpu->opaque = ts;
6482         ts->bprm = parent_ts->bprm;
6483         ts->info = parent_ts->info;
6484         ts->signal_mask = parent_ts->signal_mask;
6485 
6486         if (flags & CLONE_CHILD_CLEARTID) {
6487             ts->child_tidptr = child_tidptr;
6488         }
6489 
6490         if (flags & CLONE_SETTLS) {
6491             cpu_set_tls (new_env, newtls);
6492         }
6493 
6494         memset(&info, 0, sizeof(info));
6495         pthread_mutex_init(&info.mutex, NULL);
6496         pthread_mutex_lock(&info.mutex);
6497         pthread_cond_init(&info.cond, NULL);
6498         info.env = new_env;
6499         if (flags & CLONE_CHILD_SETTID) {
6500             info.child_tidptr = child_tidptr;
6501         }
6502         if (flags & CLONE_PARENT_SETTID) {
6503             info.parent_tidptr = parent_tidptr;
6504         }
6505 
6506         ret = pthread_attr_init(&attr);
6507         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6508         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6509         /* It is not safe to deliver signals until the child has finished
6510            initializing, so temporarily block all signals.  */
6511         sigfillset(&sigmask);
6512         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6513         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6514 
6515         /* If this is our first additional thread, we need to ensure we
6516          * generate code for parallel execution and flush old translations.
6517          */
6518         if (!parallel_cpus) {
6519             parallel_cpus = true;
6520             tb_flush(cpu);
6521         }
6522 
6523         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6524         /* TODO: Free new CPU state if thread creation failed.  */
6525 
6526         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6527         pthread_attr_destroy(&attr);
6528         if (ret == 0) {
6529             /* Wait for the child to initialize.  */
6530             pthread_cond_wait(&info.cond, &info.mutex);
6531             ret = info.tid;
6532         } else {
6533             ret = -1;
6534         }
6535         pthread_mutex_unlock(&info.mutex);
6536         pthread_cond_destroy(&info.cond);
6537         pthread_mutex_destroy(&info.mutex);
6538         pthread_mutex_unlock(&clone_lock);
6539     } else {
6540         /* Without CLONE_VM, we treat this as a plain fork. */
6541         if (flags & CLONE_INVALID_FORK_FLAGS) {
6542             return -TARGET_EINVAL;
6543         }
6544 
6545         /* We can't support custom termination signals */
6546         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6547             return -TARGET_EINVAL;
6548         }
6549 
6550         if (block_signals()) {
6551             return -TARGET_ERESTARTSYS;
6552         }
6553 
6554         fork_start();
6555         ret = fork();
6556         if (ret == 0) {
6557             /* Child Process.  */
6558             cpu_clone_regs_child(env, newsp, flags);
6559             fork_end(1);
6560             /* There is a race condition here.  The parent process could
6561                theoretically read the child's TID before it has been set.
6562                Avoiding this would require using either ptrace
6563                (not implemented) or having *_tidptr point at a shared memory
6564                mapping.  We can't repeat the spinlock hack used above because
6565                the child process gets its own copy of the lock.  */
6566             if (flags & CLONE_CHILD_SETTID)
6567                 put_user_u32(sys_gettid(), child_tidptr);
6568             if (flags & CLONE_PARENT_SETTID)
6569                 put_user_u32(sys_gettid(), parent_tidptr);
6570             ts = (TaskState *)cpu->opaque;
6571             if (flags & CLONE_SETTLS)
6572                 cpu_set_tls (env, newtls);
6573             if (flags & CLONE_CHILD_CLEARTID)
6574                 ts->child_tidptr = child_tidptr;
6575         } else {
6576             cpu_clone_regs_parent(env, flags);
6577             fork_end(0);
6578         }
6579     }
6580     return ret;
6581 }
6582 
6583 /* Warning: does not handle Linux-specific flags... */
6584 static int target_to_host_fcntl_cmd(int cmd)
6585 {
6586     int ret;
6587 
6588     switch(cmd) {
6589     case TARGET_F_DUPFD:
6590     case TARGET_F_GETFD:
6591     case TARGET_F_SETFD:
6592     case TARGET_F_GETFL:
6593     case TARGET_F_SETFL:
6594     case TARGET_F_OFD_GETLK:
6595     case TARGET_F_OFD_SETLK:
6596     case TARGET_F_OFD_SETLKW:
6597         ret = cmd;
6598         break;
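    /*
     * The target's plain F_GETLK/F_SETLK/F_SETLKW map to the host's 64-bit
     * lock commands, because do_fcntl() always converts lock data through
     * struct flock64.
     */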
6599     case TARGET_F_GETLK:
6600         ret = F_GETLK64;
6601         break;
6602     case TARGET_F_SETLK:
6603         ret = F_SETLK64;
6604         break;
6605     case TARGET_F_SETLKW:
6606         ret = F_SETLKW64;
6607         break;
6608     case TARGET_F_GETOWN:
6609         ret = F_GETOWN;
6610         break;
6611     case TARGET_F_SETOWN:
6612         ret = F_SETOWN;
6613         break;
6614     case TARGET_F_GETSIG:
6615         ret = F_GETSIG;
6616         break;
6617     case TARGET_F_SETSIG:
6618         ret = F_SETSIG;
6619         break;
6620 #if TARGET_ABI_BITS == 32
6621     case TARGET_F_GETLK64:
6622         ret = F_GETLK64;
6623         break;
6624     case TARGET_F_SETLK64:
6625         ret = F_SETLK64;
6626         break;
6627     case TARGET_F_SETLKW64:
6628         ret = F_SETLKW64;
6629         break;
6630 #endif
6631     case TARGET_F_SETLEASE:
6632         ret = F_SETLEASE;
6633         break;
6634     case TARGET_F_GETLEASE:
6635         ret = F_GETLEASE;
6636         break;
6637 #ifdef F_DUPFD_CLOEXEC
6638     case TARGET_F_DUPFD_CLOEXEC:
6639         ret = F_DUPFD_CLOEXEC;
6640         break;
6641 #endif
6642     case TARGET_F_NOTIFY:
6643         ret = F_NOTIFY;
6644         break;
6645 #ifdef F_GETOWN_EX
6646     case TARGET_F_GETOWN_EX:
6647         ret = F_GETOWN_EX;
6648         break;
6649 #endif
6650 #ifdef F_SETOWN_EX
6651     case TARGET_F_SETOWN_EX:
6652         ret = F_SETOWN_EX;
6653         break;
6654 #endif
6655 #ifdef F_SETPIPE_SZ
6656     case TARGET_F_SETPIPE_SZ:
6657         ret = F_SETPIPE_SZ;
6658         break;
6659     case TARGET_F_GETPIPE_SZ:
6660         ret = F_GETPIPE_SZ;
6661         break;
6662 #endif
6663 #ifdef F_ADD_SEALS
6664     case TARGET_F_ADD_SEALS:
6665         ret = F_ADD_SEALS;
6666         break;
6667     case TARGET_F_GET_SEALS:
6668         ret = F_GET_SEALS;
6669         break;
6670 #endif
6671     default:
6672         ret = -TARGET_EINVAL;
6673         break;
6674     }
6675 
6676 #if defined(__powerpc64__)
6677     /* On PPC64, the glibc headers define the F_*LK64 commands as 12, 13
6678      * and 14, which the kernel does not support. The glibc fcntl wrapper
6679      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6680      * the syscall directly, adjust to what the kernel supports.
6681      */
6682     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6683         ret -= F_GETLK64 - 5;
6684     }
6685 #endif
6686 
6687     return ret;
6688 }
6689 
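/*
 * X-macro: the switch body below is expanded twice with different
 * definitions of TRANSTBL_CONVERT to generate the target-to-host and
 * host-to-target lock-type conversions.
 */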
6690 #define FLOCK_TRANSTBL \
6691     switch (type) { \
6692     TRANSTBL_CONVERT(F_RDLCK); \
6693     TRANSTBL_CONVERT(F_WRLCK); \
6694     TRANSTBL_CONVERT(F_UNLCK); \
6695     }
6696 
6697 static int target_to_host_flock(int type)
6698 {
6699 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6700     FLOCK_TRANSTBL
6701 #undef  TRANSTBL_CONVERT
6702     return -TARGET_EINVAL;
6703 }
6704 
6705 static int host_to_target_flock(int type)
6706 {
6707 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6708     FLOCK_TRANSTBL
6709 #undef  TRANSTBL_CONVERT
6710     /* If we don't know how to convert the value coming from the host,
6711      * copy it to the target field as-is.
6712      */
6713     return type;
6714 }
6715 
6716 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6717                                             abi_ulong target_flock_addr)
6718 {
6719     struct target_flock *target_fl;
6720     int l_type;
6721 
6722     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6723         return -TARGET_EFAULT;
6724     }
6725 
6726     __get_user(l_type, &target_fl->l_type);
6727     l_type = target_to_host_flock(l_type);
6728     if (l_type < 0) {
6729         return l_type;
6730     }
6731     fl->l_type = l_type;
6732     __get_user(fl->l_whence, &target_fl->l_whence);
6733     __get_user(fl->l_start, &target_fl->l_start);
6734     __get_user(fl->l_len, &target_fl->l_len);
6735     __get_user(fl->l_pid, &target_fl->l_pid);
6736     unlock_user_struct(target_fl, target_flock_addr, 0);
6737     return 0;
6738 }
6739 
6740 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6741                                           const struct flock64 *fl)
6742 {
6743     struct target_flock *target_fl;
6744     short l_type;
6745 
6746     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6747         return -TARGET_EFAULT;
6748     }
6749 
6750     l_type = host_to_target_flock(fl->l_type);
6751     __put_user(l_type, &target_fl->l_type);
6752     __put_user(fl->l_whence, &target_fl->l_whence);
6753     __put_user(fl->l_start, &target_fl->l_start);
6754     __put_user(fl->l_len, &target_fl->l_len);
6755     __put_user(fl->l_pid, &target_fl->l_pid);
6756     unlock_user_struct(target_fl, target_flock_addr, 1);
6757     return 0;
6758 }
6759 
6760 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6761 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6762 
6763 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6764 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6765                                                    abi_ulong target_flock_addr)
6766 {
6767     struct target_oabi_flock64 *target_fl;
6768     int l_type;
6769 
6770     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6771         return -TARGET_EFAULT;
6772     }
6773 
6774     __get_user(l_type, &target_fl->l_type);
6775     l_type = target_to_host_flock(l_type);
6776     if (l_type < 0) {
6777         return l_type;
6778     }
6779     fl->l_type = l_type;
6780     __get_user(fl->l_whence, &target_fl->l_whence);
6781     __get_user(fl->l_start, &target_fl->l_start);
6782     __get_user(fl->l_len, &target_fl->l_len);
6783     __get_user(fl->l_pid, &target_fl->l_pid);
6784     unlock_user_struct(target_fl, target_flock_addr, 0);
6785     return 0;
6786 }
6787 
6788 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6789                                                  const struct flock64 *fl)
6790 {
6791     struct target_oabi_flock64 *target_fl;
6792     short l_type;
6793 
6794     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6795         return -TARGET_EFAULT;
6796     }
6797 
6798     l_type = host_to_target_flock(fl->l_type);
6799     __put_user(l_type, &target_fl->l_type);
6800     __put_user(fl->l_whence, &target_fl->l_whence);
6801     __put_user(fl->l_start, &target_fl->l_start);
6802     __put_user(fl->l_len, &target_fl->l_len);
6803     __put_user(fl->l_pid, &target_fl->l_pid);
6804     unlock_user_struct(target_fl, target_flock_addr, 1);
6805     return 0;
6806 }
6807 #endif
6808 
6809 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6810                                               abi_ulong target_flock_addr)
6811 {
6812     struct target_flock64 *target_fl;
6813     int l_type;
6814 
6815     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6816         return -TARGET_EFAULT;
6817     }
6818 
6819     __get_user(l_type, &target_fl->l_type);
6820     l_type = target_to_host_flock(l_type);
6821     if (l_type < 0) {
6822         return l_type;
6823     }
6824     fl->l_type = l_type;
6825     __get_user(fl->l_whence, &target_fl->l_whence);
6826     __get_user(fl->l_start, &target_fl->l_start);
6827     __get_user(fl->l_len, &target_fl->l_len);
6828     __get_user(fl->l_pid, &target_fl->l_pid);
6829     unlock_user_struct(target_fl, target_flock_addr, 0);
6830     return 0;
6831 }
6832 
6833 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6834                                             const struct flock64 *fl)
6835 {
6836     struct target_flock64 *target_fl;
6837     short l_type;
6838 
6839     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6840         return -TARGET_EFAULT;
6841     }
6842 
6843     l_type = host_to_target_flock(fl->l_type);
6844     __put_user(l_type, &target_fl->l_type);
6845     __put_user(fl->l_whence, &target_fl->l_whence);
6846     __put_user(fl->l_start, &target_fl->l_start);
6847     __put_user(fl->l_len, &target_fl->l_len);
6848     __put_user(fl->l_pid, &target_fl->l_pid);
6849     unlock_user_struct(target_fl, target_flock_addr, 1);
6850     return 0;
6851 }
6852 
6853 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6854 {
6855     struct flock64 fl64;
6856 #ifdef F_GETOWN_EX
6857     struct f_owner_ex fox;
6858     struct target_f_owner_ex *target_fox;
6859 #endif
6860     abi_long ret;
6861     int host_cmd = target_to_host_fcntl_cmd(cmd);
6862 
6863     if (host_cmd == -TARGET_EINVAL)
6864         return host_cmd;
6865 
6866     switch(cmd) {
6867     case TARGET_F_GETLK:
6868         ret = copy_from_user_flock(&fl64, arg);
6869         if (ret) {
6870             return ret;
6871         }
6872         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6873         if (ret == 0) {
6874             ret = copy_to_user_flock(arg, &fl64);
6875         }
6876         break;
6877 
6878     case TARGET_F_SETLK:
6879     case TARGET_F_SETLKW:
6880         ret = copy_from_user_flock(&fl64, arg);
6881         if (ret) {
6882             return ret;
6883         }
6884         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6885         break;
6886 
6887     case TARGET_F_GETLK64:
6888     case TARGET_F_OFD_GETLK:
6889         ret = copy_from_user_flock64(&fl64, arg);
6890         if (ret) {
6891             return ret;
6892         }
6893         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6894         if (ret == 0) {
6895             ret = copy_to_user_flock64(arg, &fl64);
6896         }
6897         break;
6898     case TARGET_F_SETLK64:
6899     case TARGET_F_SETLKW64:
6900     case TARGET_F_OFD_SETLK:
6901     case TARGET_F_OFD_SETLKW:
6902         ret = copy_from_user_flock64(&fl64, arg);
6903         if (ret) {
6904             return ret;
6905         }
6906         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6907         break;
6908 
6909     case TARGET_F_GETFL:
6910         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6911         if (ret >= 0) {
6912             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6913         }
6914         break;
6915 
6916     case TARGET_F_SETFL:
6917         ret = get_errno(safe_fcntl(fd, host_cmd,
6918                                    target_to_host_bitmask(arg,
6919                                                           fcntl_flags_tbl)));
6920         break;
6921 
6922 #ifdef F_GETOWN_EX
6923     case TARGET_F_GETOWN_EX:
6924         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6925         if (ret >= 0) {
6926             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6927                 return -TARGET_EFAULT;
6928             target_fox->type = tswap32(fox.type);
6929             target_fox->pid = tswap32(fox.pid);
6930             unlock_user_struct(target_fox, arg, 1);
6931         }
6932         break;
6933 #endif
6934 
6935 #ifdef F_SETOWN_EX
6936     case TARGET_F_SETOWN_EX:
6937         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6938             return -TARGET_EFAULT;
6939         fox.type = tswap32(target_fox->type);
6940         fox.pid = tswap32(target_fox->pid);
6941         unlock_user_struct(target_fox, arg, 0);
6942         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6943         break;
6944 #endif
6945 
6946     case TARGET_F_SETSIG:
6947         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6948         break;
6949 
6950     case TARGET_F_GETSIG:
6951         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6952         break;
6953 
6954     case TARGET_F_SETOWN:
6955     case TARGET_F_GETOWN:
6956     case TARGET_F_SETLEASE:
6957     case TARGET_F_GETLEASE:
6958     case TARGET_F_SETPIPE_SZ:
6959     case TARGET_F_GETPIPE_SZ:
6960     case TARGET_F_ADD_SEALS:
6961     case TARGET_F_GET_SEALS:
6962         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6963         break;
6964 
6965     default:
6966         ret = get_errno(safe_fcntl(fd, cmd, arg));
6967         break;
6968     }
6969     return ret;
6970 }
6971 
6972 #ifdef USE_UID16
6973 
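/*
 * For targets whose syscalls use 16-bit uid_t/gid_t, squeeze 32-bit IDs
 * into the 16-bit range: anything above 65535 is reported as 65534, the
 * kernel's default overflow ID for the legacy 16-bit syscalls.
 */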
6974 static inline int high2lowuid(int uid)
6975 {
6976     if (uid > 65535)
6977         return 65534;
6978     else
6979         return uid;
6980 }
6981 
6982 static inline int high2lowgid(int gid)
6983 {
6984     if (gid > 65535)
6985         return 65534;
6986     else
6987         return gid;
6988 }
6989 
6990 static inline int low2highuid(int uid)
6991 {
6992     if ((int16_t)uid == -1)
6993         return -1;
6994     else
6995         return uid;
6996 }
6997 
6998 static inline int low2highgid(int gid)
6999 {
7000     if ((int16_t)gid == -1)
7001         return -1;
7002     else
7003         return gid;
7004 }
7005 static inline int tswapid(int id)
7006 {
7007     return tswap16(id);
7008 }
7009 
7010 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7011 
7012 #else /* !USE_UID16 */
7013 static inline int high2lowuid(int uid)
7014 {
7015     return uid;
7016 }
7017 static inline int high2lowgid(int gid)
7018 {
7019     return gid;
7020 }
7021 static inline int low2highuid(int uid)
7022 {
7023     return uid;
7024 }
7025 static inline int low2highgid(int gid)
7026 {
7027     return gid;
7028 }
7029 static inline int tswapid(int id)
7030 {
7031     return tswap32(id);
7032 }
7033 
7034 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7035 
7036 #endif /* USE_UID16 */
7037 
7038 /* We must do direct syscalls for setting UID/GID, because we want to
7039  * implement the Linux system call semantics of "change only for this thread",
7040  * not the libc/POSIX semantics of "change for all threads in process".
7041  * (See http://ewontfix.com/17/ for more details.)
7042  * We use the 32-bit version of the syscalls if present; if it is not
7043  * then either the host architecture supports 32-bit UIDs natively with
7044  * the standard syscall, or the 16-bit UID is the best we can do.
7045  */
7046 #ifdef __NR_setuid32
7047 #define __NR_sys_setuid __NR_setuid32
7048 #else
7049 #define __NR_sys_setuid __NR_setuid
7050 #endif
7051 #ifdef __NR_setgid32
7052 #define __NR_sys_setgid __NR_setgid32
7053 #else
7054 #define __NR_sys_setgid __NR_setgid
7055 #endif
7056 #ifdef __NR_setresuid32
7057 #define __NR_sys_setresuid __NR_setresuid32
7058 #else
7059 #define __NR_sys_setresuid __NR_setresuid
7060 #endif
7061 #ifdef __NR_setresgid32
7062 #define __NR_sys_setresgid __NR_setresgid32
7063 #else
7064 #define __NR_sys_setresgid __NR_setresgid
7065 #endif
7066 
7067 _syscall1(int, sys_setuid, uid_t, uid)
7068 _syscall1(int, sys_setgid, gid_t, gid)
7069 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7070 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7071 
7072 void syscall_init(void)
7073 {
7074     IOCTLEntry *ie;
7075     const argtype *arg_type;
7076     int size;
7077     int i;
7078 
7079     thunk_init(STRUCT_MAX);
7080 
7081 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7082 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7083 #include "syscall_types.h"
7084 #undef STRUCT
7085 #undef STRUCT_SPECIAL
7086 
7087     /* Build the target_to_host_errno_table[] from
7088      * host_to_target_errno_table[]. */
7089     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7090         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7091     }
7092 
7093     /* We patch the ioctl size if necessary. We rely on the fact that
7094        no ioctl has all bits set to '1' in its size field. */
7095     ie = ioctl_entries;
7096     while (ie->target_cmd != 0) {
7097         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7098             TARGET_IOC_SIZEMASK) {
7099             arg_type = ie->arg_type;
7100             if (arg_type[0] != TYPE_PTR) {
7101                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7102                         ie->target_cmd);
7103                 exit(1);
7104             }
7105             arg_type++;
7106             size = thunk_type_size(arg_type, 0);
7107             ie->target_cmd = (ie->target_cmd &
7108                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7109                 (size << TARGET_IOC_SIZESHIFT);
7110         }
7111 
7112         /* automatic consistency check if same arch */
7113 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7114     (defined(__x86_64__) && defined(TARGET_X86_64))
7115         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7116             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7117                     ie->name, ie->target_cmd, ie->host_cmd);
7118         }
7119 #endif
7120         ie++;
7121     }
7122 }
7123 
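/*
 * Some 32-bit ABIs pass 64-bit syscall arguments in aligned (even/odd)
 * register pairs, which inserts a padding argument.  When
 * regpairs_aligned() reports this, the two halves of the offset are
 * shifted down by one slot before being recombined with target_offset64().
 */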
7124 #ifdef TARGET_NR_truncate64
7125 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7126                                          abi_long arg2,
7127                                          abi_long arg3,
7128                                          abi_long arg4)
7129 {
7130     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7131         arg2 = arg3;
7132         arg3 = arg4;
7133     }
7134     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7135 }
7136 #endif
7137 
7138 #ifdef TARGET_NR_ftruncate64
7139 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7140                                           abi_long arg2,
7141                                           abi_long arg3,
7142                                           abi_long arg4)
7143 {
7144     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7145         arg2 = arg3;
7146         arg3 = arg4;
7147     }
7148     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7149 }
7150 #endif
7151 
7152 #if defined(TARGET_NR_timer_settime) || \
7153     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7154 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7155                                                  abi_ulong target_addr)
7156 {
7157     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7158                                 offsetof(struct target_itimerspec,
7159                                          it_interval)) ||
7160         target_to_host_timespec(&host_its->it_value, target_addr +
7161                                 offsetof(struct target_itimerspec,
7162                                          it_value))) {
7163         return -TARGET_EFAULT;
7164     }
7165 
7166     return 0;
7167 }
7168 #endif
7169 
7170 #if defined(TARGET_NR_timer_settime64) || \
7171     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7172 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7173                                                    abi_ulong target_addr)
7174 {
7175     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7176                                   offsetof(struct target__kernel_itimerspec,
7177                                            it_interval)) ||
7178         target_to_host_timespec64(&host_its->it_value, target_addr +
7179                                   offsetof(struct target__kernel_itimerspec,
7180                                            it_value))) {
7181         return -TARGET_EFAULT;
7182     }
7183 
7184     return 0;
7185 }
7186 #endif
7187 
7188 #if ((defined(TARGET_NR_timerfd_gettime) || \
7189       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7190       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7191 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7192                                                  struct itimerspec *host_its)
7193 {
7194     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7195                                                        it_interval),
7196                                 &host_its->it_interval) ||
7197         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7198                                                        it_value),
7199                                 &host_its->it_value)) {
7200         return -TARGET_EFAULT;
7201     }
7202     return 0;
7203 }
7204 #endif
7205 
7206 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7207       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7208       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7209 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7210                                                    struct itimerspec *host_its)
7211 {
7212     if (host_to_target_timespec64(target_addr +
7213                                   offsetof(struct target__kernel_itimerspec,
7214                                            it_interval),
7215                                   &host_its->it_interval) ||
7216         host_to_target_timespec64(target_addr +
7217                                   offsetof(struct target__kernel_itimerspec,
7218                                            it_value),
7219                                   &host_its->it_value)) {
7220         return -TARGET_EFAULT;
7221     }
7222     return 0;
7223 }
7224 #endif
7225 
7226 #if defined(TARGET_NR_adjtimex) || \
7227     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7228 static inline abi_long target_to_host_timex(struct timex *host_tx,
7229                                             abi_long target_addr)
7230 {
7231     struct target_timex *target_tx;
7232 
7233     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7234         return -TARGET_EFAULT;
7235     }
7236 
7237     __get_user(host_tx->modes, &target_tx->modes);
7238     __get_user(host_tx->offset, &target_tx->offset);
7239     __get_user(host_tx->freq, &target_tx->freq);
7240     __get_user(host_tx->maxerror, &target_tx->maxerror);
7241     __get_user(host_tx->esterror, &target_tx->esterror);
7242     __get_user(host_tx->status, &target_tx->status);
7243     __get_user(host_tx->constant, &target_tx->constant);
7244     __get_user(host_tx->precision, &target_tx->precision);
7245     __get_user(host_tx->tolerance, &target_tx->tolerance);
7246     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7247     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7248     __get_user(host_tx->tick, &target_tx->tick);
7249     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7250     __get_user(host_tx->jitter, &target_tx->jitter);
7251     __get_user(host_tx->shift, &target_tx->shift);
7252     __get_user(host_tx->stabil, &target_tx->stabil);
7253     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7254     __get_user(host_tx->calcnt, &target_tx->calcnt);
7255     __get_user(host_tx->errcnt, &target_tx->errcnt);
7256     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7257     __get_user(host_tx->tai, &target_tx->tai);
7258 
7259     unlock_user_struct(target_tx, target_addr, 0);
7260     return 0;
7261 }
7262 
7263 static inline abi_long host_to_target_timex(abi_long target_addr,
7264                                             struct timex *host_tx)
7265 {
7266     struct target_timex *target_tx;
7267 
7268     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7269         return -TARGET_EFAULT;
7270     }
7271 
7272     __put_user(host_tx->modes, &target_tx->modes);
7273     __put_user(host_tx->offset, &target_tx->offset);
7274     __put_user(host_tx->freq, &target_tx->freq);
7275     __put_user(host_tx->maxerror, &target_tx->maxerror);
7276     __put_user(host_tx->esterror, &target_tx->esterror);
7277     __put_user(host_tx->status, &target_tx->status);
7278     __put_user(host_tx->constant, &target_tx->constant);
7279     __put_user(host_tx->precision, &target_tx->precision);
7280     __put_user(host_tx->tolerance, &target_tx->tolerance);
7281     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7282     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7283     __put_user(host_tx->tick, &target_tx->tick);
7284     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7285     __put_user(host_tx->jitter, &target_tx->jitter);
7286     __put_user(host_tx->shift, &target_tx->shift);
7287     __put_user(host_tx->stabil, &target_tx->stabil);
7288     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7289     __put_user(host_tx->calcnt, &target_tx->calcnt);
7290     __put_user(host_tx->errcnt, &target_tx->errcnt);
7291     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7292     __put_user(host_tx->tai, &target_tx->tai);
7293 
7294     unlock_user_struct(target_tx, target_addr, 1);
7295     return 0;
7296 }
7297 #endif
7298 
7299 
7300 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7301 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7302                                               abi_long target_addr)
7303 {
7304     struct target__kernel_timex *target_tx;
7305 
7306     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7307                                  offsetof(struct target__kernel_timex,
7308                                           time))) {
7309         return -TARGET_EFAULT;
7310     }
7311 
7312     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7313         return -TARGET_EFAULT;
7314     }
7315 
7316     __get_user(host_tx->modes, &target_tx->modes);
7317     __get_user(host_tx->offset, &target_tx->offset);
7318     __get_user(host_tx->freq, &target_tx->freq);
7319     __get_user(host_tx->maxerror, &target_tx->maxerror);
7320     __get_user(host_tx->esterror, &target_tx->esterror);
7321     __get_user(host_tx->status, &target_tx->status);
7322     __get_user(host_tx->constant, &target_tx->constant);
7323     __get_user(host_tx->precision, &target_tx->precision);
7324     __get_user(host_tx->tolerance, &target_tx->tolerance);
7325     __get_user(host_tx->tick, &target_tx->tick);
7326     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7327     __get_user(host_tx->jitter, &target_tx->jitter);
7328     __get_user(host_tx->shift, &target_tx->shift);
7329     __get_user(host_tx->stabil, &target_tx->stabil);
7330     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7331     __get_user(host_tx->calcnt, &target_tx->calcnt);
7332     __get_user(host_tx->errcnt, &target_tx->errcnt);
7333     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7334     __get_user(host_tx->tai, &target_tx->tai);
7335 
7336     unlock_user_struct(target_tx, target_addr, 0);
7337     return 0;
7338 }
7339 
7340 static inline abi_long host_to_target_timex64(abi_long target_addr,
7341                                               struct timex *host_tx)
7342 {
7343     struct target__kernel_timex *target_tx;
7344 
7345     if (copy_to_user_timeval64(target_addr +
7346                                offsetof(struct target__kernel_timex, time),
7347                                &host_tx->time)) {
7348         return -TARGET_EFAULT;
7349     }
7350 
7351     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7352         return -TARGET_EFAULT;
7353     }
7354 
7355     __put_user(host_tx->modes, &target_tx->modes);
7356     __put_user(host_tx->offset, &target_tx->offset);
7357     __put_user(host_tx->freq, &target_tx->freq);
7358     __put_user(host_tx->maxerror, &target_tx->maxerror);
7359     __put_user(host_tx->esterror, &target_tx->esterror);
7360     __put_user(host_tx->status, &target_tx->status);
7361     __put_user(host_tx->constant, &target_tx->constant);
7362     __put_user(host_tx->precision, &target_tx->precision);
7363     __put_user(host_tx->tolerance, &target_tx->tolerance);
7364     __put_user(host_tx->tick, &target_tx->tick);
7365     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7366     __put_user(host_tx->jitter, &target_tx->jitter);
7367     __put_user(host_tx->shift, &target_tx->shift);
7368     __put_user(host_tx->stabil, &target_tx->stabil);
7369     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7370     __put_user(host_tx->calcnt, &target_tx->calcnt);
7371     __put_user(host_tx->errcnt, &target_tx->errcnt);
7372     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7373     __put_user(host_tx->tai, &target_tx->tai);
7374 
7375     unlock_user_struct(target_tx, target_addr, 1);
7376     return 0;
7377 }
7378 #endif
7379 
7380 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7381                                                abi_ulong target_addr)
7382 {
7383     struct target_sigevent *target_sevp;
7384 
7385     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7386         return -TARGET_EFAULT;
7387     }
7388 
7389     /* This union is awkward on 64 bit systems because it has a 32 bit
7390      * integer and a pointer in it; we follow the conversion approach
7391      * used for handling sigval types in signal.c so the guest should get
7392      * the correct value back even if we did a 64 bit byteswap and it's
7393      * using the 32 bit integer.
7394      */
7395     host_sevp->sigev_value.sival_ptr =
7396         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7397     host_sevp->sigev_signo =
7398         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7399     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7400     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7401 
7402     unlock_user_struct(target_sevp, target_addr, 1);
7403     return 0;
7404 }
7405 
7406 #if defined(TARGET_NR_mlockall)
7407 static inline int target_to_host_mlockall_arg(int arg)
7408 {
7409     int result = 0;
7410 
7411     if (arg & TARGET_MCL_CURRENT) {
7412         result |= MCL_CURRENT;
7413     }
7414     if (arg & TARGET_MCL_FUTURE) {
7415         result |= MCL_FUTURE;
7416     }
7417 #ifdef MCL_ONFAULT
7418     if (arg & TARGET_MCL_ONFAULT) {
7419         result |= MCL_ONFAULT;
7420     }
7421 #endif
7422 
7423     return result;
7424 }
7425 #endif
7426 
7427 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7428      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7429      defined(TARGET_NR_newfstatat))
7430 static inline abi_long host_to_target_stat64(void *cpu_env,
7431                                              abi_ulong target_addr,
7432                                              struct stat *host_st)
7433 {
7434 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7435     if (((CPUARMState *)cpu_env)->eabi) {
7436         struct target_eabi_stat64 *target_st;
7437 
7438         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7439             return -TARGET_EFAULT;
7440         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7441         __put_user(host_st->st_dev, &target_st->st_dev);
7442         __put_user(host_st->st_ino, &target_st->st_ino);
7443 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7444         __put_user(host_st->st_ino, &target_st->__st_ino);
7445 #endif
7446         __put_user(host_st->st_mode, &target_st->st_mode);
7447         __put_user(host_st->st_nlink, &target_st->st_nlink);
7448         __put_user(host_st->st_uid, &target_st->st_uid);
7449         __put_user(host_st->st_gid, &target_st->st_gid);
7450         __put_user(host_st->st_rdev, &target_st->st_rdev);
7451         __put_user(host_st->st_size, &target_st->st_size);
7452         __put_user(host_st->st_blksize, &target_st->st_blksize);
7453         __put_user(host_st->st_blocks, &target_st->st_blocks);
7454         __put_user(host_st->st_atime, &target_st->target_st_atime);
7455         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7456         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7457 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7458         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7459         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7460         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7461 #endif
7462         unlock_user_struct(target_st, target_addr, 1);
7463     } else
7464 #endif
7465     {
7466 #if defined(TARGET_HAS_STRUCT_STAT64)
7467         struct target_stat64 *target_st;
7468 #else
7469         struct target_stat *target_st;
7470 #endif
7471 
7472         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7473             return -TARGET_EFAULT;
7474         memset(target_st, 0, sizeof(*target_st));
7475         __put_user(host_st->st_dev, &target_st->st_dev);
7476         __put_user(host_st->st_ino, &target_st->st_ino);
7477 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7478         __put_user(host_st->st_ino, &target_st->__st_ino);
7479 #endif
7480         __put_user(host_st->st_mode, &target_st->st_mode);
7481         __put_user(host_st->st_nlink, &target_st->st_nlink);
7482         __put_user(host_st->st_uid, &target_st->st_uid);
7483         __put_user(host_st->st_gid, &target_st->st_gid);
7484         __put_user(host_st->st_rdev, &target_st->st_rdev);
7485         /* XXX: better use of kernel struct */
7486         __put_user(host_st->st_size, &target_st->st_size);
7487         __put_user(host_st->st_blksize, &target_st->st_blksize);
7488         __put_user(host_st->st_blocks, &target_st->st_blocks);
7489         __put_user(host_st->st_atime, &target_st->target_st_atime);
7490         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7491         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7492 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7493         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7494         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7495         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7496 #endif
7497         unlock_user_struct(target_st, target_addr, 1);
7498     }
7499 
7500     return 0;
7501 }
7502 #endif
7503 
7504 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7505 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7506                                             abi_ulong target_addr)
7507 {
7508     struct target_statx *target_stx;
7509 
7510     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7511         return -TARGET_EFAULT;
7512     }
7513     memset(target_stx, 0, sizeof(*target_stx));
7514 
7515     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7516     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7517     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7518     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7519     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7520     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7521     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7522     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7523     __put_user(host_stx->stx_size, &target_stx->stx_size);
7524     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7525     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7526     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7527     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7528     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7529     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7530     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7531     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7532     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7533     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7534     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7535     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7536     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7537     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7538 
7539     unlock_user_struct(target_stx, target_addr, 1);
7540 
7541     return 0;
7542 }
7543 #endif
7544 
7545 static int do_sys_futex(int *uaddr, int op, int val,
7546                          const struct timespec *timeout, int *uaddr2,
7547                          int val3)
7548 {
7549 #if HOST_LONG_BITS == 64
7550 #if defined(__NR_futex)
7551     /* 64-bit hosts always use a 64-bit time_t and do not define a _time64 variant */
7552     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7553 
7554 #endif
7555 #else /* HOST_LONG_BITS == 64 */
7556 #if defined(__NR_futex_time64)
7557     if (sizeof(timeout->tv_sec) == 8) {
7558         /* _time64 function on 32bit arch */
7559         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7560     }
7561 #endif
7562 #if defined(__NR_futex)
7563     /* old function on 32bit arch */
7564     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7565 #endif
7566 #endif /* HOST_LONG_BITS == 64 */
7567     g_assert_not_reached();
7568 }
7569 
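/*
 * Same dispatch as do_sys_futex(), but issued through the safe_syscall
 * wrappers so that a blocking futex wait can be interrupted to deliver
 * guest signals; the result is converted with get_errno().
 */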
7570 static int do_safe_futex(int *uaddr, int op, int val,
7571                          const struct timespec *timeout, int *uaddr2,
7572                          int val3)
7573 {
7574 #if HOST_LONG_BITS == 64
7575 #if defined(__NR_futex)
7576     /* 64-bit hosts always use a 64-bit time_t and do not define a _time64 variant */
7577     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7578 #endif
7579 #else /* HOST_LONG_BITS == 64 */
7580 #if defined(__NR_futex_time64)
7581     if (sizeof(timeout->tv_sec) == 8) {
7582         /* _time64 function on 32bit arch */
7583         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7584                                            val3));
7585     }
7586 #endif
7587 #if defined(__NR_futex)
7588     /* old function on 32bit arch */
7589     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7590 #endif
7591 #endif /* HOST_LONG_BITS == 64 */
7592     return -TARGET_ENOSYS;
7593 }
7594 
7595 /* ??? Using host futex calls even when target atomic operations
7596    are not really atomic probably breaks things.  However, implementing
7597    futexes locally would make it tricky to share futexes between multiple
7598    processes.  They are probably useless anyway, because guest atomic
7599    operations won't work either.  */
7600 #if defined(TARGET_NR_futex)
7601 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7602                     target_ulong timeout, target_ulong uaddr2, int val3)
7603 {
7604     struct timespec ts, *pts;
7605     int base_op;
7606 
7607     /* ??? We assume FUTEX_* constants are the same on both host
7608        and target.  */
7609 #ifdef FUTEX_CMD_MASK
7610     base_op = op & FUTEX_CMD_MASK;
7611 #else
7612     base_op = op;
7613 #endif
7614     switch (base_op) {
7615     case FUTEX_WAIT:
7616     case FUTEX_WAIT_BITSET:
7617         if (timeout) {
7618             pts = &ts;
7619             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7620         } else {
7621             pts = NULL;
7622         }
7623         return do_safe_futex(g2h(cpu, uaddr),
7624                              op, tswap32(val), pts, NULL, val3);
7625     case FUTEX_WAKE:
7626         return do_safe_futex(g2h(cpu, uaddr),
7627                              op, val, NULL, NULL, 0);
7628     case FUTEX_FD:
7629         return do_safe_futex(g2h(cpu, uaddr),
7630                              op, val, NULL, NULL, 0);
7631     case FUTEX_REQUEUE:
7632     case FUTEX_CMP_REQUEUE:
7633     case FUTEX_WAKE_OP:
7634         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7635            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7636            But the prototype takes a `struct timespec *'; insert casts
7637            to satisfy the compiler.  We do not need to tswap TIMEOUT
7638            since it's not compared to guest memory.  */
7639         pts = (struct timespec *)(uintptr_t) timeout;
7640         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7641                              (base_op == FUTEX_CMP_REQUEUE
7642                               ? tswap32(val3) : val3));
7643     default:
7644         return -TARGET_ENOSYS;
7645     }
7646 }
7647 #endif
7648 
7649 #if defined(TARGET_NR_futex_time64)
7650 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7651                            int val, target_ulong timeout,
7652                            target_ulong uaddr2, int val3)
7653 {
7654     struct timespec ts, *pts;
7655     int base_op;
7656 
7657     /* ??? We assume FUTEX_* constants are the same on both host
7658        and target.  */
7659 #ifdef FUTEX_CMD_MASK
7660     base_op = op & FUTEX_CMD_MASK;
7661 #else
7662     base_op = op;
7663 #endif
7664     switch (base_op) {
7665     case FUTEX_WAIT:
7666     case FUTEX_WAIT_BITSET:
7667         if (timeout) {
7668             pts = &ts;
7669             if (target_to_host_timespec64(pts, timeout)) {
7670                 return -TARGET_EFAULT;
7671             }
7672         } else {
7673             pts = NULL;
7674         }
7675         return do_safe_futex(g2h(cpu, uaddr), op,
7676                              tswap32(val), pts, NULL, val3);
7677     case FUTEX_WAKE:
7678         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7679     case FUTEX_FD:
7680         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7681     case FUTEX_REQUEUE:
7682     case FUTEX_CMP_REQUEUE:
7683     case FUTEX_WAKE_OP:
7684         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7685            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7686            But the prototype takes a `struct timespec *'; insert casts
7687            to satisfy the compiler.  We do not need to tswap TIMEOUT
7688            since it's not compared to guest memory.  */
7689         pts = (struct timespec *)(uintptr_t) timeout;
7690         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7691                              (base_op == FUTEX_CMP_REQUEUE
7692                               ? tswap32(val3) : val3));
7693     default:
7694         return -TARGET_ENOSYS;
7695     }
7696 }
7697 #endif
7698 
7699 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7700 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7701                                      abi_long handle, abi_long mount_id,
7702                                      abi_long flags)
7703 {
7704     struct file_handle *target_fh;
7705     struct file_handle *fh;
7706     int mid = 0;
7707     abi_long ret;
7708     char *name;
7709     unsigned int size, total_size;
7710 
7711     if (get_user_s32(size, handle)) {
7712         return -TARGET_EFAULT;
7713     }
7714 
7715     name = lock_user_string(pathname);
7716     if (!name) {
7717         return -TARGET_EFAULT;
7718     }
7719 
7720     total_size = sizeof(struct file_handle) + size;
7721     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7722     if (!target_fh) {
7723         unlock_user(name, pathname, 0);
7724         return -TARGET_EFAULT;
7725     }
7726 
7727     fh = g_malloc0(total_size);
7728     fh->handle_bytes = size;
7729 
7730     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7731     unlock_user(name, pathname, 0);
7732 
7733     /* man name_to_handle_at(2):
7734      * Other than the use of the handle_bytes field, the caller should treat
7735      * the file_handle structure as an opaque data type
7736      */
7737 
7738     memcpy(target_fh, fh, total_size);
7739     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7740     target_fh->handle_type = tswap32(fh->handle_type);
7741     g_free(fh);
7742     unlock_user(target_fh, handle, total_size);
7743 
7744     if (put_user_s32(mid, mount_id)) {
7745         return -TARGET_EFAULT;
7746     }
7747 
7748     return ret;
7749 
7750 }
7751 #endif
7752 
7753 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7754 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7755                                      abi_long flags)
7756 {
7757     struct file_handle *target_fh;
7758     struct file_handle *fh;
7759     unsigned int size, total_size;
7760     abi_long ret;
7761 
7762     if (get_user_s32(size, handle)) {
7763         return -TARGET_EFAULT;
7764     }
7765 
7766     total_size = sizeof(struct file_handle) + size;
7767     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7768     if (!target_fh) {
7769         return -TARGET_EFAULT;
7770     }
7771 
7772     fh = g_memdup(target_fh, total_size);
7773     fh->handle_bytes = size;
7774     fh->handle_type = tswap32(target_fh->handle_type);
7775 
7776     ret = get_errno(open_by_handle_at(mount_fd, fh,
7777                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7778 
7779     g_free(fh);
7780 
7781     unlock_user(target_fh, handle, total_size);
7782 
7783     return ret;
7784 }
7785 #endif
7786 
7787 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7788 
7789 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7790 {
7791     int host_flags;
7792     target_sigset_t *target_mask;
7793     sigset_t host_mask;
7794     abi_long ret;
7795 
7796     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7797         return -TARGET_EINVAL;
7798     }
7799     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7800         return -TARGET_EFAULT;
7801     }
7802 
7803     target_to_host_sigset(&host_mask, target_mask);
7804 
7805     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7806 
7807     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7808     if (ret >= 0) {
7809         fd_trans_register(ret, &target_signalfd_trans);
7810     }
7811 
7812     unlock_user_struct(target_mask, mask, 0);
7813 
7814     return ret;
7815 }
7816 #endif
7817 
7818 /* Map host to target signal numbers for the wait family of syscalls.
7819    Assume all other status bits are the same.  */
7820 int host_to_target_waitstatus(int status)
7821 {
7822     if (WIFSIGNALED(status)) {
7823         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7824     }
7825     if (WIFSTOPPED(status)) {
7826         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7827                | (status & 0xff);
7828     }
7829     return status;
7830 }
7831 
7832 static int open_self_cmdline(void *cpu_env, int fd)
7833 {
7834     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7835     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7836     int i;
7837 
7838     for (i = 0; i < bprm->argc; i++) {
7839         size_t len = strlen(bprm->argv[i]) + 1;
7840 
7841         if (write(fd, bprm->argv[i], len) != len) {
7842             return -1;
7843         }
7844     }
7845 
7846     return 0;
7847 }
7848 
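     /*
      * Synthesize /proc/self/maps for the guest: walk the host's own
      * mappings and report only the ranges that correspond to valid guest
      * addresses, translated with h2g(), with the guest stack labelled
      * "[stack]" (plus a synthetic [vsyscall] entry where the target
      * defines one).
      */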
7849 static int open_self_maps(void *cpu_env, int fd)
7850 {
7851     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7852     TaskState *ts = cpu->opaque;
7853     GSList *map_info = read_self_maps();
7854     GSList *s;
7855     int count;
7856 
7857     for (s = map_info; s; s = g_slist_next(s)) {
7858         MapInfo *e = (MapInfo *) s->data;
7859 
7860         if (h2g_valid(e->start)) {
7861             unsigned long min = e->start;
7862             unsigned long max = e->end;
7863             int flags = page_get_flags(h2g(min));
7864             const char *path;
7865 
7866             max = h2g_valid(max - 1) ?
7867                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7868 
7869             if (page_check_range(h2g(min), max - min, flags) == -1) {
7870                 continue;
7871             }
7872 
7873             if (h2g(min) == ts->info->stack_limit) {
7874                 path = "[stack]";
7875             } else {
7876                 path = e->path;
7877             }
7878 
7879             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7880                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7881                             h2g(min), h2g(max - 1) + 1,
7882                             e->is_read ? 'r' : '-',
7883                             e->is_write ? 'w' : '-',
7884                             e->is_exec ? 'x' : '-',
7885                             e->is_priv ? 'p' : '-',
7886                             (uint64_t) e->offset, e->dev, e->inode);
7887             if (path) {
7888                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7889             } else {
7890                 dprintf(fd, "\n");
7891             }
7892         }
7893     }
7894 
7895     free_self_maps(map_info);
7896 
7897 #ifdef TARGET_VSYSCALL_PAGE
7898     /*
7899      * We only support execution from the vsyscall page.
7900      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7901      */
7902     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7903                     " --xp 00000000 00:00 0",
7904                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7905     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7906 #endif
7907 
7908     return 0;
7909 }
7910 
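     /*
      * Synthesize a minimal /proc/self/stat: only the pid, the command name
      * and the start-of-stack fields are filled in; every other field is
      * reported as 0.
      */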
7911 static int open_self_stat(void *cpu_env, int fd)
7912 {
7913     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7914     TaskState *ts = cpu->opaque;
7915     g_autoptr(GString) buf = g_string_new(NULL);
7916     int i;
7917 
7918     for (i = 0; i < 44; i++) {
7919         if (i == 0) {
7920             /* pid */
7921             g_string_printf(buf, FMT_pid " ", getpid());
7922         } else if (i == 1) {
7923             /* app name */
7924             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7925             bin = bin ? bin + 1 : ts->bprm->argv[0];
7926             g_string_printf(buf, "(%.15s) ", bin);
7927         } else if (i == 27) {
7928             /* stack bottom */
7929             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7930         } else {
7931             /* all remaining fields are reported as 0 */
7932             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7933         }
7934 
7935         if (write(fd, buf->str, buf->len) != buf->len) {
7936             return -1;
7937         }
7938     }
7939 
7940     return 0;
7941 }
7942 
7943 static int open_self_auxv(void *cpu_env, int fd)
7944 {
7945     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7946     TaskState *ts = cpu->opaque;
7947     abi_ulong auxv = ts->info->saved_auxv;
7948     abi_ulong len = ts->info->auxv_len;
7949     char *ptr;
7950 
7951     /*
7952      * The auxiliary vector is stored on the target process stack.
7953      * Read the whole auxv vector and copy it to the file.
7954      */
7955     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7956     if (ptr != NULL) {
7957         while (len > 0) {
7958             ssize_t r;
7959             r = write(fd, ptr, len);
7960             if (r <= 0) {
7961                 break;
7962             }
7963             len -= r;
7964             ptr += r;
7965         }
7966         lseek(fd, 0, SEEK_SET);
7967         unlock_user(ptr, auxv, len);
7968     }
7969 
7970     return 0;
7971 }
7972 
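     /*
      * Return non-zero if FILENAME refers to ENTRY in the guest's own proc
      * directory, i.e. /proc/self/ENTRY or /proc/<our pid>/ENTRY.
      */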
7973 static int is_proc_myself(const char *filename, const char *entry)
7974 {
7975     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7976         filename += strlen("/proc/");
7977         if (!strncmp(filename, "self/", strlen("self/"))) {
7978             filename += strlen("self/");
7979         } else if (*filename >= '1' && *filename <= '9') {
7980             char myself[80];
7981             snprintf(myself, sizeof(myself), "%d/", getpid());
7982             if (!strncmp(filename, myself, strlen(myself))) {
7983                 filename += strlen(myself);
7984             } else {
7985                 return 0;
7986             }
7987         } else {
7988             return 0;
7989         }
7990         if (!strcmp(filename, entry)) {
7991             return 1;
7992         }
7993     }
7994     return 0;
7995 }
7996 
7997 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7998     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7999 static int is_proc(const char *filename, const char *entry)
8000 {
8001     return strcmp(filename, entry) == 0;
8002 }
8003 #endif
8004 
8005 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
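     /*
      * /proc/net/route displays addresses as raw hex in host byte order, so
      * when guest and host endianness differ the address fields must be
      * byte-swapped before being passed to the guest.
      */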
8006 static int open_net_route(void *cpu_env, int fd)
8007 {
8008     FILE *fp;
8009     char *line = NULL;
8010     size_t len = 0;
8011     ssize_t read;
8012 
8013     fp = fopen("/proc/net/route", "r");
8014     if (fp == NULL) {
8015         return -1;
8016     }
8017 
8018     /* read header */
8019 
8020     read = getline(&line, &len, fp);
8021     dprintf(fd, "%s", line);
8022 
8023     /* read routes */
8024 
8025     while ((read = getline(&line, &len, fp)) != -1) {
8026         char iface[16];
8027         uint32_t dest, gw, mask;
8028         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8029         int fields;
8030 
8031         fields = sscanf(line,
8032                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8033                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8034                         &mask, &mtu, &window, &irtt);
8035         if (fields != 11) {
8036             continue;
8037         }
8038         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8039                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8040                 metric, tswap32(mask), mtu, window, irtt);
8041     }
8042 
8043     free(line);
8044     fclose(fp);
8045 
8046     return 0;
8047 }
8048 #endif
8049 
8050 #if defined(TARGET_SPARC)
8051 static int open_cpuinfo(void *cpu_env, int fd)
8052 {
8053     dprintf(fd, "type\t\t: sun4u\n");
8054     return 0;
8055 }
8056 #endif
8057 
8058 #if defined(TARGET_HPPA)
8059 static int open_cpuinfo(void *cpu_env, int fd)
8060 {
8061     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8062     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8063     dprintf(fd, "capabilities\t: os32\n");
8064     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8065     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8066     return 0;
8067 }
8068 #endif
8069 
8070 #if defined(TARGET_M68K)
8071 static int open_hardware(void *cpu_env, int fd)
8072 {
8073     dprintf(fd, "Model:\t\tqemu-m68k\n");
8074     return 0;
8075 }
8076 #endif
8077 
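     /*
      * openat() with emulation of a few /proc files.  When the guest opens
      * one of the paths in fakes[], the matching fill() callback writes the
      * emulated contents into an unlinked temporary file, and the descriptor
      * of that file is returned instead of the real host file.
      */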
8078 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8079 {
8080     struct fake_open {
8081         const char *filename;
8082         int (*fill)(void *cpu_env, int fd);
8083         int (*cmp)(const char *s1, const char *s2);
8084     };
8085     const struct fake_open *fake_open;
8086     static const struct fake_open fakes[] = {
8087         { "maps", open_self_maps, is_proc_myself },
8088         { "stat", open_self_stat, is_proc_myself },
8089         { "auxv", open_self_auxv, is_proc_myself },
8090         { "cmdline", open_self_cmdline, is_proc_myself },
8091 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8092         { "/proc/net/route", open_net_route, is_proc },
8093 #endif
8094 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8095         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8096 #endif
8097 #if defined(TARGET_M68K)
8098         { "/proc/hardware", open_hardware, is_proc },
8099 #endif
8100         { NULL, NULL, NULL }
8101     };
8102 
8103     if (is_proc_myself(pathname, "exe")) {
8104         int execfd = qemu_getauxval(AT_EXECFD);
8105         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8106     }
8107 
8108     for (fake_open = fakes; fake_open->filename; fake_open++) {
8109         if (fake_open->cmp(pathname, fake_open->filename)) {
8110             break;
8111         }
8112     }
8113 
8114     if (fake_open->filename) {
8115         const char *tmpdir;
8116         char filename[PATH_MAX];
8117         int fd, r;
8118 
8119         /* create a temporary file to hold the emulated /proc contents */
8120         tmpdir = getenv("TMPDIR");
8121         if (!tmpdir)
8122             tmpdir = "/tmp";
8123         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8124         fd = mkstemp(filename);
8125         if (fd < 0) {
8126             return fd;
8127         }
8128         unlink(filename);
8129 
8130         if ((r = fake_open->fill(cpu_env, fd))) {
8131             int e = errno;
8132             close(fd);
8133             errno = e;
8134             return r;
8135         }
8136         lseek(fd, 0, SEEK_SET);
8137 
8138         return fd;
8139     }
8140 
8141     return safe_openat(dirfd, path(pathname), flags, mode);
8142 }
8143 
8144 #define TIMER_MAGIC 0x0caf0000
8145 #define TIMER_MAGIC_MASK 0xffff0000
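     /*
      * Timer IDs handed to the guest are (TIMER_MAGIC | index), where index
      * is the slot in the g_posix_timers array.  The magic value in the top
      * 16 bits lets get_timer_id() reject IDs that QEMU never handed out.
      */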
8146 
8147 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8148 static target_timer_t get_timer_id(abi_long arg)
8149 {
8150     target_timer_t timerid = arg;
8151 
8152     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8153         return -TARGET_EINVAL;
8154     }
8155 
8156     timerid &= 0xffff;
8157 
8158     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8159         return -TARGET_EINVAL;
8160     }
8161 
8162     return timerid;
8163 }
8164 
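     /*
      * Convert a guest CPU affinity mask (an array of abi_ulong words) into
      * a host mask (an array of unsigned long words).  The copy is done bit
      * by bit so that it is correct for any combination of guest and host
      * word size and byte order.
      */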
8165 static int target_to_host_cpu_mask(unsigned long *host_mask,
8166                                    size_t host_size,
8167                                    abi_ulong target_addr,
8168                                    size_t target_size)
8169 {
8170     unsigned target_bits = sizeof(abi_ulong) * 8;
8171     unsigned host_bits = sizeof(*host_mask) * 8;
8172     abi_ulong *target_mask;
8173     unsigned i, j;
8174 
8175     assert(host_size >= target_size);
8176 
8177     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8178     if (!target_mask) {
8179         return -TARGET_EFAULT;
8180     }
8181     memset(host_mask, 0, host_size);
8182 
8183     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8184         unsigned bit = i * target_bits;
8185         abi_ulong val;
8186 
8187         __get_user(val, &target_mask[i]);
8188         for (j = 0; j < target_bits; j++, bit++) {
8189             if (val & (1UL << j)) {
8190                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8191             }
8192         }
8193     }
8194 
8195     unlock_user(target_mask, target_addr, 0);
8196     return 0;
8197 }
8198 
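     /*
      * The inverse of target_to_host_cpu_mask(): copy a host CPU affinity
      * mask back into a guest abi_ulong array, again one bit at a time.
      */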
8199 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8200                                    size_t host_size,
8201                                    abi_ulong target_addr,
8202                                    size_t target_size)
8203 {
8204     unsigned target_bits = sizeof(abi_ulong) * 8;
8205     unsigned host_bits = sizeof(*host_mask) * 8;
8206     abi_ulong *target_mask;
8207     unsigned i, j;
8208 
8209     assert(host_size >= target_size);
8210 
8211     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8212     if (!target_mask) {
8213         return -TARGET_EFAULT;
8214     }
8215 
8216     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8217         unsigned bit = i * target_bits;
8218         abi_ulong val = 0;
8219 
8220         for (j = 0; j < target_bits; j++, bit++) {
8221             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8222                 val |= 1UL << j;
8223             }
8224         }
8225         __put_user(val, &target_mask[i]);
8226     }
8227 
8228     unlock_user(target_mask, target_addr, target_size);
8229     return 0;
8230 }
8231 
8232 /* This is an internal helper for do_syscall so that it is easier
8233  * to have a single return point at which actions such as logging
8234  * of syscall results can be performed.
8235  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8236  */
8237 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8238                             abi_long arg2, abi_long arg3, abi_long arg4,
8239                             abi_long arg5, abi_long arg6, abi_long arg7,
8240                             abi_long arg8)
8241 {
8242     CPUState *cpu = env_cpu(cpu_env);
8243     abi_long ret;
8244 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8245     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8246     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8247     || defined(TARGET_NR_statx)
8248     struct stat st;
8249 #endif
8250 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8251     || defined(TARGET_NR_fstatfs)
8252     struct statfs stfs;
8253 #endif
8254     void *p;
8255 
8256     switch(num) {
8257     case TARGET_NR_exit:
8258         /* In old applications this may be used to implement _exit(2).
8259            However, in threaded applications it is used for thread termination,
8260            and _exit_group is used for application termination.
8261            Do thread termination if we have more than one thread.  */
8262 
8263         if (block_signals()) {
8264             return -TARGET_ERESTARTSYS;
8265         }
8266 
8267         pthread_mutex_lock(&clone_lock);
8268 
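         /*
          * If there is at least one other CPU (i.e. another guest thread),
          * terminate only this thread; otherwise fall through and exit the
          * whole process below.
          */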
8269         if (CPU_NEXT(first_cpu)) {
8270             TaskState *ts = cpu->opaque;
8271 
8272             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8273             object_unref(OBJECT(cpu));
8274             /*
8275              * At this point the CPU should be unrealized and removed
8276              * from cpu lists. We can clean-up the rest of the thread
8277              * data without the lock held.
8278              */
8279 
8280             pthread_mutex_unlock(&clone_lock);
8281 
8282             if (ts->child_tidptr) {
8283                 put_user_u32(0, ts->child_tidptr);
8284                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8285                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8286             }
8287             thread_cpu = NULL;
8288             g_free(ts);
8289             rcu_unregister_thread();
8290             pthread_exit(NULL);
8291         }
8292 
8293         pthread_mutex_unlock(&clone_lock);
8294         preexit_cleanup(cpu_env, arg1);
8295         _exit(arg1);
8296         return 0; /* avoid warning */
8297     case TARGET_NR_read:
8298         if (arg2 == 0 && arg3 == 0) {
8299             return get_errno(safe_read(arg1, 0, 0));
8300         } else {
8301             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8302                 return -TARGET_EFAULT;
8303             ret = get_errno(safe_read(arg1, p, arg3));
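             /*
              * If a host-to-target data translator is registered for this
              * fd (for example by do_signalfd4() above), let it convert the
              * bytes that were read before they reach the guest.
              */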
8304             if (ret >= 0 &&
8305                 fd_trans_host_to_target_data(arg1)) {
8306                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8307             }
8308             unlock_user(p, arg2, ret);
8309         }
8310         return ret;
8311     case TARGET_NR_write:
8312         if (arg2 == 0 && arg3 == 0) {
8313             return get_errno(safe_write(arg1, 0, 0));
8314         }
8315         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8316             return -TARGET_EFAULT;
8317         if (fd_trans_target_to_host_data(arg1)) {
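             /*
              * Translate in a scratch copy so that the guest's buffer is
              * not modified by the fd's target-to-host data translator.
              */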
8318             void *copy = g_malloc(arg3);
8319             memcpy(copy, p, arg3);
8320             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8321             if (ret >= 0) {
8322                 ret = get_errno(safe_write(arg1, copy, ret));
8323             }
8324             g_free(copy);
8325         } else {
8326             ret = get_errno(safe_write(arg1, p, arg3));
8327         }
8328         unlock_user(p, arg2, 0);
8329         return ret;
8330 
8331 #ifdef TARGET_NR_open
8332     case TARGET_NR_open:
8333         if (!(p = lock_user_string(arg1)))
8334             return -TARGET_EFAULT;
8335         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8336                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8337                                   arg3));
8338         fd_trans_unregister(ret);
8339         unlock_user(p, arg1, 0);
8340         return ret;
8341 #endif
8342     case TARGET_NR_openat:
8343         if (!(p = lock_user_string(arg2)))
8344             return -TARGET_EFAULT;
8345         ret = get_errno(do_openat(cpu_env, arg1, p,
8346                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8347                                   arg4));
8348         fd_trans_unregister(ret);
8349         unlock_user(p, arg2, 0);
8350         return ret;
8351 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8352     case TARGET_NR_name_to_handle_at:
8353         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8354         return ret;
8355 #endif
8356 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8357     case TARGET_NR_open_by_handle_at:
8358         ret = do_open_by_handle_at(arg1, arg2, arg3);
8359         fd_trans_unregister(ret);
8360         return ret;
8361 #endif
8362     case TARGET_NR_close:
8363         fd_trans_unregister(arg1);
8364         return get_errno(close(arg1));
8365 
8366     case TARGET_NR_brk:
8367         return do_brk(arg1);
8368 #ifdef TARGET_NR_fork
8369     case TARGET_NR_fork:
8370         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8371 #endif
8372 #ifdef TARGET_NR_waitpid
8373     case TARGET_NR_waitpid:
8374         {
8375             int status;
8376             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
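             /*
              * Only copy the status back if a child actually changed state;
              * wait4() returns 0 (with WNOHANG) when there is nothing to
              * report, and the status is not meaningful in that case.
              */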
8377             if (!is_error(ret) && arg2 && ret
8378                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8379                 return -TARGET_EFAULT;
8380         }
8381         return ret;
8382 #endif
8383 #ifdef TARGET_NR_waitid
8384     case TARGET_NR_waitid:
8385         {
8386             siginfo_t info;
8387             info.si_pid = 0;
8388             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8389             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8390                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8391                     return -TARGET_EFAULT;
8392                 host_to_target_siginfo(p, &info);
8393                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8394             }
8395         }
8396         return ret;
8397 #endif
8398 #ifdef TARGET_NR_creat /* not on alpha */
8399     case TARGET_NR_creat:
8400         if (!(p = lock_user_string(arg1)))
8401             return -TARGET_EFAULT;
8402         ret = get_errno(creat(p, arg2));
8403         fd_trans_unregister(ret);
8404         unlock_user(p, arg1, 0);
8405         return ret;
8406 #endif
8407 #ifdef TARGET_NR_link
8408     case TARGET_NR_link:
8409         {
8410             void * p2;
8411             p = lock_user_string(arg1);
8412             p2 = lock_user_string(arg2);
8413             if (!p || !p2)
8414                 ret = -TARGET_EFAULT;
8415             else
8416                 ret = get_errno(link(p, p2));
8417             unlock_user(p2, arg2, 0);
8418             unlock_user(p, arg1, 0);
8419         }
8420         return ret;
8421 #endif
8422 #if defined(TARGET_NR_linkat)
8423     case TARGET_NR_linkat:
8424         {
8425             void * p2 = NULL;
8426             if (!arg2 || !arg4)
8427                 return -TARGET_EFAULT;
8428             p  = lock_user_string(arg2);
8429             p2 = lock_user_string(arg4);
8430             if (!p || !p2)
8431                 ret = -TARGET_EFAULT;
8432             else
8433                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8434             unlock_user(p, arg2, 0);
8435             unlock_user(p2, arg4, 0);
8436         }
8437         return ret;
8438 #endif
8439 #ifdef TARGET_NR_unlink
8440     case TARGET_NR_unlink:
8441         if (!(p = lock_user_string(arg1)))
8442             return -TARGET_EFAULT;
8443         ret = get_errno(unlink(p));
8444         unlock_user(p, arg1, 0);
8445         return ret;
8446 #endif
8447 #if defined(TARGET_NR_unlinkat)
8448     case TARGET_NR_unlinkat:
8449         if (!(p = lock_user_string(arg2)))
8450             return -TARGET_EFAULT;
8451         ret = get_errno(unlinkat(arg1, p, arg3));
8452         unlock_user(p, arg2, 0);
8453         return ret;
8454 #endif
8455     case TARGET_NR_execve:
8456         {
8457             char **argp, **envp;
8458             int argc, envc;
8459             abi_ulong gp;
8460             abi_ulong guest_argp;
8461             abi_ulong guest_envp;
8462             abi_ulong addr;
8463             char **q;
8464             int total_size = 0;
8465 
8466             argc = 0;
8467             guest_argp = arg2;
8468             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8469                 if (get_user_ual(addr, gp))
8470                     return -TARGET_EFAULT;
8471                 if (!addr)
8472                     break;
8473                 argc++;
8474             }
8475             envc = 0;
8476             guest_envp = arg3;
8477             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8478                 if (get_user_ual(addr, gp))
8479                     return -TARGET_EFAULT;
8480                 if (!addr)
8481                     break;
8482                 envc++;
8483             }
8484 
8485             argp = g_new0(char *, argc + 1);
8486             envp = g_new0(char *, envc + 1);
8487 
8488             for (gp = guest_argp, q = argp; gp;
8489                   gp += sizeof(abi_ulong), q++) {
8490                 if (get_user_ual(addr, gp))
8491                     goto execve_efault;
8492                 if (!addr)
8493                     break;
8494                 if (!(*q = lock_user_string(addr)))
8495                     goto execve_efault;
8496                 total_size += strlen(*q) + 1;
8497             }
8498             *q = NULL;
8499 
8500             for (gp = guest_envp, q = envp; gp;
8501                   gp += sizeof(abi_ulong), q++) {
8502                 if (get_user_ual(addr, gp))
8503                     goto execve_efault;
8504                 if (!addr)
8505                     break;
8506                 if (!(*q = lock_user_string(addr)))
8507                     goto execve_efault;
8508                 total_size += strlen(*q) + 1;
8509             }
8510             *q = NULL;
8511 
8512             if (!(p = lock_user_string(arg1)))
8513                 goto execve_efault;
8514             /* Although execve() is not an interruptible syscall it is
8515              * a special case where we must use the safe_syscall wrapper:
8516              * if we allow a signal to happen before we make the host
8517              * syscall then we will 'lose' it, because at the point of
8518              * execve the process leaves QEMU's control. So we use the
8519              * safe syscall wrapper to ensure that we either take the
8520              * signal as a guest signal, or else it does not happen
8521              * before the execve completes and makes it the other
8522              * program's problem.
8523              */
8524             ret = get_errno(safe_execve(p, argp, envp));
8525             unlock_user(p, arg1, 0);
8526 
8527             goto execve_end;
8528 
8529         execve_efault:
8530             ret = -TARGET_EFAULT;
8531 
8532         execve_end:
8533             for (gp = guest_argp, q = argp; *q;
8534                   gp += sizeof(abi_ulong), q++) {
8535                 if (get_user_ual(addr, gp)
8536                     || !addr)
8537                     break;
8538                 unlock_user(*q, addr, 0);
8539             }
8540             for (gp = guest_envp, q = envp; *q;
8541                   gp += sizeof(abi_ulong), q++) {
8542                 if (get_user_ual(addr, gp)
8543                     || !addr)
8544                     break;
8545                 unlock_user(*q, addr, 0);
8546             }
8547 
8548             g_free(argp);
8549             g_free(envp);
8550         }
8551         return ret;
8552     case TARGET_NR_chdir:
8553         if (!(p = lock_user_string(arg1)))
8554             return -TARGET_EFAULT;
8555         ret = get_errno(chdir(p));
8556         unlock_user(p, arg1, 0);
8557         return ret;
8558 #ifdef TARGET_NR_time
8559     case TARGET_NR_time:
8560         {
8561             time_t host_time;
8562             ret = get_errno(time(&host_time));
8563             if (!is_error(ret)
8564                 && arg1
8565                 && put_user_sal(host_time, arg1))
8566                 return -TARGET_EFAULT;
8567         }
8568         return ret;
8569 #endif
8570 #ifdef TARGET_NR_mknod
8571     case TARGET_NR_mknod:
8572         if (!(p = lock_user_string(arg1)))
8573             return -TARGET_EFAULT;
8574         ret = get_errno(mknod(p, arg2, arg3));
8575         unlock_user(p, arg1, 0);
8576         return ret;
8577 #endif
8578 #if defined(TARGET_NR_mknodat)
8579     case TARGET_NR_mknodat:
8580         if (!(p = lock_user_string(arg2)))
8581             return -TARGET_EFAULT;
8582         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8583         unlock_user(p, arg2, 0);
8584         return ret;
8585 #endif
8586 #ifdef TARGET_NR_chmod
8587     case TARGET_NR_chmod:
8588         if (!(p = lock_user_string(arg1)))
8589             return -TARGET_EFAULT;
8590         ret = get_errno(chmod(p, arg2));
8591         unlock_user(p, arg1, 0);
8592         return ret;
8593 #endif
8594 #ifdef TARGET_NR_lseek
8595     case TARGET_NR_lseek:
8596         return get_errno(lseek(arg1, arg2, arg3));
8597 #endif
8598 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8599     /* Alpha specific */
8600     case TARGET_NR_getxpid:
8601         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8602         return get_errno(getpid());
8603 #endif
8604 #ifdef TARGET_NR_getpid
8605     case TARGET_NR_getpid:
8606         return get_errno(getpid());
8607 #endif
8608     case TARGET_NR_mount:
8609         {
8610             /* need to look at the data field */
8611             void *p2, *p3;
8612 
8613             if (arg1) {
8614                 p = lock_user_string(arg1);
8615                 if (!p) {
8616                     return -TARGET_EFAULT;
8617                 }
8618             } else {
8619                 p = NULL;
8620             }
8621 
8622             p2 = lock_user_string(arg2);
8623             if (!p2) {
8624                 if (arg1) {
8625                     unlock_user(p, arg1, 0);
8626                 }
8627                 return -TARGET_EFAULT;
8628             }
8629 
8630             if (arg3) {
8631                 p3 = lock_user_string(arg3);
8632                 if (!p3) {
8633                     if (arg1) {
8634                         unlock_user(p, arg1, 0);
8635                     }
8636                     unlock_user(p2, arg2, 0);
8637                     return -TARGET_EFAULT;
8638                 }
8639             } else {
8640                 p3 = NULL;
8641             }
8642 
8643             /* FIXME - arg5 should be locked, but it isn't clear how to
8644              * do that since it's not guaranteed to be a NULL-terminated
8645              * string.
8646              */
8647             if (!arg5) {
8648                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8649             } else {
8650                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8651             }
8652             ret = get_errno(ret);
8653 
8654             if (arg1) {
8655                 unlock_user(p, arg1, 0);
8656             }
8657             unlock_user(p2, arg2, 0);
8658             if (arg3) {
8659                 unlock_user(p3, arg3, 0);
8660             }
8661         }
8662         return ret;
8663 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8664 #if defined(TARGET_NR_umount)
8665     case TARGET_NR_umount:
8666 #endif
8667 #if defined(TARGET_NR_oldumount)
8668     case TARGET_NR_oldumount:
8669 #endif
8670         if (!(p = lock_user_string(arg1)))
8671             return -TARGET_EFAULT;
8672         ret = get_errno(umount(p));
8673         unlock_user(p, arg1, 0);
8674         return ret;
8675 #endif
8676 #ifdef TARGET_NR_stime /* not on alpha */
8677     case TARGET_NR_stime:
8678         {
8679             struct timespec ts;
8680             ts.tv_nsec = 0;
8681             if (get_user_sal(ts.tv_sec, arg1)) {
8682                 return -TARGET_EFAULT;
8683             }
8684             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8685         }
8686 #endif
8687 #ifdef TARGET_NR_alarm /* not on alpha */
8688     case TARGET_NR_alarm:
8689         return alarm(arg1);
8690 #endif
8691 #ifdef TARGET_NR_pause /* not on alpha */
8692     case TARGET_NR_pause:
8693         if (!block_signals()) {
8694             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8695         }
8696         return -TARGET_EINTR;
8697 #endif
8698 #ifdef TARGET_NR_utime
8699     case TARGET_NR_utime:
8700         {
8701             struct utimbuf tbuf, *host_tbuf;
8702             struct target_utimbuf *target_tbuf;
8703             if (arg2) {
8704                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8705                     return -TARGET_EFAULT;
8706                 tbuf.actime = tswapal(target_tbuf->actime);
8707                 tbuf.modtime = tswapal(target_tbuf->modtime);
8708                 unlock_user_struct(target_tbuf, arg2, 0);
8709                 host_tbuf = &tbuf;
8710             } else {
8711                 host_tbuf = NULL;
8712             }
8713             if (!(p = lock_user_string(arg1)))
8714                 return -TARGET_EFAULT;
8715             ret = get_errno(utime(p, host_tbuf));
8716             unlock_user(p, arg1, 0);
8717         }
8718         return ret;
8719 #endif
8720 #ifdef TARGET_NR_utimes
8721     case TARGET_NR_utimes:
8722         {
8723             struct timeval *tvp, tv[2];
8724             if (arg2) {
8725                 if (copy_from_user_timeval(&tv[0], arg2)
8726                     || copy_from_user_timeval(&tv[1],
8727                                               arg2 + sizeof(struct target_timeval)))
8728                     return -TARGET_EFAULT;
8729                 tvp = tv;
8730             } else {
8731                 tvp = NULL;
8732             }
8733             if (!(p = lock_user_string(arg1)))
8734                 return -TARGET_EFAULT;
8735             ret = get_errno(utimes(p, tvp));
8736             unlock_user(p, arg1, 0);
8737         }
8738         return ret;
8739 #endif
8740 #if defined(TARGET_NR_futimesat)
8741     case TARGET_NR_futimesat:
8742         {
8743             struct timeval *tvp, tv[2];
8744             if (arg3) {
8745                 if (copy_from_user_timeval(&tv[0], arg3)
8746                     || copy_from_user_timeval(&tv[1],
8747                                               arg3 + sizeof(struct target_timeval)))
8748                     return -TARGET_EFAULT;
8749                 tvp = tv;
8750             } else {
8751                 tvp = NULL;
8752             }
8753             if (!(p = lock_user_string(arg2))) {
8754                 return -TARGET_EFAULT;
8755             }
8756             ret = get_errno(futimesat(arg1, path(p), tvp));
8757             unlock_user(p, arg2, 0);
8758         }
8759         return ret;
8760 #endif
8761 #ifdef TARGET_NR_access
8762     case TARGET_NR_access:
8763         if (!(p = lock_user_string(arg1))) {
8764             return -TARGET_EFAULT;
8765         }
8766         ret = get_errno(access(path(p), arg2));
8767         unlock_user(p, arg1, 0);
8768         return ret;
8769 #endif
8770 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8771     case TARGET_NR_faccessat:
8772         if (!(p = lock_user_string(arg2))) {
8773             return -TARGET_EFAULT;
8774         }
8775         ret = get_errno(faccessat(arg1, p, arg3, 0));
8776         unlock_user(p, arg2, 0);
8777         return ret;
8778 #endif
8779 #ifdef TARGET_NR_nice /* not on alpha */
8780     case TARGET_NR_nice:
8781         return get_errno(nice(arg1));
8782 #endif
8783     case TARGET_NR_sync:
8784         sync();
8785         return 0;
8786 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8787     case TARGET_NR_syncfs:
8788         return get_errno(syncfs(arg1));
8789 #endif
8790     case TARGET_NR_kill:
8791         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8792 #ifdef TARGET_NR_rename
8793     case TARGET_NR_rename:
8794         {
8795             void *p2;
8796             p = lock_user_string(arg1);
8797             p2 = lock_user_string(arg2);
8798             if (!p || !p2)
8799                 ret = -TARGET_EFAULT;
8800             else
8801                 ret = get_errno(rename(p, p2));
8802             unlock_user(p2, arg2, 0);
8803             unlock_user(p, arg1, 0);
8804         }
8805         return ret;
8806 #endif
8807 #if defined(TARGET_NR_renameat)
8808     case TARGET_NR_renameat:
8809         {
8810             void *p2;
8811             p  = lock_user_string(arg2);
8812             p2 = lock_user_string(arg4);
8813             if (!p || !p2)
8814                 ret = -TARGET_EFAULT;
8815             else
8816                 ret = get_errno(renameat(arg1, p, arg3, p2));
8817             unlock_user(p2, arg4, 0);
8818             unlock_user(p, arg2, 0);
8819         }
8820         return ret;
8821 #endif
8822 #if defined(TARGET_NR_renameat2)
8823     case TARGET_NR_renameat2:
8824         {
8825             void *p2;
8826             p  = lock_user_string(arg2);
8827             p2 = lock_user_string(arg4);
8828             if (!p || !p2) {
8829                 ret = -TARGET_EFAULT;
8830             } else {
8831                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8832             }
8833             unlock_user(p2, arg4, 0);
8834             unlock_user(p, arg2, 0);
8835         }
8836         return ret;
8837 #endif
8838 #ifdef TARGET_NR_mkdir
8839     case TARGET_NR_mkdir:
8840         if (!(p = lock_user_string(arg1)))
8841             return -TARGET_EFAULT;
8842         ret = get_errno(mkdir(p, arg2));
8843         unlock_user(p, arg1, 0);
8844         return ret;
8845 #endif
8846 #if defined(TARGET_NR_mkdirat)
8847     case TARGET_NR_mkdirat:
8848         if (!(p = lock_user_string(arg2)))
8849             return -TARGET_EFAULT;
8850         ret = get_errno(mkdirat(arg1, p, arg3));
8851         unlock_user(p, arg2, 0);
8852         return ret;
8853 #endif
8854 #ifdef TARGET_NR_rmdir
8855     case TARGET_NR_rmdir:
8856         if (!(p = lock_user_string(arg1)))
8857             return -TARGET_EFAULT;
8858         ret = get_errno(rmdir(p));
8859         unlock_user(p, arg1, 0);
8860         return ret;
8861 #endif
8862     case TARGET_NR_dup:
8863         ret = get_errno(dup(arg1));
8864         if (ret >= 0) {
8865             fd_trans_dup(arg1, ret);
8866         }
8867         return ret;
8868 #ifdef TARGET_NR_pipe
8869     case TARGET_NR_pipe:
8870         return do_pipe(cpu_env, arg1, 0, 0);
8871 #endif
8872 #ifdef TARGET_NR_pipe2
8873     case TARGET_NR_pipe2:
8874         return do_pipe(cpu_env, arg1,
8875                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8876 #endif
8877     case TARGET_NR_times:
8878         {
8879             struct target_tms *tmsp;
8880             struct tms tms;
8881             ret = get_errno(times(&tms));
8882             if (arg1) {
8883                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8884                 if (!tmsp)
8885                     return -TARGET_EFAULT;
8886                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8887                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8888                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8889                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8890             }
8891             if (!is_error(ret))
8892                 ret = host_to_target_clock_t(ret);
8893         }
8894         return ret;
8895     case TARGET_NR_acct:
8896         if (arg1 == 0) {
8897             ret = get_errno(acct(NULL));
8898         } else {
8899             if (!(p = lock_user_string(arg1))) {
8900                 return -TARGET_EFAULT;
8901             }
8902             ret = get_errno(acct(path(p)));
8903             unlock_user(p, arg1, 0);
8904         }
8905         return ret;
8906 #ifdef TARGET_NR_umount2
8907     case TARGET_NR_umount2:
8908         if (!(p = lock_user_string(arg1)))
8909             return -TARGET_EFAULT;
8910         ret = get_errno(umount2(p, arg2));
8911         unlock_user(p, arg1, 0);
8912         return ret;
8913 #endif
8914     case TARGET_NR_ioctl:
8915         return do_ioctl(arg1, arg2, arg3);
8916 #ifdef TARGET_NR_fcntl
8917     case TARGET_NR_fcntl:
8918         return do_fcntl(arg1, arg2, arg3);
8919 #endif
8920     case TARGET_NR_setpgid:
8921         return get_errno(setpgid(arg1, arg2));
8922     case TARGET_NR_umask:
8923         return get_errno(umask(arg1));
8924     case TARGET_NR_chroot:
8925         if (!(p = lock_user_string(arg1)))
8926             return -TARGET_EFAULT;
8927         ret = get_errno(chroot(p));
8928         unlock_user(p, arg1, 0);
8929         return ret;
8930 #ifdef TARGET_NR_dup2
8931     case TARGET_NR_dup2:
8932         ret = get_errno(dup2(arg1, arg2));
8933         if (ret >= 0) {
8934             fd_trans_dup(arg1, arg2);
8935         }
8936         return ret;
8937 #endif
8938 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8939     case TARGET_NR_dup3:
8940     {
8941         int host_flags;
8942 
8943         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8944             return -TARGET_EINVAL;
8945         }
8946         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8947         ret = get_errno(dup3(arg1, arg2, host_flags));
8948         if (ret >= 0) {
8949             fd_trans_dup(arg1, arg2);
8950         }
8951         return ret;
8952     }
8953 #endif
8954 #ifdef TARGET_NR_getppid /* not on alpha */
8955     case TARGET_NR_getppid:
8956         return get_errno(getppid());
8957 #endif
8958 #ifdef TARGET_NR_getpgrp
8959     case TARGET_NR_getpgrp:
8960         return get_errno(getpgrp());
8961 #endif
8962     case TARGET_NR_setsid:
8963         return get_errno(setsid());
8964 #ifdef TARGET_NR_sigaction
8965     case TARGET_NR_sigaction:
8966         {
8967 #if defined(TARGET_ALPHA)
8968             struct target_sigaction act, oact, *pact = 0;
8969             struct target_old_sigaction *old_act;
8970             if (arg2) {
8971                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8972                     return -TARGET_EFAULT;
8973                 act._sa_handler = old_act->_sa_handler;
8974                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8975                 act.sa_flags = old_act->sa_flags;
8976                 act.sa_restorer = 0;
8977                 unlock_user_struct(old_act, arg2, 0);
8978                 pact = &act;
8979             }
8980             ret = get_errno(do_sigaction(arg1, pact, &oact));
8981             if (!is_error(ret) && arg3) {
8982                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8983                     return -TARGET_EFAULT;
8984                 old_act->_sa_handler = oact._sa_handler;
8985                 old_act->sa_mask = oact.sa_mask.sig[0];
8986                 old_act->sa_flags = oact.sa_flags;
8987                 unlock_user_struct(old_act, arg3, 1);
8988             }
8989 #elif defined(TARGET_MIPS)
8990             struct target_sigaction act, oact, *pact, *old_act;
8991 
8992             if (arg2) {
8993                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8994                     return -TARGET_EFAULT;
8995                 act._sa_handler = old_act->_sa_handler;
8996                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8997                 act.sa_flags = old_act->sa_flags;
8998                 unlock_user_struct(old_act, arg2, 0);
8999                 pact = &act;
9000             } else {
9001                 pact = NULL;
9002             }
9003 
9004             ret = get_errno(do_sigaction(arg1, pact, &oact));
9005 
9006             if (!is_error(ret) && arg3) {
9007                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9008                     return -TARGET_EFAULT;
9009                 old_act->_sa_handler = oact._sa_handler;
9010                 old_act->sa_flags = oact.sa_flags;
9011                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9012                 old_act->sa_mask.sig[1] = 0;
9013                 old_act->sa_mask.sig[2] = 0;
9014                 old_act->sa_mask.sig[3] = 0;
9015                 unlock_user_struct(old_act, arg3, 1);
9016             }
9017 #else
9018             struct target_old_sigaction *old_act;
9019             struct target_sigaction act, oact, *pact;
9020             if (arg2) {
9021                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9022                     return -TARGET_EFAULT;
9023                 act._sa_handler = old_act->_sa_handler;
9024                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9025                 act.sa_flags = old_act->sa_flags;
9026                 act.sa_restorer = old_act->sa_restorer;
9027 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9028                 act.ka_restorer = 0;
9029 #endif
9030                 unlock_user_struct(old_act, arg2, 0);
9031                 pact = &act;
9032             } else {
9033                 pact = NULL;
9034             }
9035             ret = get_errno(do_sigaction(arg1, pact, &oact));
9036             if (!is_error(ret) && arg3) {
9037                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9038                     return -TARGET_EFAULT;
9039                 old_act->_sa_handler = oact._sa_handler;
9040                 old_act->sa_mask = oact.sa_mask.sig[0];
9041                 old_act->sa_flags = oact.sa_flags;
9042                 old_act->sa_restorer = oact.sa_restorer;
9043                 unlock_user_struct(old_act, arg3, 1);
9044             }
9045 #endif
9046         }
9047         return ret;
9048 #endif
9049     case TARGET_NR_rt_sigaction:
9050         {
9051 #if defined(TARGET_ALPHA)
9052             /* For Alpha and SPARC this is a 5 argument syscall, with
9053              * a 'restorer' parameter which must be copied into the
9054              * sa_restorer field of the sigaction struct.
9055              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9056              * and arg5 is the sigsetsize.
9057              * Alpha also has a separate rt_sigaction struct that it uses
9058              * here; SPARC uses the usual sigaction struct.
9059              */
9060             struct target_rt_sigaction *rt_act;
9061             struct target_sigaction act, oact, *pact = 0;
9062 
9063             if (arg4 != sizeof(target_sigset_t)) {
9064                 return -TARGET_EINVAL;
9065             }
9066             if (arg2) {
9067                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9068                     return -TARGET_EFAULT;
9069                 act._sa_handler = rt_act->_sa_handler;
9070                 act.sa_mask = rt_act->sa_mask;
9071                 act.sa_flags = rt_act->sa_flags;
9072                 act.sa_restorer = arg5;
9073                 unlock_user_struct(rt_act, arg2, 0);
9074                 pact = &act;
9075             }
9076             ret = get_errno(do_sigaction(arg1, pact, &oact));
9077             if (!is_error(ret) && arg3) {
9078                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9079                     return -TARGET_EFAULT;
9080                 rt_act->_sa_handler = oact._sa_handler;
9081                 rt_act->sa_mask = oact.sa_mask;
9082                 rt_act->sa_flags = oact.sa_flags;
9083                 unlock_user_struct(rt_act, arg3, 1);
9084             }
9085 #else
9086 #ifdef TARGET_SPARC
9087             target_ulong restorer = arg4;
9088             target_ulong sigsetsize = arg5;
9089 #else
9090             target_ulong sigsetsize = arg4;
9091 #endif
9092             struct target_sigaction *act;
9093             struct target_sigaction *oact;
9094 
9095             if (sigsetsize != sizeof(target_sigset_t)) {
9096                 return -TARGET_EINVAL;
9097             }
9098             if (arg2) {
9099                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9100                     return -TARGET_EFAULT;
9101                 }
9102 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9103                 act->ka_restorer = restorer;
9104 #endif
9105             } else {
9106                 act = NULL;
9107             }
9108             if (arg3) {
9109                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9110                     ret = -TARGET_EFAULT;
9111                     goto rt_sigaction_fail;
9112                 }
9113             } else
9114                 oact = NULL;
9115             ret = get_errno(do_sigaction(arg1, act, oact));
9116         rt_sigaction_fail:
9117             if (act)
9118                 unlock_user_struct(act, arg2, 0);
9119             if (oact)
9120                 unlock_user_struct(oact, arg3, 1);
9121 #endif
9122         }
9123         return ret;
9124 #ifdef TARGET_NR_sgetmask /* not on alpha */
9125     case TARGET_NR_sgetmask:
9126         {
9127             sigset_t cur_set;
9128             abi_ulong target_set;
9129             ret = do_sigprocmask(0, NULL, &cur_set);
9130             if (!ret) {
9131                 host_to_target_old_sigset(&target_set, &cur_set);
9132                 ret = target_set;
9133             }
9134         }
9135         return ret;
9136 #endif
9137 #ifdef TARGET_NR_ssetmask /* not on alpha */
9138     case TARGET_NR_ssetmask:
9139         {
9140             sigset_t set, oset;
9141             abi_ulong target_set = arg1;
9142             target_to_host_old_sigset(&set, &target_set);
9143             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9144             if (!ret) {
9145                 host_to_target_old_sigset(&target_set, &oset);
9146                 ret = target_set;
9147             }
9148         }
9149         return ret;
9150 #endif
9151 #ifdef TARGET_NR_sigprocmask
9152     case TARGET_NR_sigprocmask:
9153         {
9154 #if defined(TARGET_ALPHA)
9155             sigset_t set, oldset;
9156             abi_ulong mask;
9157             int how;
9158 
9159             switch (arg1) {
9160             case TARGET_SIG_BLOCK:
9161                 how = SIG_BLOCK;
9162                 break;
9163             case TARGET_SIG_UNBLOCK:
9164                 how = SIG_UNBLOCK;
9165                 break;
9166             case TARGET_SIG_SETMASK:
9167                 how = SIG_SETMASK;
9168                 break;
9169             default:
9170                 return -TARGET_EINVAL;
9171             }
9172             mask = arg2;
9173             target_to_host_old_sigset(&set, &mask);
9174 
9175             ret = do_sigprocmask(how, &set, &oldset);
9176             if (!is_error(ret)) {
9177                 host_to_target_old_sigset(&mask, &oldset);
9178                 ret = mask;
9179                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9180             }
9181 #else
9182             sigset_t set, oldset, *set_ptr;
9183             int how;
9184 
9185             if (arg2) {
9186                 switch (arg1) {
9187                 case TARGET_SIG_BLOCK:
9188                     how = SIG_BLOCK;
9189                     break;
9190                 case TARGET_SIG_UNBLOCK:
9191                     how = SIG_UNBLOCK;
9192                     break;
9193                 case TARGET_SIG_SETMASK:
9194                     how = SIG_SETMASK;
9195                     break;
9196                 default:
9197                     return -TARGET_EINVAL;
9198                 }
9199                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9200                     return -TARGET_EFAULT;
9201                 target_to_host_old_sigset(&set, p);
9202                 unlock_user(p, arg2, 0);
9203                 set_ptr = &set;
9204             } else {
9205                 how = 0;
9206                 set_ptr = NULL;
9207             }
9208             ret = do_sigprocmask(how, set_ptr, &oldset);
9209             if (!is_error(ret) && arg3) {
9210                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9211                     return -TARGET_EFAULT;
9212                 host_to_target_old_sigset(p, &oldset);
9213                 unlock_user(p, arg3, sizeof(target_sigset_t));
9214             }
9215 #endif
9216         }
9217         return ret;
9218 #endif
9219     case TARGET_NR_rt_sigprocmask:
9220         {
9221             int how = arg1;
9222             sigset_t set, oldset, *set_ptr;
9223 
9224             if (arg4 != sizeof(target_sigset_t)) {
9225                 return -TARGET_EINVAL;
9226             }
9227 
9228             if (arg2) {
9229                 switch(how) {
9230                 case TARGET_SIG_BLOCK:
9231                     how = SIG_BLOCK;
9232                     break;
9233                 case TARGET_SIG_UNBLOCK:
9234                     how = SIG_UNBLOCK;
9235                     break;
9236                 case TARGET_SIG_SETMASK:
9237                     how = SIG_SETMASK;
9238                     break;
9239                 default:
9240                     return -TARGET_EINVAL;
9241                 }
9242                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9243                     return -TARGET_EFAULT;
9244                 target_to_host_sigset(&set, p);
9245                 unlock_user(p, arg2, 0);
9246                 set_ptr = &set;
9247             } else {
9248                 how = 0;
9249                 set_ptr = NULL;
9250             }
9251             ret = do_sigprocmask(how, set_ptr, &oldset);
9252             if (!is_error(ret) && arg3) {
9253                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9254                     return -TARGET_EFAULT;
9255                 host_to_target_sigset(p, &oldset);
9256                 unlock_user(p, arg3, sizeof(target_sigset_t));
9257             }
9258         }
9259         return ret;
9260 #ifdef TARGET_NR_sigpending
9261     case TARGET_NR_sigpending:
9262         {
9263             sigset_t set;
9264             ret = get_errno(sigpending(&set));
9265             if (!is_error(ret)) {
9266                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9267                     return -TARGET_EFAULT;
9268                 host_to_target_old_sigset(p, &set);
9269                 unlock_user(p, arg1, sizeof(target_sigset_t));
9270             }
9271         }
9272         return ret;
9273 #endif
9274     case TARGET_NR_rt_sigpending:
9275         {
9276             sigset_t set;
9277 
9278             /* Yes, this check is >, not != like most.  We follow the
9279              * kernel's logic here: NR_sigpending is implemented through
9280              * the same code path, and in that case the old_sigset_t is
9281              * smaller in size.
9282              */
9283             if (arg2 > sizeof(target_sigset_t)) {
9284                 return -TARGET_EINVAL;
9285             }
9286 
9287             ret = get_errno(sigpending(&set));
9288             if (!is_error(ret)) {
9289                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9290                     return -TARGET_EFAULT;
9291                 host_to_target_sigset(p, &set);
9292                 unlock_user(p, arg1, sizeof(target_sigset_t));
9293             }
9294         }
9295         return ret;
9296 #ifdef TARGET_NR_sigsuspend
9297     case TARGET_NR_sigsuspend:
9298         {
9299             TaskState *ts = cpu->opaque;
9300 #if defined(TARGET_ALPHA)
9301             abi_ulong mask = arg1;
9302             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9303 #else
9304             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9305                 return -TARGET_EFAULT;
9306             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9307             unlock_user(p, arg1, 0);
9308 #endif
9309             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9310                                                SIGSET_T_SIZE));
9311             if (ret != -TARGET_ERESTARTSYS) {
9312                 ts->in_sigsuspend = 1;
9313             }
9314         }
9315         return ret;
9316 #endif
9317     case TARGET_NR_rt_sigsuspend:
9318         {
9319             TaskState *ts = cpu->opaque;
9320 
9321             if (arg2 != sizeof(target_sigset_t)) {
9322                 return -TARGET_EINVAL;
9323             }
9324             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9325                 return -TARGET_EFAULT;
9326             target_to_host_sigset(&ts->sigsuspend_mask, p);
9327             unlock_user(p, arg1, 0);
9328             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9329                                                SIGSET_T_SIZE));
9330             if (ret != -TARGET_ERESTARTSYS) {
9331                 ts->in_sigsuspend = 1;
9332             }
9333         }
9334         return ret;
9335 #ifdef TARGET_NR_rt_sigtimedwait
9336     case TARGET_NR_rt_sigtimedwait:
9337         {
9338             sigset_t set;
9339             struct timespec uts, *puts;
9340             siginfo_t uinfo;
9341 
9342             if (arg4 != sizeof(target_sigset_t)) {
9343                 return -TARGET_EINVAL;
9344             }
9345 
9346             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9347                 return -TARGET_EFAULT;
9348             target_to_host_sigset(&set, p);
9349             unlock_user(p, arg1, 0);
9350             if (arg3) {
9351                 puts = &uts;
9352                 if (target_to_host_timespec(puts, arg3)) {
9353                     return -TARGET_EFAULT;
9354                 }
9355             } else {
9356                 puts = NULL;
9357             }
9358             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9359                                                  SIGSET_T_SIZE));
9360             if (!is_error(ret)) {
9361                 if (arg2) {
9362                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9363                                   0);
9364                     if (!p) {
9365                         return -TARGET_EFAULT;
9366                     }
9367                     host_to_target_siginfo(p, &uinfo);
9368                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9369                 }
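                /* On success the return value is the signal number taken;
                 * convert it to the target's signal numbering. */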
9370                 ret = host_to_target_signal(ret);
9371             }
9372         }
9373         return ret;
9374 #endif
9375 #ifdef TARGET_NR_rt_sigtimedwait_time64
9376     case TARGET_NR_rt_sigtimedwait_time64:
9377         {
9378             sigset_t set;
9379             struct timespec uts, *puts;
9380             siginfo_t uinfo;
9381 
9382             if (arg4 != sizeof(target_sigset_t)) {
9383                 return -TARGET_EINVAL;
9384             }
9385 
9386             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9387             if (!p) {
9388                 return -TARGET_EFAULT;
9389             }
9390             target_to_host_sigset(&set, p);
9391             unlock_user(p, arg1, 0);
9392             if (arg3) {
9393                 puts = &uts;
9394                 if (target_to_host_timespec64(puts, arg3)) {
9395                     return -TARGET_EFAULT;
9396                 }
9397             } else {
9398                 puts = NULL;
9399             }
9400             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9401                                                  SIGSET_T_SIZE));
9402             if (!is_error(ret)) {
9403                 if (arg2) {
9404                     p = lock_user(VERIFY_WRITE, arg2,
9405                                   sizeof(target_siginfo_t), 0);
9406                     if (!p) {
9407                         return -TARGET_EFAULT;
9408                     }
9409                     host_to_target_siginfo(p, &uinfo);
9410                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9411                 }
9412                 ret = host_to_target_signal(ret);
9413             }
9414         }
9415         return ret;
9416 #endif
9417     case TARGET_NR_rt_sigqueueinfo:
9418         {
9419             siginfo_t uinfo;
9420 
9421             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9422             if (!p) {
9423                 return -TARGET_EFAULT;
9424             }
9425             target_to_host_siginfo(&uinfo, p);
9426             unlock_user(p, arg3, 0);
9427             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9428         }
9429         return ret;
9430     case TARGET_NR_rt_tgsigqueueinfo:
9431         {
9432             siginfo_t uinfo;
9433 
9434             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9435             if (!p) {
9436                 return -TARGET_EFAULT;
9437             }
9438             target_to_host_siginfo(&uinfo, p);
9439             unlock_user(p, arg4, 0);
9440             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9441         }
9442         return ret;
9443 #ifdef TARGET_NR_sigreturn
9444     case TARGET_NR_sigreturn:
9445         if (block_signals()) {
9446             return -TARGET_ERESTARTSYS;
9447         }
9448         return do_sigreturn(cpu_env);
9449 #endif
9450     case TARGET_NR_rt_sigreturn:
9451         if (block_signals()) {
9452             return -TARGET_ERESTARTSYS;
9453         }
9454         return do_rt_sigreturn(cpu_env);
9455     case TARGET_NR_sethostname:
9456         if (!(p = lock_user_string(arg1)))
9457             return -TARGET_EFAULT;
9458         ret = get_errno(sethostname(p, arg2));
9459         unlock_user(p, arg1, 0);
9460         return ret;
9461 #ifdef TARGET_NR_setrlimit
9462     case TARGET_NR_setrlimit:
9463         {
9464             int resource = target_to_host_resource(arg1);
9465             struct target_rlimit *target_rlim;
9466             struct rlimit rlim;
9467             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9468                 return -TARGET_EFAULT;
9469             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9470             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9471             unlock_user_struct(target_rlim, arg2, 0);
9472             /*
9473              * If we just passed through resource limit settings for memory then
9474              * they would also apply to QEMU's own allocations, and QEMU will
9475              * crash or hang or die if its allocations fail. Ideally we would
9476              * track the guest allocations in QEMU and apply the limits ourselves.
9477              * For now, just tell the guest the call succeeded but don't actually
9478              * limit anything.
9479              */
9480             if (resource != RLIMIT_AS &&
9481                 resource != RLIMIT_DATA &&
9482                 resource != RLIMIT_STACK) {
9483                 return get_errno(setrlimit(resource, &rlim));
9484             } else {
9485                 return 0;
9486             }
9487         }
9488 #endif
9489 #ifdef TARGET_NR_getrlimit
9490     case TARGET_NR_getrlimit:
9491         {
9492             int resource = target_to_host_resource(arg1);
9493             struct target_rlimit *target_rlim;
9494             struct rlimit rlim;
9495 
9496             ret = get_errno(getrlimit(resource, &rlim));
9497             if (!is_error(ret)) {
9498                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9499                     return -TARGET_EFAULT;
9500                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9501                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9502                 unlock_user_struct(target_rlim, arg2, 1);
9503             }
9504         }
9505         return ret;
9506 #endif
9507     case TARGET_NR_getrusage:
9508         {
9509             struct rusage rusage;
9510             ret = get_errno(getrusage(arg1, &rusage));
9511             if (!is_error(ret)) {
9512                 ret = host_to_target_rusage(arg2, &rusage);
9513             }
9514         }
9515         return ret;
9516 #if defined(TARGET_NR_gettimeofday)
9517     case TARGET_NR_gettimeofday:
9518         {
9519             struct timeval tv;
9520             struct timezone tz;
9521 
9522             ret = get_errno(gettimeofday(&tv, &tz));
9523             if (!is_error(ret)) {
9524                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9525                     return -TARGET_EFAULT;
9526                 }
9527                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9528                     return -TARGET_EFAULT;
9529                 }
9530             }
9531         }
9532         return ret;
9533 #endif
9534 #if defined(TARGET_NR_settimeofday)
9535     case TARGET_NR_settimeofday:
9536         {
9537             struct timeval tv, *ptv = NULL;
9538             struct timezone tz, *ptz = NULL;
9539 
9540             if (arg1) {
9541                 if (copy_from_user_timeval(&tv, arg1)) {
9542                     return -TARGET_EFAULT;
9543                 }
9544                 ptv = &tv;
9545             }
9546 
9547             if (arg2) {
9548                 if (copy_from_user_timezone(&tz, arg2)) {
9549                     return -TARGET_EFAULT;
9550                 }
9551                 ptz = &tz;
9552             }
9553 
9554             return get_errno(settimeofday(ptv, ptz));
9555         }
9556 #endif
9557 #if defined(TARGET_NR_select)
9558     case TARGET_NR_select:
9559 #if defined(TARGET_WANT_NI_OLD_SELECT)
9560         /* Some architectures used to have old_select here
9561          * but now return ENOSYS for it.
9562          */
9563         ret = -TARGET_ENOSYS;
9564 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9565         ret = do_old_select(arg1);
9566 #else
9567         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9568 #endif
9569         return ret;
9570 #endif
9571 #ifdef TARGET_NR_pselect6
9572     case TARGET_NR_pselect6:
9573         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9574 #endif
9575 #ifdef TARGET_NR_pselect6_time64
9576     case TARGET_NR_pselect6_time64:
9577         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9578 #endif
9579 #ifdef TARGET_NR_symlink
9580     case TARGET_NR_symlink:
9581         {
9582             void *p2;
9583             p = lock_user_string(arg1);
9584             p2 = lock_user_string(arg2);
9585             if (!p || !p2)
9586                 ret = -TARGET_EFAULT;
9587             else
9588                 ret = get_errno(symlink(p, p2));
9589             unlock_user(p2, arg2, 0);
9590             unlock_user(p, arg1, 0);
9591         }
9592         return ret;
9593 #endif
9594 #if defined(TARGET_NR_symlinkat)
9595     case TARGET_NR_symlinkat:
9596         {
9597             void *p2;
9598             p  = lock_user_string(arg1);
9599             p2 = lock_user_string(arg3);
9600             if (!p || !p2)
9601                 ret = -TARGET_EFAULT;
9602             else
9603                 ret = get_errno(symlinkat(p, arg2, p2));
9604             unlock_user(p2, arg3, 0);
9605             unlock_user(p, arg1, 0);
9606         }
9607         return ret;
9608 #endif
9609 #ifdef TARGET_NR_readlink
9610     case TARGET_NR_readlink:
9611         {
9612             void *p2;
9613             p = lock_user_string(arg1);
9614             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9615             if (!p || !p2) {
9616                 ret = -TARGET_EFAULT;
9617             } else if (!arg3) {
9618                 /* Short circuit this for the magic exe check. */
9619                 ret = -TARGET_EINVAL;
9620             } else if (is_proc_myself((const char *)p, "exe")) {
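                /* The guest is reading /proc/self/exe: report the path of the
                 * binary being emulated rather than that of QEMU itself. */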
9621                 char real[PATH_MAX], *temp;
9622                 temp = realpath(exec_path, real);
9623                 /* Return value is # of bytes that we wrote to the buffer. */
9624                 if (temp == NULL) {
9625                     ret = get_errno(-1);
9626                 } else {
9627                     /* Don't worry about sign mismatch as earlier mapping
9628                      * logic would have thrown a bad address error. */
9629                     ret = MIN(strlen(real), arg3);
9630                     /* We cannot NUL terminate the string. */
9631                     memcpy(p2, real, ret);
9632                 }
9633             } else {
9634                 ret = get_errno(readlink(path(p), p2, arg3));
9635             }
9636             unlock_user(p2, arg2, ret);
9637             unlock_user(p, arg1, 0);
9638         }
9639         return ret;
9640 #endif
9641 #if defined(TARGET_NR_readlinkat)
9642     case TARGET_NR_readlinkat:
9643         {
9644             void *p2;
9645             p  = lock_user_string(arg2);
9646             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9647             if (!p || !p2) {
9648                 ret = -TARGET_EFAULT;
9649             } else if (is_proc_myself((const char *)p, "exe")) {
9650                 char real[PATH_MAX], *temp;
9651                 temp = realpath(exec_path, real);
9652                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9653                 snprintf((char *)p2, arg4, "%s", real);
9654             } else {
9655                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9656             }
9657             unlock_user(p2, arg3, ret);
9658             unlock_user(p, arg2, 0);
9659         }
9660         return ret;
9661 #endif
9662 #ifdef TARGET_NR_swapon
9663     case TARGET_NR_swapon:
9664         if (!(p = lock_user_string(arg1)))
9665             return -TARGET_EFAULT;
9666         ret = get_errno(swapon(p, arg2));
9667         unlock_user(p, arg1, 0);
9668         return ret;
9669 #endif
9670     case TARGET_NR_reboot:
9671         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9672            /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2 and ignored otherwise */
9673            p = lock_user_string(arg4);
9674            if (!p) {
9675                return -TARGET_EFAULT;
9676            }
9677            ret = get_errno(reboot(arg1, arg2, arg3, p));
9678            unlock_user(p, arg4, 0);
9679         } else {
9680            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9681         }
9682         return ret;
9683 #ifdef TARGET_NR_mmap
9684     case TARGET_NR_mmap:
9685 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9686     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9687     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9688     || defined(TARGET_S390X)
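        /* On these targets the old mmap syscall takes a single argument
         * pointing to a block of six values in guest memory. */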
9689         {
9690             abi_ulong *v;
9691             abi_ulong v1, v2, v3, v4, v5, v6;
9692             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9693                 return -TARGET_EFAULT;
9694             v1 = tswapal(v[0]);
9695             v2 = tswapal(v[1]);
9696             v3 = tswapal(v[2]);
9697             v4 = tswapal(v[3]);
9698             v5 = tswapal(v[4]);
9699             v6 = tswapal(v[5]);
9700             unlock_user(v, arg1, 0);
9701             ret = get_errno(target_mmap(v1, v2, v3,
9702                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9703                                         v5, v6));
9704         }
9705 #else
9706         ret = get_errno(target_mmap(arg1, arg2, arg3,
9707                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9708                                     arg5,
9709                                     arg6));
9710 #endif
9711         return ret;
9712 #endif
9713 #ifdef TARGET_NR_mmap2
9714     case TARGET_NR_mmap2:
9715 #ifndef MMAP_SHIFT
9716 #define MMAP_SHIFT 12
9717 #endif
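        /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT) bytes
         * (4096 by default), so scale it back up to a byte offset. */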
9718         ret = target_mmap(arg1, arg2, arg3,
9719                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9720                           arg5, arg6 << MMAP_SHIFT);
9721         return get_errno(ret);
9722 #endif
9723     case TARGET_NR_munmap:
9724         return get_errno(target_munmap(arg1, arg2));
9725     case TARGET_NR_mprotect:
9726         {
9727             TaskState *ts = cpu->opaque;
9728             /* Special hack to detect libc making the stack executable.  */
9729             if ((arg3 & PROT_GROWSDOWN)
9730                 && arg1 >= ts->info->stack_limit
9731                 && arg1 <= ts->info->start_stack) {
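                /* Extend the range down to the guest stack limit and drop
                 * PROT_GROWSDOWN, so the whole stack region is changed
                 * explicitly. */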
9732                 arg3 &= ~PROT_GROWSDOWN;
9733                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9734                 arg1 = ts->info->stack_limit;
9735             }
9736         }
9737         return get_errno(target_mprotect(arg1, arg2, arg3));
9738 #ifdef TARGET_NR_mremap
9739     case TARGET_NR_mremap:
9740         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9741 #endif
9742         /* ??? msync/mlock/munlock are broken for softmmu.  */
9743 #ifdef TARGET_NR_msync
9744     case TARGET_NR_msync:
9745         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9746 #endif
9747 #ifdef TARGET_NR_mlock
9748     case TARGET_NR_mlock:
9749         return get_errno(mlock(g2h(cpu, arg1), arg2));
9750 #endif
9751 #ifdef TARGET_NR_munlock
9752     case TARGET_NR_munlock:
9753         return get_errno(munlock(g2h(cpu, arg1), arg2));
9754 #endif
9755 #ifdef TARGET_NR_mlockall
9756     case TARGET_NR_mlockall:
9757         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9758 #endif
9759 #ifdef TARGET_NR_munlockall
9760     case TARGET_NR_munlockall:
9761         return get_errno(munlockall());
9762 #endif
9763 #ifdef TARGET_NR_truncate
9764     case TARGET_NR_truncate:
9765         if (!(p = lock_user_string(arg1)))
9766             return -TARGET_EFAULT;
9767         ret = get_errno(truncate(p, arg2));
9768         unlock_user(p, arg1, 0);
9769         return ret;
9770 #endif
9771 #ifdef TARGET_NR_ftruncate
9772     case TARGET_NR_ftruncate:
9773         return get_errno(ftruncate(arg1, arg2));
9774 #endif
9775     case TARGET_NR_fchmod:
9776         return get_errno(fchmod(arg1, arg2));
9777 #if defined(TARGET_NR_fchmodat)
9778     case TARGET_NR_fchmodat:
9779         if (!(p = lock_user_string(arg2)))
9780             return -TARGET_EFAULT;
9781         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9782         unlock_user(p, arg2, 0);
9783         return ret;
9784 #endif
9785     case TARGET_NR_getpriority:
9786         /* Note that negative values are valid for getpriority, so we must
9787            differentiate based on errno settings.  */
9788         errno = 0;
9789         ret = getpriority(arg1, arg2);
9790         if (ret == -1 && errno != 0) {
9791             return -host_to_target_errno(errno);
9792         }
9793 #ifdef TARGET_ALPHA
9794         /* Return value is the unbiased priority.  Signal no error.  */
9795         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9796 #else
9797         /* Return value is a biased priority to avoid negative numbers.  */
9798         ret = 20 - ret;
9799 #endif
9800         return ret;
9801     case TARGET_NR_setpriority:
9802         return get_errno(setpriority(arg1, arg2, arg3));
9803 #ifdef TARGET_NR_statfs
9804     case TARGET_NR_statfs:
9805         if (!(p = lock_user_string(arg1))) {
9806             return -TARGET_EFAULT;
9807         }
9808         ret = get_errno(statfs(path(p), &stfs));
9809         unlock_user(p, arg1, 0);
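    /* The conversion below is shared with TARGET_NR_fstatfs, which jumps
     * here via convert_statfs. */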
9810     convert_statfs:
9811         if (!is_error(ret)) {
9812             struct target_statfs *target_stfs;
9813 
9814             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9815                 return -TARGET_EFAULT;
9816             __put_user(stfs.f_type, &target_stfs->f_type);
9817             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9818             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9819             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9820             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9821             __put_user(stfs.f_files, &target_stfs->f_files);
9822             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9823             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9824             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9825             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9826             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9827 #ifdef _STATFS_F_FLAGS
9828             __put_user(stfs.f_flags, &target_stfs->f_flags);
9829 #else
9830             __put_user(0, &target_stfs->f_flags);
9831 #endif
9832             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9833             unlock_user_struct(target_stfs, arg2, 1);
9834         }
9835         return ret;
9836 #endif
9837 #ifdef TARGET_NR_fstatfs
9838     case TARGET_NR_fstatfs:
9839         ret = get_errno(fstatfs(arg1, &stfs));
9840         goto convert_statfs;
9841 #endif
9842 #ifdef TARGET_NR_statfs64
9843     case TARGET_NR_statfs64:
9844         if (!(p = lock_user_string(arg1))) {
9845             return -TARGET_EFAULT;
9846         }
9847         ret = get_errno(statfs(path(p), &stfs));
9848         unlock_user(p, arg1, 0);
9849     convert_statfs64:
9850         if (!is_error(ret)) {
9851             struct target_statfs64 *target_stfs;
9852 
9853             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9854                 return -TARGET_EFAULT;
9855             __put_user(stfs.f_type, &target_stfs->f_type);
9856             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9857             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9858             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9859             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9860             __put_user(stfs.f_files, &target_stfs->f_files);
9861             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9862             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9863             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9864             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9865             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9866 #ifdef _STATFS_F_FLAGS
9867             __put_user(stfs.f_flags, &target_stfs->f_flags);
9868 #else
9869             __put_user(0, &target_stfs->f_flags);
9870 #endif
9871             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9872             unlock_user_struct(target_stfs, arg3, 1);
9873         }
9874         return ret;
9875     case TARGET_NR_fstatfs64:
9876         ret = get_errno(fstatfs(arg1, &stfs));
9877         goto convert_statfs64;
9878 #endif
9879 #ifdef TARGET_NR_socketcall
9880     case TARGET_NR_socketcall:
9881         return do_socketcall(arg1, arg2);
9882 #endif
9883 #ifdef TARGET_NR_accept
9884     case TARGET_NR_accept:
9885         return do_accept4(arg1, arg2, arg3, 0);
9886 #endif
9887 #ifdef TARGET_NR_accept4
9888     case TARGET_NR_accept4:
9889         return do_accept4(arg1, arg2, arg3, arg4);
9890 #endif
9891 #ifdef TARGET_NR_bind
9892     case TARGET_NR_bind:
9893         return do_bind(arg1, arg2, arg3);
9894 #endif
9895 #ifdef TARGET_NR_connect
9896     case TARGET_NR_connect:
9897         return do_connect(arg1, arg2, arg3);
9898 #endif
9899 #ifdef TARGET_NR_getpeername
9900     case TARGET_NR_getpeername:
9901         return do_getpeername(arg1, arg2, arg3);
9902 #endif
9903 #ifdef TARGET_NR_getsockname
9904     case TARGET_NR_getsockname:
9905         return do_getsockname(arg1, arg2, arg3);
9906 #endif
9907 #ifdef TARGET_NR_getsockopt
9908     case TARGET_NR_getsockopt:
9909         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9910 #endif
9911 #ifdef TARGET_NR_listen
9912     case TARGET_NR_listen:
9913         return get_errno(listen(arg1, arg2));
9914 #endif
9915 #ifdef TARGET_NR_recv
9916     case TARGET_NR_recv:
9917         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9918 #endif
9919 #ifdef TARGET_NR_recvfrom
9920     case TARGET_NR_recvfrom:
9921         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9922 #endif
9923 #ifdef TARGET_NR_recvmsg
9924     case TARGET_NR_recvmsg:
9925         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9926 #endif
9927 #ifdef TARGET_NR_send
9928     case TARGET_NR_send:
9929         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9930 #endif
9931 #ifdef TARGET_NR_sendmsg
9932     case TARGET_NR_sendmsg:
9933         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9934 #endif
9935 #ifdef TARGET_NR_sendmmsg
9936     case TARGET_NR_sendmmsg:
9937         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9938 #endif
9939 #ifdef TARGET_NR_recvmmsg
9940     case TARGET_NR_recvmmsg:
9941         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9942 #endif
9943 #ifdef TARGET_NR_sendto
9944     case TARGET_NR_sendto:
9945         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9946 #endif
9947 #ifdef TARGET_NR_shutdown
9948     case TARGET_NR_shutdown:
9949         return get_errno(shutdown(arg1, arg2));
9950 #endif
9951 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9952     case TARGET_NR_getrandom:
9953         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9954         if (!p) {
9955             return -TARGET_EFAULT;
9956         }
9957         ret = get_errno(getrandom(p, arg2, arg3));
9958         unlock_user(p, arg1, ret);
9959         return ret;
9960 #endif
9961 #ifdef TARGET_NR_socket
9962     case TARGET_NR_socket:
9963         return do_socket(arg1, arg2, arg3);
9964 #endif
9965 #ifdef TARGET_NR_socketpair
9966     case TARGET_NR_socketpair:
9967         return do_socketpair(arg1, arg2, arg3, arg4);
9968 #endif
9969 #ifdef TARGET_NR_setsockopt
9970     case TARGET_NR_setsockopt:
9971         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9972 #endif
9973 #if defined(TARGET_NR_syslog)
9974     case TARGET_NR_syslog:
9975         {
9976             int len = arg2;
9977 
9978             switch (arg1) {
9979             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9980             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9981             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9982             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9983             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9984             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9985             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9986             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9987                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9988             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9989             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9990             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9991                 {
9992                     if (len < 0) {
9993                         return -TARGET_EINVAL;
9994                     }
9995                     if (len == 0) {
9996                         return 0;
9997                     }
9998                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9999                     if (!p) {
10000                         return -TARGET_EFAULT;
10001                     }
10002                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10003                     unlock_user(p, arg2, arg3);
10004                 }
10005                 return ret;
10006             default:
10007                 return -TARGET_EINVAL;
10008             }
10009         }
10010         break;
10011 #endif
10012     case TARGET_NR_setitimer:
10013         {
10014             struct itimerval value, ovalue, *pvalue;
10015 
10016             if (arg2) {
10017                 pvalue = &value;
10018                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10019                     || copy_from_user_timeval(&pvalue->it_value,
10020                                               arg2 + sizeof(struct target_timeval)))
10021                     return -TARGET_EFAULT;
10022             } else {
10023                 pvalue = NULL;
10024             }
10025             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10026             if (!is_error(ret) && arg3) {
10027                 if (copy_to_user_timeval(arg3,
10028                                          &ovalue.it_interval)
10029                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10030                                             &ovalue.it_value))
10031                     return -TARGET_EFAULT;
10032             }
10033         }
10034         return ret;
10035     case TARGET_NR_getitimer:
10036         {
10037             struct itimerval value;
10038 
10039             ret = get_errno(getitimer(arg1, &value));
10040             if (!is_error(ret) && arg2) {
10041                 if (copy_to_user_timeval(arg2,
10042                                          &value.it_interval)
10043                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10044                                             &value.it_value))
10045                     return -TARGET_EFAULT;
10046             }
10047         }
10048         return ret;
10049 #ifdef TARGET_NR_stat
10050     case TARGET_NR_stat:
10051         if (!(p = lock_user_string(arg1))) {
10052             return -TARGET_EFAULT;
10053         }
10054         ret = get_errno(stat(path(p), &st));
10055         unlock_user(p, arg1, 0);
10056         goto do_stat;
10057 #endif
10058 #ifdef TARGET_NR_lstat
10059     case TARGET_NR_lstat:
10060         if (!(p = lock_user_string(arg1))) {
10061             return -TARGET_EFAULT;
10062         }
10063         ret = get_errno(lstat(path(p), &st));
10064         unlock_user(p, arg1, 0);
10065         goto do_stat;
10066 #endif
10067 #ifdef TARGET_NR_fstat
10068     case TARGET_NR_fstat:
10069         {
10070             ret = get_errno(fstat(arg1, &st));
10071 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10072         do_stat:
10073 #endif
10074             if (!is_error(ret)) {
10075                 struct target_stat *target_st;
10076 
10077                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10078                     return -TARGET_EFAULT;
10079                 memset(target_st, 0, sizeof(*target_st));
10080                 __put_user(st.st_dev, &target_st->st_dev);
10081                 __put_user(st.st_ino, &target_st->st_ino);
10082                 __put_user(st.st_mode, &target_st->st_mode);
10083                 __put_user(st.st_uid, &target_st->st_uid);
10084                 __put_user(st.st_gid, &target_st->st_gid);
10085                 __put_user(st.st_nlink, &target_st->st_nlink);
10086                 __put_user(st.st_rdev, &target_st->st_rdev);
10087                 __put_user(st.st_size, &target_st->st_size);
10088                 __put_user(st.st_blksize, &target_st->st_blksize);
10089                 __put_user(st.st_blocks, &target_st->st_blocks);
10090                 __put_user(st.st_atime, &target_st->target_st_atime);
10091                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10092                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10093 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10094     defined(TARGET_STAT_HAVE_NSEC)
10095                 __put_user(st.st_atim.tv_nsec,
10096                            &target_st->target_st_atime_nsec);
10097                 __put_user(st.st_mtim.tv_nsec,
10098                            &target_st->target_st_mtime_nsec);
10099                 __put_user(st.st_ctim.tv_nsec,
10100                            &target_st->target_st_ctime_nsec);
10101 #endif
10102                 unlock_user_struct(target_st, arg2, 1);
10103             }
10104         }
10105         return ret;
10106 #endif
10107     case TARGET_NR_vhangup:
10108         return get_errno(vhangup());
10109 #ifdef TARGET_NR_syscall
10110     case TARGET_NR_syscall:
10111         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10112                           arg6, arg7, arg8, 0);
10113 #endif
10114 #if defined(TARGET_NR_wait4)
10115     case TARGET_NR_wait4:
10116         {
10117             int status;
10118             abi_long status_ptr = arg2;
10119             struct rusage rusage, *rusage_ptr;
10120             abi_ulong target_rusage = arg4;
10121             abi_long rusage_err;
10122             if (target_rusage)
10123                 rusage_ptr = &rusage;
10124             else
10125                 rusage_ptr = NULL;
10126             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10127             if (!is_error(ret)) {
10128                 if (status_ptr && ret) {
10129                     status = host_to_target_waitstatus(status);
10130                     if (put_user_s32(status, status_ptr))
10131                         return -TARGET_EFAULT;
10132                 }
10133                 if (target_rusage) {
10134                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10135                     if (rusage_err) {
10136                         ret = rusage_err;
10137                     }
10138                 }
10139             }
10140         }
10141         return ret;
10142 #endif
10143 #ifdef TARGET_NR_swapoff
10144     case TARGET_NR_swapoff:
10145         if (!(p = lock_user_string(arg1)))
10146             return -TARGET_EFAULT;
10147         ret = get_errno(swapoff(p));
10148         unlock_user(p, arg1, 0);
10149         return ret;
10150 #endif
10151     case TARGET_NR_sysinfo:
10152         {
10153             struct target_sysinfo *target_value;
10154             struct sysinfo value;
10155             ret = get_errno(sysinfo(&value));
10156             if (!is_error(ret) && arg1)
10157             {
10158                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10159                     return -TARGET_EFAULT;
10160                 __put_user(value.uptime, &target_value->uptime);
10161                 __put_user(value.loads[0], &target_value->loads[0]);
10162                 __put_user(value.loads[1], &target_value->loads[1]);
10163                 __put_user(value.loads[2], &target_value->loads[2]);
10164                 __put_user(value.totalram, &target_value->totalram);
10165                 __put_user(value.freeram, &target_value->freeram);
10166                 __put_user(value.sharedram, &target_value->sharedram);
10167                 __put_user(value.bufferram, &target_value->bufferram);
10168                 __put_user(value.totalswap, &target_value->totalswap);
10169                 __put_user(value.freeswap, &target_value->freeswap);
10170                 __put_user(value.procs, &target_value->procs);
10171                 __put_user(value.totalhigh, &target_value->totalhigh);
10172                 __put_user(value.freehigh, &target_value->freehigh);
10173                 __put_user(value.mem_unit, &target_value->mem_unit);
10174                 unlock_user_struct(target_value, arg1, 1);
10175             }
10176         }
10177         return ret;
10178 #ifdef TARGET_NR_ipc
10179     case TARGET_NR_ipc:
10180         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10181 #endif
10182 #ifdef TARGET_NR_semget
10183     case TARGET_NR_semget:
10184         return get_errno(semget(arg1, arg2, arg3));
10185 #endif
10186 #ifdef TARGET_NR_semop
10187     case TARGET_NR_semop:
10188         return do_semtimedop(arg1, arg2, arg3, 0, false);
10189 #endif
10190 #ifdef TARGET_NR_semtimedop
10191     case TARGET_NR_semtimedop:
10192         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10193 #endif
10194 #ifdef TARGET_NR_semtimedop_time64
10195     case TARGET_NR_semtimedop_time64:
10196         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10197 #endif
10198 #ifdef TARGET_NR_semctl
10199     case TARGET_NR_semctl:
10200         return do_semctl(arg1, arg2, arg3, arg4);
10201 #endif
10202 #ifdef TARGET_NR_msgctl
10203     case TARGET_NR_msgctl:
10204         return do_msgctl(arg1, arg2, arg3);
10205 #endif
10206 #ifdef TARGET_NR_msgget
10207     case TARGET_NR_msgget:
10208         return get_errno(msgget(arg1, arg2));
10209 #endif
10210 #ifdef TARGET_NR_msgrcv
10211     case TARGET_NR_msgrcv:
10212         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10213 #endif
10214 #ifdef TARGET_NR_msgsnd
10215     case TARGET_NR_msgsnd:
10216         return do_msgsnd(arg1, arg2, arg3, arg4);
10217 #endif
10218 #ifdef TARGET_NR_shmget
10219     case TARGET_NR_shmget:
10220         return get_errno(shmget(arg1, arg2, arg3));
10221 #endif
10222 #ifdef TARGET_NR_shmctl
10223     case TARGET_NR_shmctl:
10224         return do_shmctl(arg1, arg2, arg3);
10225 #endif
10226 #ifdef TARGET_NR_shmat
10227     case TARGET_NR_shmat:
10228         return do_shmat(cpu_env, arg1, arg2, arg3);
10229 #endif
10230 #ifdef TARGET_NR_shmdt
10231     case TARGET_NR_shmdt:
10232         return do_shmdt(arg1);
10233 #endif
10234     case TARGET_NR_fsync:
10235         return get_errno(fsync(arg1));
10236     case TARGET_NR_clone:
10237         /* Linux manages to have three different orderings for its
10238          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10239          * match the kernel's CONFIG_CLONE_* settings.
10240          * Microblaze is further special in that it uses a sixth
10241          * implicit argument to clone for the TLS pointer.
10242          */
10243 #if defined(TARGET_MICROBLAZE)
10244         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10245 #elif defined(TARGET_CLONE_BACKWARDS)
10246         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10247 #elif defined(TARGET_CLONE_BACKWARDS2)
10248         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10249 #else
10250         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10251 #endif
10252         return ret;
10253 #ifdef __NR_exit_group
10254         /* new thread calls */
10255     case TARGET_NR_exit_group:
10256         preexit_cleanup(cpu_env, arg1);
10257         return get_errno(exit_group(arg1));
10258 #endif
10259     case TARGET_NR_setdomainname:
10260         if (!(p = lock_user_string(arg1)))
10261             return -TARGET_EFAULT;
10262         ret = get_errno(setdomainname(p, arg2));
10263         unlock_user(p, arg1, 0);
10264         return ret;
10265     case TARGET_NR_uname:
10266         /* no need to transcode because we use the Linux syscall */
10267         {
10268             struct new_utsname * buf;
10269 
10270             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10271                 return -TARGET_EFAULT;
10272             ret = get_errno(sys_uname(buf));
10273             if (!is_error(ret)) {
10274                 /* Overwrite the native machine name with whatever is being
10275                    emulated. */
10276                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10277                           sizeof(buf->machine));
10278                 /* Allow the user to override the reported release.  */
10279                 if (qemu_uname_release && *qemu_uname_release) {
10280                     g_strlcpy(buf->release, qemu_uname_release,
10281                               sizeof(buf->release));
10282                 }
10283             }
10284             unlock_user_struct(buf, arg1, 1);
10285         }
10286         return ret;
10287 #ifdef TARGET_I386
10288     case TARGET_NR_modify_ldt:
10289         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10290 #if !defined(TARGET_X86_64)
10291     case TARGET_NR_vm86:
10292         return do_vm86(cpu_env, arg1, arg2);
10293 #endif
10294 #endif
10295 #if defined(TARGET_NR_adjtimex)
10296     case TARGET_NR_adjtimex:
10297         {
10298             struct timex host_buf;
10299 
10300             if (target_to_host_timex(&host_buf, arg1) != 0) {
10301                 return -TARGET_EFAULT;
10302             }
10303             ret = get_errno(adjtimex(&host_buf));
10304             if (!is_error(ret)) {
10305                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10306                     return -TARGET_EFAULT;
10307                 }
10308             }
10309         }
10310         return ret;
10311 #endif
10312 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10313     case TARGET_NR_clock_adjtime:
10314         {
10315             struct timex htx, *phtx = &htx;
10316 
10317             if (target_to_host_timex(phtx, arg2) != 0) {
10318                 return -TARGET_EFAULT;
10319             }
10320             ret = get_errno(clock_adjtime(arg1, phtx));
10321             if (!is_error(ret) && phtx) {
10322                 if (host_to_target_timex(arg2, phtx) != 0) {
10323                     return -TARGET_EFAULT;
10324                 }
10325             }
10326         }
10327         return ret;
10328 #endif
10329 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10330     case TARGET_NR_clock_adjtime64:
10331         {
10332             struct timex htx;
10333 
10334             if (target_to_host_timex64(&htx, arg2) != 0) {
10335                 return -TARGET_EFAULT;
10336             }
10337             ret = get_errno(clock_adjtime(arg1, &htx));
10338             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10339                     return -TARGET_EFAULT;
10340             }
10341         }
10342         return ret;
10343 #endif
10344     case TARGET_NR_getpgid:
10345         return get_errno(getpgid(arg1));
10346     case TARGET_NR_fchdir:
10347         return get_errno(fchdir(arg1));
10348     case TARGET_NR_personality:
10349         return get_errno(personality(arg1));
10350 #ifdef TARGET_NR__llseek /* Not on alpha */
10351     case TARGET_NR__llseek:
10352         {
10353             int64_t res;
10354 #if !defined(__NR_llseek)
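            /* Without a host llseek syscall (i.e. on 64-bit hosts), combine
             * the two offset halves and use plain lseek instead. */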
10355             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10356             if (res == -1) {
10357                 ret = get_errno(res);
10358             } else {
10359                 ret = 0;
10360             }
10361 #else
10362             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10363 #endif
10364             if ((ret == 0) && put_user_s64(res, arg4)) {
10365                 return -TARGET_EFAULT;
10366             }
10367         }
10368         return ret;
10369 #endif
10370 #ifdef TARGET_NR_getdents
10371     case TARGET_NR_getdents:
10372 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10373 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10374         {
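            /* Host and target dirent layouts differ (64-bit host, 32-bit
             * target ABI), so read into a bounce buffer and repack each
             * record into the target_dirent format. */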
10375             struct target_dirent *target_dirp;
10376             struct linux_dirent *dirp;
10377             abi_long count = arg3;
10378 
10379             dirp = g_try_malloc(count);
10380             if (!dirp) {
10381                 return -TARGET_ENOMEM;
10382             }
10383 
10384             ret = get_errno(sys_getdents(arg1, dirp, count));
10385             if (!is_error(ret)) {
10386                 struct linux_dirent *de;
10387                 struct target_dirent *tde;
10388                 int len = ret;
10389                 int reclen, treclen;
10390                 int count1, tnamelen;
10391 
10392                 count1 = 0;
10393                 de = dirp;
10394                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10395                     return -TARGET_EFAULT;
10396                 tde = target_dirp;
10397                 while (len > 0) {
10398                     reclen = de->d_reclen;
10399                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10400                     assert(tnamelen >= 0);
10401                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10402                     assert(count1 + treclen <= count);
10403                     tde->d_reclen = tswap16(treclen);
10404                     tde->d_ino = tswapal(de->d_ino);
10405                     tde->d_off = tswapal(de->d_off);
10406                     memcpy(tde->d_name, de->d_name, tnamelen);
10407                     de = (struct linux_dirent *)((char *)de + reclen);
10408                     len -= reclen;
10409                     tde = (struct target_dirent *)((char *)tde + treclen);
10410                     count1 += treclen;
10411                 }
10412                 ret = count1;
10413                 unlock_user(target_dirp, arg2, ret);
10414             }
10415             g_free(dirp);
10416         }
10417 #else
10418         {
10419             struct linux_dirent *dirp;
10420             abi_long count = arg3;
10421 
10422             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10423                 return -TARGET_EFAULT;
10424             ret = get_errno(sys_getdents(arg1, dirp, count));
10425             if (!is_error(ret)) {
10426                 struct linux_dirent *de;
10427                 int len = ret;
10428                 int reclen;
10429                 de = dirp;
10430                 while (len > 0) {
10431                     reclen = de->d_reclen;
10432                     if (reclen > len)
10433                         break;
10434                     de->d_reclen = tswap16(reclen);
10435                     tswapls(&de->d_ino);
10436                     tswapls(&de->d_off);
10437                     de = (struct linux_dirent *)((char *)de + reclen);
10438                     len -= reclen;
10439                 }
10440             }
10441             unlock_user(dirp, arg2, ret);
10442         }
10443 #endif
10444 #else
10445         /* Implement getdents in terms of getdents64 */
10446         {
10447             struct linux_dirent64 *dirp;
10448             abi_long count = arg3;
10449 
10450             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10451             if (!dirp) {
10452                 return -TARGET_EFAULT;
10453             }
10454             ret = get_errno(sys_getdents64(arg1, dirp, count));
10455             if (!is_error(ret)) {
10456                 /* Convert the dirent64 structs to target dirent.  We do this
10457                  * in-place, since we can guarantee that a target_dirent is no
10458                  * larger than a dirent64; however this means we have to be
10459                  * careful to read everything before writing in the new format.
10460                  */
10461                 struct linux_dirent64 *de;
10462                 struct target_dirent *tde;
10463                 int len = ret;
10464                 int tlen = 0;
10465 
10466                 de = dirp;
10467                 tde = (struct target_dirent *)dirp;
10468                 while (len > 0) {
10469                     int namelen, treclen;
10470                     int reclen = de->d_reclen;
10471                     uint64_t ino = de->d_ino;
10472                     int64_t off = de->d_off;
10473                     uint8_t type = de->d_type;
10474 
10475                     namelen = strlen(de->d_name);
10476                     treclen = offsetof(struct target_dirent, d_name)
10477                         + namelen + 2;
10478                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10479 
10480                     memmove(tde->d_name, de->d_name, namelen + 1);
10481                     tde->d_ino = tswapal(ino);
10482                     tde->d_off = tswapal(off);
10483                     tde->d_reclen = tswap16(treclen);
10484                     /* The target_dirent type is in what was formerly a padding
10485                      * byte at the end of the structure:
10486                      */
10487                     *(((char *)tde) + treclen - 1) = type;
10488 
10489                     de = (struct linux_dirent64 *)((char *)de + reclen);
10490                     tde = (struct target_dirent *)((char *)tde + treclen);
10491                     len -= reclen;
10492                     tlen += treclen;
10493                 }
10494                 ret = tlen;
10495             }
10496             unlock_user(dirp, arg2, ret);
10497         }
10498 #endif
10499         return ret;
10500 #endif /* TARGET_NR_getdents */
10501 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10502     case TARGET_NR_getdents64:
10503         {
10504             struct linux_dirent64 *dirp;
10505             abi_long count = arg3;
10506             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10507                 return -TARGET_EFAULT;
10508             ret = get_errno(sys_getdents64(arg1, dirp, count));
10509             if (!is_error(ret)) {
10510                 struct linux_dirent64 *de;
10511                 int len = ret;
10512                 int reclen;
10513                 de = dirp;
10514                 while (len > 0) {
10515                     reclen = de->d_reclen;
10516                     if (reclen > len)
10517                         break;
10518                     de->d_reclen = tswap16(reclen);
10519                     tswap64s((uint64_t *)&de->d_ino);
10520                     tswap64s((uint64_t *)&de->d_off);
10521                     de = (struct linux_dirent64 *)((char *)de + reclen);
10522                     len -= reclen;
10523                 }
10524             }
10525             unlock_user(dirp, arg2, ret);
10526         }
10527         return ret;
10528 #endif /* TARGET_NR_getdents64 */
10529 #if defined(TARGET_NR__newselect)
10530     case TARGET_NR__newselect:
10531         return do_select(arg1, arg2, arg3, arg4, arg5);
10532 #endif
10533 #ifdef TARGET_NR_poll
10534     case TARGET_NR_poll:
10535         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10536 #endif
10537 #ifdef TARGET_NR_ppoll
10538     case TARGET_NR_ppoll:
10539         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10540 #endif
10541 #ifdef TARGET_NR_ppoll_time64
10542     case TARGET_NR_ppoll_time64:
10543         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10544 #endif
10545     case TARGET_NR_flock:
10546         /* NOTE: the flock constant seems to be the same for every
10547            Linux platform */
10548         return get_errno(safe_flock(arg1, arg2));
10549     case TARGET_NR_readv:
10550         {
10551             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10552             if (vec != NULL) {
10553                 ret = get_errno(safe_readv(arg1, vec, arg3));
10554                 unlock_iovec(vec, arg2, arg3, 1);
10555             } else {
10556                 ret = -host_to_target_errno(errno);
10557             }
10558         }
10559         return ret;
10560     case TARGET_NR_writev:
10561         {
10562             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10563             if (vec != NULL) {
10564                 ret = get_errno(safe_writev(arg1, vec, arg3));
10565                 unlock_iovec(vec, arg2, arg3, 0);
10566             } else {
10567                 ret = -host_to_target_errno(errno);
10568             }
10569         }
10570         return ret;
10571 #if defined(TARGET_NR_preadv)
10572     case TARGET_NR_preadv:
10573         {
10574             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10575             if (vec != NULL) {
10576                 unsigned long low, high;
10577 
10578                 target_to_host_low_high(arg4, arg5, &low, &high);
10579                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10580                 unlock_iovec(vec, arg2, arg3, 1);
10581             } else {
10582                 ret = -host_to_target_errno(errno);
10583             }
10584         }
10585         return ret;
10586 #endif
10587 #if defined(TARGET_NR_pwritev)
10588     case TARGET_NR_pwritev:
10589         {
10590             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10591             if (vec != NULL) {
10592                 unsigned long low, high;
10593 
10594                 target_to_host_low_high(arg4, arg5, &low, &high);
10595                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10596                 unlock_iovec(vec, arg2, arg3, 0);
10597             } else {
10598                 ret = -host_to_target_errno(errno);
10599             }
10600         }
10601         return ret;
10602 #endif
10603     case TARGET_NR_getsid:
10604         return get_errno(getsid(arg1));
10605 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10606     case TARGET_NR_fdatasync:
10607         return get_errno(fdatasync(arg1));
10608 #endif
10609     case TARGET_NR_sched_getaffinity:
10610         {
10611             unsigned int mask_size;
10612             unsigned long *mask;
10613 
10614             /*
10615              * sched_getaffinity needs multiples of ulong, so we need to take
10616              * care of mismatches between target ulong and host ulong sizes.
10617              */
10618             if (arg2 & (sizeof(abi_ulong) - 1)) {
10619                 return -TARGET_EINVAL;
10620             }
10621             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10622 
10623             mask = alloca(mask_size);
10624             memset(mask, 0, mask_size);
10625             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10626 
10627             if (!is_error(ret)) {
10628                 if (ret > arg2) {
10629                     /* More data returned than will fit in the caller's buffer.
10630                      * This only happens if sizeof(abi_long) < sizeof(long)
10631                      * and the caller passed us a buffer holding an odd number
10632                      * of abi_longs. If the host kernel is actually using the
10633                      * extra 4 bytes then fail EINVAL; otherwise we can just
10634                      * ignore them and only copy the interesting part.
10635                      */
10636                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10637                     if (numcpus > arg2 * 8) {
10638                         return -TARGET_EINVAL;
10639                     }
10640                     ret = arg2;
10641                 }
10642 
10643                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10644                     return -TARGET_EFAULT;
10645                 }
10646             }
10647         }
10648         return ret;
10649     case TARGET_NR_sched_setaffinity:
10650         {
10651             unsigned int mask_size;
10652             unsigned long *mask;
10653 
10654             /*
10655              * sched_setaffinity needs multiples of ulong, so we need to take
10656              * care of mismatches between target ulong and host ulong sizes.
10657              */
10658             if (arg2 & (sizeof(abi_ulong) - 1)) {
10659                 return -TARGET_EINVAL;
10660             }
10661             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10662             mask = alloca(mask_size);
10663 
10664             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10665             if (ret) {
10666                 return ret;
10667             }
10668 
10669             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10670         }
10671     case TARGET_NR_getcpu:
10672         {
10673             unsigned cpu, node;
10674             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10675                                        arg2 ? &node : NULL,
10676                                        NULL));
10677             if (is_error(ret)) {
10678                 return ret;
10679             }
10680             if (arg1 && put_user_u32(cpu, arg1)) {
10681                 return -TARGET_EFAULT;
10682             }
10683             if (arg2 && put_user_u32(node, arg2)) {
10684                 return -TARGET_EFAULT;
10685             }
10686         }
10687         return ret;
10688     case TARGET_NR_sched_setparam:
10689         {
10690             struct sched_param *target_schp;
10691             struct sched_param schp;
10692 
10693             if (arg2 == 0) {
10694                 return -TARGET_EINVAL;
10695             }
10696             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10697                 return -TARGET_EFAULT;
10698             schp.sched_priority = tswap32(target_schp->sched_priority);
10699             unlock_user_struct(target_schp, arg2, 0);
10700             return get_errno(sched_setparam(arg1, &schp));
10701         }
10702     case TARGET_NR_sched_getparam:
10703         {
10704             struct sched_param *target_schp;
10705             struct sched_param schp;
10706 
10707             if (arg2 == 0) {
10708                 return -TARGET_EINVAL;
10709             }
10710             ret = get_errno(sched_getparam(arg1, &schp));
10711             if (!is_error(ret)) {
10712                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10713                     return -TARGET_EFAULT;
10714                 target_schp->sched_priority = tswap32(schp.sched_priority);
10715                 unlock_user_struct(target_schp, arg2, 1);
10716             }
10717         }
10718         return ret;
10719     case TARGET_NR_sched_setscheduler:
10720         {
10721             struct sched_param *target_schp;
10722             struct sched_param schp;
10723             if (arg3 == 0) {
10724                 return -TARGET_EINVAL;
10725             }
10726             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10727                 return -TARGET_EFAULT;
10728             schp.sched_priority = tswap32(target_schp->sched_priority);
10729             unlock_user_struct(target_schp, arg3, 0);
10730             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10731         }
10732     case TARGET_NR_sched_getscheduler:
10733         return get_errno(sched_getscheduler(arg1));
10734     case TARGET_NR_sched_yield:
10735         return get_errno(sched_yield());
10736     case TARGET_NR_sched_get_priority_max:
10737         return get_errno(sched_get_priority_max(arg1));
10738     case TARGET_NR_sched_get_priority_min:
10739         return get_errno(sched_get_priority_min(arg1));
10740 #ifdef TARGET_NR_sched_rr_get_interval
10741     case TARGET_NR_sched_rr_get_interval:
10742         {
10743             struct timespec ts;
10744             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10745             if (!is_error(ret)) {
10746                 ret = host_to_target_timespec(arg2, &ts);
10747             }
10748         }
10749         return ret;
10750 #endif
10751 #ifdef TARGET_NR_sched_rr_get_interval_time64
10752     case TARGET_NR_sched_rr_get_interval_time64:
10753         {
10754             struct timespec ts;
10755             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10756             if (!is_error(ret)) {
10757                 ret = host_to_target_timespec64(arg2, &ts);
10758             }
10759         }
10760         return ret;
10761 #endif
10762 #if defined(TARGET_NR_nanosleep)
10763     case TARGET_NR_nanosleep:
10764         {
10765             struct timespec req, rem;
10766             target_to_host_timespec(&req, arg1);
10767             ret = get_errno(safe_nanosleep(&req, &rem));
10768             if (is_error(ret) && arg2) {
10769                 host_to_target_timespec(arg2, &rem);
10770             }
10771         }
10772         return ret;
10773 #endif
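          /*
           * prctl: options whose arguments are pointers, or which would
           * interfere with the emulation itself (e.g. seccomp), are handled
           * explicitly below; all other options fall through unchanged to the
           * host prctl() in the default case.
           */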
10774     case TARGET_NR_prctl:
10775         switch (arg1) {
10776         case PR_GET_PDEATHSIG:
10777         {
10778             int deathsig;
10779             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10780             if (!is_error(ret) && arg2
10781                 && put_user_s32(deathsig, arg2)) {
10782                 return -TARGET_EFAULT;
10783             }
10784             return ret;
10785         }
10786 #ifdef PR_GET_NAME
10787         case PR_GET_NAME:
10788         {
10789             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10790             if (!name) {
10791                 return -TARGET_EFAULT;
10792             }
10793             ret = get_errno(prctl(arg1, (unsigned long)name,
10794                                   arg3, arg4, arg5));
10795             unlock_user(name, arg2, 16);
10796             return ret;
10797         }
10798         case PR_SET_NAME:
10799         {
10800             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10801             if (!name) {
10802                 return -TARGET_EFAULT;
10803             }
10804             ret = get_errno(prctl(arg1, (unsigned long)name,
10805                                   arg3, arg4, arg5));
10806             unlock_user(name, arg2, 0);
10807             return ret;
10808         }
10809 #endif
10810 #ifdef TARGET_MIPS
10811         case TARGET_PR_GET_FP_MODE:
10812         {
10813             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10814             ret = 0;
10815             if (env->CP0_Status & (1 << CP0St_FR)) {
10816                 ret |= TARGET_PR_FP_MODE_FR;
10817             }
10818             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10819                 ret |= TARGET_PR_FP_MODE_FRE;
10820             }
10821             return ret;
10822         }
10823         case TARGET_PR_SET_FP_MODE:
10824         {
10825             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10826             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10827             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10828             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10829             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10830 
10831             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10832                                             TARGET_PR_FP_MODE_FRE;
10833 
10834             /* If nothing to change, return right away, successfully.  */
10835             if (old_fr == new_fr && old_fre == new_fre) {
10836                 return 0;
10837             }
10838             /* Check the value is valid */
10839             if (arg2 & ~known_bits) {
10840                 return -TARGET_EOPNOTSUPP;
10841             }
10842             /* Setting FRE without FR is not supported.  */
10843             if (new_fre && !new_fr) {
10844                 return -TARGET_EOPNOTSUPP;
10845             }
10846             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10847                 /* FR1 is not supported */
10848                 return -TARGET_EOPNOTSUPP;
10849             }
10850             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10851                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10852                 /* cannot set FR=0 */
10853                 return -TARGET_EOPNOTSUPP;
10854             }
10855             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10856                 /* Cannot set FRE=1 */
10857                 return -TARGET_EOPNOTSUPP;
10858             }
10859 
10860             int i;
10861             fpr_t *fpr = env->active_fpu.fpr;
10862             for (i = 0; i < 32 ; i += 2) {
10863                 if (!old_fr && new_fr) {
10864                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10865                 } else if (old_fr && !new_fr) {
10866                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10867                 }
10868             }
10869 
10870             if (new_fr) {
10871                 env->CP0_Status |= (1 << CP0St_FR);
10872                 env->hflags |= MIPS_HFLAG_F64;
10873             } else {
10874                 env->CP0_Status &= ~(1 << CP0St_FR);
10875                 env->hflags &= ~MIPS_HFLAG_F64;
10876             }
10877             if (new_fre) {
10878                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10879                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10880                     env->hflags |= MIPS_HFLAG_FRE;
10881                 }
10882             } else {
10883                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10884                 env->hflags &= ~MIPS_HFLAG_FRE;
10885             }
10886 
10887             return 0;
10888         }
10889 #endif /* MIPS */
10890 #ifdef TARGET_AARCH64
10891         case TARGET_PR_SVE_SET_VL:
10892             /*
10893              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10894              * PR_SVE_VL_INHERIT.  Note the kernel definition
10895              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10896              * even though the current architectural maximum is VQ=16.
10897              */
10898             ret = -TARGET_EINVAL;
10899             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10900                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10901                 CPUARMState *env = cpu_env;
10902                 ARMCPU *cpu = env_archcpu(env);
10903                 uint32_t vq, old_vq;
10904 
10905                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10906                 vq = MAX(arg2 / 16, 1);
10907                 vq = MIN(vq, cpu->sve_max_vq);
10908 
10909                 if (vq < old_vq) {
10910                     aarch64_sve_narrow_vq(env, vq);
10911                 }
10912                 env->vfp.zcr_el[1] = vq - 1;
10913                 arm_rebuild_hflags(env);
10914                 ret = vq * 16;
10915             }
10916             return ret;
10917         case TARGET_PR_SVE_GET_VL:
10918             ret = -TARGET_EINVAL;
10919             {
10920                 ARMCPU *cpu = env_archcpu(cpu_env);
10921                 if (cpu_isar_feature(aa64_sve, cpu)) {
10922                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10923                 }
10924             }
10925             return ret;
10926         case TARGET_PR_PAC_RESET_KEYS:
10927             {
10928                 CPUARMState *env = cpu_env;
10929                 ARMCPU *cpu = env_archcpu(env);
10930 
10931                 if (arg3 || arg4 || arg5) {
10932                     return -TARGET_EINVAL;
10933                 }
10934                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10935                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10936                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10937                                TARGET_PR_PAC_APGAKEY);
10938                     int ret = 0;
10939                     Error *err = NULL;
10940 
10941                     if (arg2 == 0) {
10942                         arg2 = all;
10943                     } else if (arg2 & ~all) {
10944                         return -TARGET_EINVAL;
10945                     }
10946                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10947                         ret |= qemu_guest_getrandom(&env->keys.apia,
10948                                                     sizeof(ARMPACKey), &err);
10949                     }
10950                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10951                         ret |= qemu_guest_getrandom(&env->keys.apib,
10952                                                     sizeof(ARMPACKey), &err);
10953                     }
10954                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10955                         ret |= qemu_guest_getrandom(&env->keys.apda,
10956                                                     sizeof(ARMPACKey), &err);
10957                     }
10958                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10959                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10960                                                     sizeof(ARMPACKey), &err);
10961                     }
10962                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10963                         ret |= qemu_guest_getrandom(&env->keys.apga,
10964                                                     sizeof(ARMPACKey), &err);
10965                     }
10966                     if (ret != 0) {
10967                         /*
10968                          * Some unknown failure in the crypto.  The best
10969                          * we can do is log it and fail the syscall.
10970                          * The real syscall cannot fail this way.
10971                          */
10972                         qemu_log_mask(LOG_UNIMP,
10973                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10974                                       error_get_pretty(err));
10975                         error_free(err);
10976                         return -TARGET_EIO;
10977                     }
10978                     return 0;
10979                 }
10980             }
10981             return -TARGET_EINVAL;
10982 #endif /* AARCH64 */
10983         case PR_GET_SECCOMP:
10984         case PR_SET_SECCOMP:
10985             /* Disable seccomp to prevent the target disabling syscalls we
10986              * need. */
10987             return -TARGET_EINVAL;
10988         default:
10989             /* Most prctl options have no pointer arguments */
10990             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10991         }
10992         break;
10993 #ifdef TARGET_NR_arch_prctl
10994     case TARGET_NR_arch_prctl:
10995         return do_arch_prctl(cpu_env, arg1, arg2);
10996 #endif
10997 #ifdef TARGET_NR_pread64
10998     case TARGET_NR_pread64:
10999         if (regpairs_aligned(cpu_env, num)) {
11000             arg4 = arg5;
11001             arg5 = arg6;
11002         }
11003         if (arg2 == 0 && arg3 == 0) {
11004             /* Special-case NULL buffer and zero length, which should succeed */
11005             p = 0;
11006         } else {
11007             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11008             if (!p) {
11009                 return -TARGET_EFAULT;
11010             }
11011         }
11012         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11013         unlock_user(p, arg2, ret);
11014         return ret;
11015     case TARGET_NR_pwrite64:
11016         if (regpairs_aligned(cpu_env, num)) {
11017             arg4 = arg5;
11018             arg5 = arg6;
11019         }
11020         if (arg2 == 0 && arg3 == 0) {
11021             /* Special-case NULL buffer and zero length, which should succeed */
11022             p = 0;
11023         } else {
11024             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11025             if (!p) {
11026                 return -TARGET_EFAULT;
11027             }
11028         }
11029         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11030         unlock_user(p, arg2, 0);
11031         return ret;
11032 #endif
11033     case TARGET_NR_getcwd:
11034         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11035             return -TARGET_EFAULT;
11036         ret = get_errno(sys_getcwd1(p, arg2));
11037         unlock_user(p, arg1, ret);
11038         return ret;
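          /*
           * capget/capset: the capability header is copied in both directions
           * (the kernel updates its version field even on error), while the
           * data area holds one or two 32-bit triplets depending on the
           * negotiated capability version.
           */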
11039     case TARGET_NR_capget:
11040     case TARGET_NR_capset:
11041     {
11042         struct target_user_cap_header *target_header;
11043         struct target_user_cap_data *target_data = NULL;
11044         struct __user_cap_header_struct header;
11045         struct __user_cap_data_struct data[2];
11046         struct __user_cap_data_struct *dataptr = NULL;
11047         int i, target_datalen;
11048         int data_items = 1;
11049 
11050         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11051             return -TARGET_EFAULT;
11052         }
11053         header.version = tswap32(target_header->version);
11054         header.pid = tswap32(target_header->pid);
11055 
11056         if (header.version != _LINUX_CAPABILITY_VERSION) {
11057             /* Version 2 and up take a pointer to two user_data structs */
11058             data_items = 2;
11059         }
11060 
11061         target_datalen = sizeof(*target_data) * data_items;
11062 
11063         if (arg2) {
11064             if (num == TARGET_NR_capget) {
11065                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11066             } else {
11067                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11068             }
11069             if (!target_data) {
11070                 unlock_user_struct(target_header, arg1, 0);
11071                 return -TARGET_EFAULT;
11072             }
11073 
11074             if (num == TARGET_NR_capset) {
11075                 for (i = 0; i < data_items; i++) {
11076                     data[i].effective = tswap32(target_data[i].effective);
11077                     data[i].permitted = tswap32(target_data[i].permitted);
11078                     data[i].inheritable = tswap32(target_data[i].inheritable);
11079                 }
11080             }
11081 
11082             dataptr = data;
11083         }
11084 
11085         if (num == TARGET_NR_capget) {
11086             ret = get_errno(capget(&header, dataptr));
11087         } else {
11088             ret = get_errno(capset(&header, dataptr));
11089         }
11090 
11091         /* The kernel always updates version for both capget and capset */
11092         target_header->version = tswap32(header.version);
11093         unlock_user_struct(target_header, arg1, 1);
11094 
11095         if (arg2) {
11096             if (num == TARGET_NR_capget) {
11097                 for (i = 0; i < data_items; i++) {
11098                     target_data[i].effective = tswap32(data[i].effective);
11099                     target_data[i].permitted = tswap32(data[i].permitted);
11100                     target_data[i].inheritable = tswap32(data[i].inheritable);
11101                 }
11102                 unlock_user(target_data, arg2, target_datalen);
11103             } else {
11104                 unlock_user(target_data, arg2, 0);
11105             }
11106         }
11107         return ret;
11108     }
11109     case TARGET_NR_sigaltstack:
11110         return do_sigaltstack(arg1, arg2,
11111                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11112 
11113 #ifdef CONFIG_SENDFILE
11114 #ifdef TARGET_NR_sendfile
11115     case TARGET_NR_sendfile:
11116     {
11117         off_t *offp = NULL;
11118         off_t off;
11119         if (arg3) {
11120             ret = get_user_sal(off, arg3);
11121             if (is_error(ret)) {
11122                 return ret;
11123             }
11124             offp = &off;
11125         }
11126         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11127         if (!is_error(ret) && arg3) {
11128             abi_long ret2 = put_user_sal(off, arg3);
11129             if (is_error(ret2)) {
11130                 ret = ret2;
11131             }
11132         }
11133         return ret;
11134     }
11135 #endif
11136 #ifdef TARGET_NR_sendfile64
11137     case TARGET_NR_sendfile64:
11138     {
11139         off_t *offp = NULL;
11140         off_t off;
11141         if (arg3) {
11142             ret = get_user_s64(off, arg3);
11143             if (is_error(ret)) {
11144                 return ret;
11145             }
11146             offp = &off;
11147         }
11148         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11149         if (!is_error(ret) && arg3) {
11150             abi_long ret2 = put_user_s64(off, arg3);
11151             if (is_error(ret2)) {
11152                 ret = ret2;
11153             }
11154         }
11155         return ret;
11156     }
11157 #endif
11158 #endif
11159 #ifdef TARGET_NR_vfork
11160     case TARGET_NR_vfork:
11161         return get_errno(do_fork(cpu_env,
11162                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11163                          0, 0, 0, 0));
11164 #endif
11165 #ifdef TARGET_NR_ugetrlimit
11166     case TARGET_NR_ugetrlimit:
11167     {
11168         struct rlimit rlim;
11169         int resource = target_to_host_resource(arg1);
11170         ret = get_errno(getrlimit(resource, &rlim));
11171         if (!is_error(ret)) {
11172             struct target_rlimit *target_rlim;
11173             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11174                 return -TARGET_EFAULT;
11175             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11176             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11177             unlock_user_struct(target_rlim, arg2, 1);
11178         }
11179         return ret;
11180     }
11181 #endif
11182 #ifdef TARGET_NR_truncate64
11183     case TARGET_NR_truncate64:
11184         if (!(p = lock_user_string(arg1)))
11185             return -TARGET_EFAULT;
11186         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11187         unlock_user(p, arg1, 0);
11188         return ret;
11189 #endif
11190 #ifdef TARGET_NR_ftruncate64
11191     case TARGET_NR_ftruncate64:
11192         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11193 #endif
11194 #ifdef TARGET_NR_stat64
11195     case TARGET_NR_stat64:
11196         if (!(p = lock_user_string(arg1))) {
11197             return -TARGET_EFAULT;
11198         }
11199         ret = get_errno(stat(path(p), &st));
11200         unlock_user(p, arg1, 0);
11201         if (!is_error(ret))
11202             ret = host_to_target_stat64(cpu_env, arg2, &st);
11203         return ret;
11204 #endif
11205 #ifdef TARGET_NR_lstat64
11206     case TARGET_NR_lstat64:
11207         if (!(p = lock_user_string(arg1))) {
11208             return -TARGET_EFAULT;
11209         }
11210         ret = get_errno(lstat(path(p), &st));
11211         unlock_user(p, arg1, 0);
11212         if (!is_error(ret))
11213             ret = host_to_target_stat64(cpu_env, arg2, &st);
11214         return ret;
11215 #endif
11216 #ifdef TARGET_NR_fstat64
11217     case TARGET_NR_fstat64:
11218         ret = get_errno(fstat(arg1, &st));
11219         if (!is_error(ret))
11220             ret = host_to_target_stat64(cpu_env, arg2, &st);
11221         return ret;
11222 #endif
11223 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11224 #ifdef TARGET_NR_fstatat64
11225     case TARGET_NR_fstatat64:
11226 #endif
11227 #ifdef TARGET_NR_newfstatat
11228     case TARGET_NR_newfstatat:
11229 #endif
11230         if (!(p = lock_user_string(arg2))) {
11231             return -TARGET_EFAULT;
11232         }
11233         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11234         unlock_user(p, arg2, 0);
11235         if (!is_error(ret))
11236             ret = host_to_target_stat64(cpu_env, arg3, &st);
11237         return ret;
11238 #endif
11239 #if defined(TARGET_NR_statx)
11240     case TARGET_NR_statx:
11241         {
11242             struct target_statx *target_stx;
11243             int dirfd = arg1;
11244             int flags = arg3;
11245 
11246             p = lock_user_string(arg2);
11247             if (p == NULL) {
11248                 return -TARGET_EFAULT;
11249             }
11250 #if defined(__NR_statx)
11251             {
11252                 /*
11253                  * It is assumed that struct statx is architecture independent.
11254                  */
11255                 struct target_statx host_stx;
11256                 int mask = arg4;
11257 
11258                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11259                 if (!is_error(ret)) {
11260                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11261                         unlock_user(p, arg2, 0);
11262                         return -TARGET_EFAULT;
11263                     }
11264                 }
11265 
11266                 if (ret != -TARGET_ENOSYS) {
11267                     unlock_user(p, arg2, 0);
11268                     return ret;
11269                 }
11270             }
11271 #endif
11272             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11273             unlock_user(p, arg2, 0);
11274 
11275             if (!is_error(ret)) {
11276                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11277                     return -TARGET_EFAULT;
11278                 }
11279                 memset(target_stx, 0, sizeof(*target_stx));
11280                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11281                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11282                 __put_user(st.st_ino, &target_stx->stx_ino);
11283                 __put_user(st.st_mode, &target_stx->stx_mode);
11284                 __put_user(st.st_uid, &target_stx->stx_uid);
11285                 __put_user(st.st_gid, &target_stx->stx_gid);
11286                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11287                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11288                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11289                 __put_user(st.st_size, &target_stx->stx_size);
11290                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11291                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11292                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11293                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11294                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11295                 unlock_user_struct(target_stx, arg5, 1);
11296             }
11297         }
11298         return ret;
11299 #endif
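          /*
           * The syscalls below are the legacy uid/gid variants: on targets
           * with 16-bit ids, low2highuid()/high2lowuid() and the gid
           * equivalents convert between target and host ids, preserving the
           * special value -1.
           */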
11300 #ifdef TARGET_NR_lchown
11301     case TARGET_NR_lchown:
11302         if (!(p = lock_user_string(arg1)))
11303             return -TARGET_EFAULT;
11304         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11305         unlock_user(p, arg1, 0);
11306         return ret;
11307 #endif
11308 #ifdef TARGET_NR_getuid
11309     case TARGET_NR_getuid:
11310         return get_errno(high2lowuid(getuid()));
11311 #endif
11312 #ifdef TARGET_NR_getgid
11313     case TARGET_NR_getgid:
11314         return get_errno(high2lowgid(getgid()));
11315 #endif
11316 #ifdef TARGET_NR_geteuid
11317     case TARGET_NR_geteuid:
11318         return get_errno(high2lowuid(geteuid()));
11319 #endif
11320 #ifdef TARGET_NR_getegid
11321     case TARGET_NR_getegid:
11322         return get_errno(high2lowgid(getegid()));
11323 #endif
11324     case TARGET_NR_setreuid:
11325         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11326     case TARGET_NR_setregid:
11327         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11328     case TARGET_NR_getgroups:
11329         {
11330             int gidsetsize = arg1;
11331             target_id *target_grouplist;
11332             gid_t *grouplist;
11333             int i;
11334 
11335             grouplist = alloca(gidsetsize * sizeof(gid_t));
11336             ret = get_errno(getgroups(gidsetsize, grouplist));
11337             if (gidsetsize == 0)
11338                 return ret;
11339             if (!is_error(ret)) {
11340                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11341                 if (!target_grouplist)
11342                     return -TARGET_EFAULT;
11343                 for (i = 0; i < ret; i++)
11344                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11345                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11346             }
11347         }
11348         return ret;
11349     case TARGET_NR_setgroups:
11350         {
11351             int gidsetsize = arg1;
11352             target_id *target_grouplist;
11353             gid_t *grouplist = NULL;
11354             int i;
11355             if (gidsetsize) {
11356                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11357                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11358                 if (!target_grouplist) {
11359                     return -TARGET_EFAULT;
11360                 }
11361                 for (i = 0; i < gidsetsize; i++) {
11362                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11363                 }
11364                 unlock_user(target_grouplist, arg2, 0);
11365             }
11366             return get_errno(setgroups(gidsetsize, grouplist));
11367         }
11368     case TARGET_NR_fchown:
11369         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11370 #if defined(TARGET_NR_fchownat)
11371     case TARGET_NR_fchownat:
11372         if (!(p = lock_user_string(arg2)))
11373             return -TARGET_EFAULT;
11374         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11375                                  low2highgid(arg4), arg5));
11376         unlock_user(p, arg2, 0);
11377         return ret;
11378 #endif
11379 #ifdef TARGET_NR_setresuid
11380     case TARGET_NR_setresuid:
11381         return get_errno(sys_setresuid(low2highuid(arg1),
11382                                        low2highuid(arg2),
11383                                        low2highuid(arg3)));
11384 #endif
11385 #ifdef TARGET_NR_getresuid
11386     case TARGET_NR_getresuid:
11387         {
11388             uid_t ruid, euid, suid;
11389             ret = get_errno(getresuid(&ruid, &euid, &suid));
11390             if (!is_error(ret)) {
11391                 if (put_user_id(high2lowuid(ruid), arg1)
11392                     || put_user_id(high2lowuid(euid), arg2)
11393                     || put_user_id(high2lowuid(suid), arg3))
11394                     return -TARGET_EFAULT;
11395             }
11396         }
11397         return ret;
11398 #endif
11399 #ifdef TARGET_NR_getresgid
11400     case TARGET_NR_setresgid:
11401         return get_errno(sys_setresgid(low2highgid(arg1),
11402                                        low2highgid(arg2),
11403                                        low2highgid(arg3)));
11404 #endif
11405 #ifdef TARGET_NR_getresgid
11406     case TARGET_NR_getresgid:
11407         {
11408             gid_t rgid, egid, sgid;
11409             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11410             if (!is_error(ret)) {
11411                 if (put_user_id(high2lowgid(rgid), arg1)
11412                     || put_user_id(high2lowgid(egid), arg2)
11413                     || put_user_id(high2lowgid(sgid), arg3))
11414                     return -TARGET_EFAULT;
11415             }
11416         }
11417         return ret;
11418 #endif
11419 #ifdef TARGET_NR_chown
11420     case TARGET_NR_chown:
11421         if (!(p = lock_user_string(arg1)))
11422             return -TARGET_EFAULT;
11423         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11424         unlock_user(p, arg1, 0);
11425         return ret;
11426 #endif
11427     case TARGET_NR_setuid:
11428         return get_errno(sys_setuid(low2highuid(arg1)));
11429     case TARGET_NR_setgid:
11430         return get_errno(sys_setgid(low2highgid(arg1)));
11431     case TARGET_NR_setfsuid:
11432         return get_errno(setfsuid(arg1));
11433     case TARGET_NR_setfsgid:
11434         return get_errno(setfsgid(arg1));
11435 
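          /*
           * The *32 variants take full 32-bit uids/gids, so the values are
           * passed through without the 16-bit conversion helpers.
           */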
11436 #ifdef TARGET_NR_lchown32
11437     case TARGET_NR_lchown32:
11438         if (!(p = lock_user_string(arg1)))
11439             return -TARGET_EFAULT;
11440         ret = get_errno(lchown(p, arg2, arg3));
11441         unlock_user(p, arg1, 0);
11442         return ret;
11443 #endif
11444 #ifdef TARGET_NR_getuid32
11445     case TARGET_NR_getuid32:
11446         return get_errno(getuid());
11447 #endif
11448 
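          /*
           * Alpha's getxuid/getxgid return the real id in v0 and the
           * effective id in a4, hence the explicit write to ir[IR_A4] below.
           */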
11449 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11450     /* Alpha specific */
11451     case TARGET_NR_getxuid:
11452         {
11453             uid_t euid;
11454             euid = geteuid();
11455             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11456         }
11457         return get_errno(getuid());
11458 #endif
11459 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11460     /* Alpha specific */
11461     case TARGET_NR_getxgid:
11462         {
11463             gid_t egid;
11464             egid = getegid();
11465             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11466         }
11467         return get_errno(getgid());
11468 #endif
11469 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11470     /* Alpha specific */
11471     case TARGET_NR_osf_getsysinfo:
11472         ret = -TARGET_EOPNOTSUPP;
11473         switch (arg1) {
11474           case TARGET_GSI_IEEE_FP_CONTROL:
11475             {
11476                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11477                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11478 
11479                 swcr &= ~SWCR_STATUS_MASK;
11480                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11481 
11482                 if (put_user_u64(swcr, arg2))
11483                     return -TARGET_EFAULT;
11484                 ret = 0;
11485             }
11486             break;
11487 
11488           /* case GSI_IEEE_STATE_AT_SIGNAL:
11489              -- Not implemented in linux kernel.
11490              case GSI_UACPROC:
11491              -- Retrieves current unaligned access state; not much used.
11492              case GSI_PROC_TYPE:
11493              -- Retrieves implver information; surely not used.
11494              case GSI_GET_HWRPB:
11495              -- Grabs a copy of the HWRPB; surely not used.
11496           */
11497         }
11498         return ret;
11499 #endif
11500 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11501     /* Alpha specific */
11502     case TARGET_NR_osf_setsysinfo:
11503         ret = -TARGET_EOPNOTSUPP;
11504         switch (arg1) {
11505           case TARGET_SSI_IEEE_FP_CONTROL:
11506             {
11507                 uint64_t swcr, fpcr;
11508 
11509                 if (get_user_u64(swcr, arg2)) {
11510                     return -TARGET_EFAULT;
11511                 }
11512 
11513                 /*
11514                  * The kernel calls swcr_update_status to update the
11515                  * status bits from the fpcr at every point that it
11516                  * could be queried.  Therefore, we store the status
11517                  * bits only in FPCR.
11518                  */
11519                 ((CPUAlphaState *)cpu_env)->swcr
11520                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11521 
11522                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11523                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11524                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11525                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11526                 ret = 0;
11527             }
11528             break;
11529 
11530           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11531             {
11532                 uint64_t exc, fpcr, fex;
11533 
11534                 if (get_user_u64(exc, arg2)) {
11535                     return -TARGET_EFAULT;
11536                 }
11537                 exc &= SWCR_STATUS_MASK;
11538                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11539 
11540                 /* Old exceptions are not signaled.  */
11541                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11542                 fex = exc & ~fex;
11543                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11544                 fex &= ((CPUArchState *)cpu_env)->swcr;
11545 
11546                 /* Update the hardware fpcr.  */
11547                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11548                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11549 
11550                 if (fex) {
11551                     int si_code = TARGET_FPE_FLTUNK;
11552                     target_siginfo_t info;
11553 
11554                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11555                         si_code = TARGET_FPE_FLTUND;
11556                     }
11557                     if (fex & SWCR_TRAP_ENABLE_INE) {
11558                         si_code = TARGET_FPE_FLTRES;
11559                     }
11560                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11561                         si_code = TARGET_FPE_FLTUND;
11562                     }
11563                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11564                         si_code = TARGET_FPE_FLTOVF;
11565                     }
11566                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11567                         si_code = TARGET_FPE_FLTDIV;
11568                     }
11569                     if (fex & SWCR_TRAP_ENABLE_INV) {
11570                         si_code = TARGET_FPE_FLTINV;
11571                     }
11572 
11573                     info.si_signo = SIGFPE;
11574                     info.si_errno = 0;
11575                     info.si_code = si_code;
11576                     info._sifields._sigfault._addr
11577                         = ((CPUArchState *)cpu_env)->pc;
11578                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11579                                  QEMU_SI_FAULT, &info);
11580                 }
11581                 ret = 0;
11582             }
11583             break;
11584 
11585           /* case SSI_NVPAIRS:
11586              -- Used with SSIN_UACPROC to enable unaligned accesses.
11587              case SSI_IEEE_STATE_AT_SIGNAL:
11588              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11589              -- Not implemented in linux kernel
11590           */
11591         }
11592         return ret;
11593 #endif
11594 #ifdef TARGET_NR_osf_sigprocmask
11595     /* Alpha specific.  */
11596     case TARGET_NR_osf_sigprocmask:
11597         {
11598             abi_ulong mask;
11599             int how;
11600             sigset_t set, oldset;
11601 
11602             switch (arg1) {
11603             case TARGET_SIG_BLOCK:
11604                 how = SIG_BLOCK;
11605                 break;
11606             case TARGET_SIG_UNBLOCK:
11607                 how = SIG_UNBLOCK;
11608                 break;
11609             case TARGET_SIG_SETMASK:
11610                 how = SIG_SETMASK;
11611                 break;
11612             default:
11613                 return -TARGET_EINVAL;
11614             }
11615             mask = arg2;
11616             target_to_host_old_sigset(&set, &mask);
11617             ret = do_sigprocmask(how, &set, &oldset);
11618             if (!ret) {
11619                 host_to_target_old_sigset(&mask, &oldset);
11620                 ret = mask;
11621             }
11622         }
11623         return ret;
11624 #endif
11625 
11626 #ifdef TARGET_NR_getgid32
11627     case TARGET_NR_getgid32:
11628         return get_errno(getgid());
11629 #endif
11630 #ifdef TARGET_NR_geteuid32
11631     case TARGET_NR_geteuid32:
11632         return get_errno(geteuid());
11633 #endif
11634 #ifdef TARGET_NR_getegid32
11635     case TARGET_NR_getegid32:
11636         return get_errno(getegid());
11637 #endif
11638 #ifdef TARGET_NR_setreuid32
11639     case TARGET_NR_setreuid32:
11640         return get_errno(setreuid(arg1, arg2));
11641 #endif
11642 #ifdef TARGET_NR_setregid32
11643     case TARGET_NR_setregid32:
11644         return get_errno(setregid(arg1, arg2));
11645 #endif
11646 #ifdef TARGET_NR_getgroups32
11647     case TARGET_NR_getgroups32:
11648         {
11649             int gidsetsize = arg1;
11650             uint32_t *target_grouplist;
11651             gid_t *grouplist;
11652             int i;
11653 
11654             grouplist = alloca(gidsetsize * sizeof(gid_t));
11655             ret = get_errno(getgroups(gidsetsize, grouplist));
11656             if (gidsetsize == 0)
11657                 return ret;
11658             if (!is_error(ret)) {
11659                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11660                 if (!target_grouplist) {
11661                     return -TARGET_EFAULT;
11662                 }
11663                 for (i = 0; i < ret; i++)
11664                     target_grouplist[i] = tswap32(grouplist[i]);
11665                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11666             }
11667         }
11668         return ret;
11669 #endif
11670 #ifdef TARGET_NR_setgroups32
11671     case TARGET_NR_setgroups32:
11672         {
11673             int gidsetsize = arg1;
11674             uint32_t *target_grouplist;
11675             gid_t *grouplist;
11676             int i;
11677 
11678             grouplist = alloca(gidsetsize * sizeof(gid_t));
11679             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11680             if (!target_grouplist) {
11681                 return -TARGET_EFAULT;
11682             }
11683             for (i = 0; i < gidsetsize; i++)
11684                 grouplist[i] = tswap32(target_grouplist[i]);
11685             unlock_user(target_grouplist, arg2, 0);
11686             return get_errno(setgroups(gidsetsize, grouplist));
11687         }
11688 #endif
11689 #ifdef TARGET_NR_fchown32
11690     case TARGET_NR_fchown32:
11691         return get_errno(fchown(arg1, arg2, arg3));
11692 #endif
11693 #ifdef TARGET_NR_setresuid32
11694     case TARGET_NR_setresuid32:
11695         return get_errno(sys_setresuid(arg1, arg2, arg3));
11696 #endif
11697 #ifdef TARGET_NR_getresuid32
11698     case TARGET_NR_getresuid32:
11699         {
11700             uid_t ruid, euid, suid;
11701             ret = get_errno(getresuid(&ruid, &euid, &suid));
11702             if (!is_error(ret)) {
11703                 if (put_user_u32(ruid, arg1)
11704                     || put_user_u32(euid, arg2)
11705                     || put_user_u32(suid, arg3))
11706                     return -TARGET_EFAULT;
11707             }
11708         }
11709         return ret;
11710 #endif
11711 #ifdef TARGET_NR_setresgid32
11712     case TARGET_NR_setresgid32:
11713         return get_errno(sys_setresgid(arg1, arg2, arg3));
11714 #endif
11715 #ifdef TARGET_NR_getresgid32
11716     case TARGET_NR_getresgid32:
11717         {
11718             gid_t rgid, egid, sgid;
11719             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11720             if (!is_error(ret)) {
11721                 if (put_user_u32(rgid, arg1)
11722                     || put_user_u32(egid, arg2)
11723                     || put_user_u32(sgid, arg3))
11724                     return -TARGET_EFAULT;
11725             }
11726         }
11727         return ret;
11728 #endif
11729 #ifdef TARGET_NR_chown32
11730     case TARGET_NR_chown32:
11731         if (!(p = lock_user_string(arg1)))
11732             return -TARGET_EFAULT;
11733         ret = get_errno(chown(p, arg2, arg3));
11734         unlock_user(p, arg1, 0);
11735         return ret;
11736 #endif
11737 #ifdef TARGET_NR_setuid32
11738     case TARGET_NR_setuid32:
11739         return get_errno(sys_setuid(arg1));
11740 #endif
11741 #ifdef TARGET_NR_setgid32
11742     case TARGET_NR_setgid32:
11743         return get_errno(sys_setgid(arg1));
11744 #endif
11745 #ifdef TARGET_NR_setfsuid32
11746     case TARGET_NR_setfsuid32:
11747         return get_errno(setfsuid(arg1));
11748 #endif
11749 #ifdef TARGET_NR_setfsgid32
11750     case TARGET_NR_setfsgid32:
11751         return get_errno(setfsgid(arg1));
11752 #endif
11753 #ifdef TARGET_NR_mincore
11754     case TARGET_NR_mincore:
11755         {
11756             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11757             if (!a) {
11758                 return -TARGET_ENOMEM;
11759             }
11760             p = lock_user_string(arg3);
11761             if (!p) {
11762                 ret = -TARGET_EFAULT;
11763             } else {
11764                 ret = get_errno(mincore(a, arg2, p));
11765                 unlock_user(p, arg3, ret);
11766             }
11767             unlock_user(a, arg1, 0);
11768         }
11769         return ret;
11770 #endif
11771 #ifdef TARGET_NR_arm_fadvise64_64
11772     case TARGET_NR_arm_fadvise64_64:
11773         /* arm_fadvise64_64 looks like fadvise64_64 but
11774          * with different argument order: fd, advice, offset, len
11775          * rather than the usual fd, offset, len, advice.
11776          * Note that offset and len are both 64-bit so appear as
11777          * pairs of 32-bit registers.
11778          */
11779         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11780                             target_offset64(arg5, arg6), arg2);
11781         return -host_to_target_errno(ret);
11782 #endif
11783 
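          /*
           * On 32-bit ABIs the 64-bit offset/len arguments of the fadvise
           * family arrive split across pairs of 32-bit registers; on targets
           * where such pairs must be even-aligned (regpairs_aligned()) an
           * argument slot is skipped, so the code below reshuffles the
           * arguments into the canonical fd, offset, len, advice order before
           * calling posix_fadvise().
           */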
11784 #if TARGET_ABI_BITS == 32
11785 
11786 #ifdef TARGET_NR_fadvise64_64
11787     case TARGET_NR_fadvise64_64:
11788 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11789         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11790         ret = arg2;
11791         arg2 = arg3;
11792         arg3 = arg4;
11793         arg4 = arg5;
11794         arg5 = arg6;
11795         arg6 = ret;
11796 #else
11797         /* 6 args: fd, offset (high, low), len (high, low), advice */
11798         if (regpairs_aligned(cpu_env, num)) {
11799             /* offset is in (3,4), len in (5,6) and advice in 7 */
11800             arg2 = arg3;
11801             arg3 = arg4;
11802             arg4 = arg5;
11803             arg5 = arg6;
11804             arg6 = arg7;
11805         }
11806 #endif
11807         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11808                             target_offset64(arg4, arg5), arg6);
11809         return -host_to_target_errno(ret);
11810 #endif
11811 
11812 #ifdef TARGET_NR_fadvise64
11813     case TARGET_NR_fadvise64:
11814         /* 5 args: fd, offset (high, low), len, advice */
11815         if (regpairs_aligned(cpu_env, num)) {
11816             /* offset is in (3,4), len in 5 and advice in 6 */
11817             arg2 = arg3;
11818             arg3 = arg4;
11819             arg4 = arg5;
11820             arg5 = arg6;
11821         }
11822         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11823         return -host_to_target_errno(ret);
11824 #endif
11825 
11826 #else /* not a 32-bit ABI */
11827 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11828 #ifdef TARGET_NR_fadvise64_64
11829     case TARGET_NR_fadvise64_64:
11830 #endif
11831 #ifdef TARGET_NR_fadvise64
11832     case TARGET_NR_fadvise64:
11833 #endif
11834 #ifdef TARGET_S390X
11835         switch (arg4) {
11836         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11837         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11838         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11839         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11840         default: break;
11841         }
11842 #endif
11843         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11844 #endif
11845 #endif /* end of 64-bit ABI fadvise handling */
11846 
11847 #ifdef TARGET_NR_madvise
11848     case TARGET_NR_madvise:
11849         /* A straight passthrough may not be safe because qemu sometimes
11850            turns private file-backed mappings into anonymous mappings.
11851            This will break MADV_DONTNEED.
11852            This is a hint, so ignoring and returning success is ok.  */
11853         return 0;
11854 #endif
11855 #ifdef TARGET_NR_fcntl64
11856     case TARGET_NR_fcntl64:
11857     {
11858         int cmd;
11859         struct flock64 fl;
11860         from_flock64_fn *copyfrom = copy_from_user_flock64;
11861         to_flock64_fn *copyto = copy_to_user_flock64;
11862 
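              /*
               * The ARM OABI lays out struct flock64 differently from EABI,
               * so pick the matching copy-in/copy-out helpers.
               */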
11863 #ifdef TARGET_ARM
11864         if (!((CPUARMState *)cpu_env)->eabi) {
11865             copyfrom = copy_from_user_oabi_flock64;
11866             copyto = copy_to_user_oabi_flock64;
11867         }
11868 #endif
11869 
11870         cmd = target_to_host_fcntl_cmd(arg2);
11871         if (cmd == -TARGET_EINVAL) {
11872             return cmd;
11873         }
11874 
11875         switch (arg2) {
11876         case TARGET_F_GETLK64:
11877             ret = copyfrom(&fl, arg3);
11878             if (ret) {
11879                 break;
11880             }
11881             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11882             if (ret == 0) {
11883                 ret = copyto(arg3, &fl);
11884             }
11885             break;
11886 
11887         case TARGET_F_SETLK64:
11888         case TARGET_F_SETLKW64:
11889             ret = copyfrom(&fl, arg3);
11890             if (ret) {
11891                 break;
11892             }
11893             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11894             break;
11895         default:
11896             ret = do_fcntl(arg1, arg2, arg3);
11897             break;
11898         }
11899         return ret;
11900     }
11901 #endif
11902 #ifdef TARGET_NR_cacheflush
11903     case TARGET_NR_cacheflush:
11904         /* self-modifying code is handled automatically, so nothing needed */
11905         return 0;
11906 #endif
11907 #ifdef TARGET_NR_getpagesize
11908     case TARGET_NR_getpagesize:
11909         return TARGET_PAGE_SIZE;
11910 #endif
11911     case TARGET_NR_gettid:
11912         return get_errno(sys_gettid());
11913 #ifdef TARGET_NR_readahead
11914     case TARGET_NR_readahead:
11915 #if TARGET_ABI_BITS == 32
11916         if (regpairs_aligned(cpu_env, num)) {
11917             arg2 = arg3;
11918             arg3 = arg4;
11919             arg4 = arg5;
11920         }
11921         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11922 #else
11923         ret = get_errno(readahead(arg1, arg2, arg3));
11924 #endif
11925         return ret;
11926 #endif
11927 #ifdef CONFIG_ATTR
11928 #ifdef TARGET_NR_setxattr
11929     case TARGET_NR_listxattr:
11930     case TARGET_NR_llistxattr:
11931     {
11932         void *p, *b = 0;
11933         if (arg2) {
11934             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11935             if (!b) {
11936                 return -TARGET_EFAULT;
11937             }
11938         }
11939         p = lock_user_string(arg1);
11940         if (p) {
11941             if (num == TARGET_NR_listxattr) {
11942                 ret = get_errno(listxattr(p, b, arg3));
11943             } else {
11944                 ret = get_errno(llistxattr(p, b, arg3));
11945             }
11946         } else {
11947             ret = -TARGET_EFAULT;
11948         }
11949         unlock_user(p, arg1, 0);
11950         unlock_user(b, arg2, arg3);
11951         return ret;
11952     }
11953     case TARGET_NR_flistxattr:
11954     {
11955         void *b = 0;
11956         if (arg2) {
11957             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11958             if (!b) {
11959                 return -TARGET_EFAULT;
11960             }
11961         }
11962         ret = get_errno(flistxattr(arg1, b, arg3));
11963         unlock_user(b, arg2, arg3);
11964         return ret;
11965     }
11966     case TARGET_NR_setxattr:
11967     case TARGET_NR_lsetxattr:
11968         {
11969             void *p, *n, *v = 0;
11970             if (arg3) {
11971                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11972                 if (!v) {
11973                     return -TARGET_EFAULT;
11974                 }
11975             }
11976             p = lock_user_string(arg1);
11977             n = lock_user_string(arg2);
11978             if (p && n) {
11979                 if (num == TARGET_NR_setxattr) {
11980                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11981                 } else {
11982                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11983                 }
11984             } else {
11985                 ret = -TARGET_EFAULT;
11986             }
11987             unlock_user(p, arg1, 0);
11988             unlock_user(n, arg2, 0);
11989             unlock_user(v, arg3, 0);
11990         }
11991         return ret;
11992     case TARGET_NR_fsetxattr:
11993         {
11994             void *n, *v = 0;
11995             if (arg3) {
11996                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11997                 if (!v) {
11998                     return -TARGET_EFAULT;
11999                 }
12000             }
12001             n = lock_user_string(arg2);
12002             if (n) {
12003                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12004             } else {
12005                 ret = -TARGET_EFAULT;
12006             }
12007             unlock_user(n, arg2, 0);
12008             unlock_user(v, arg3, 0);
12009         }
12010         return ret;
12011     case TARGET_NR_getxattr:
12012     case TARGET_NR_lgetxattr:
12013         {
12014             void *p, *n, *v = 0;
12015             if (arg3) {
12016                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12017                 if (!v) {
12018                     return -TARGET_EFAULT;
12019                 }
12020             }
12021             p = lock_user_string(arg1);
12022             n = lock_user_string(arg2);
12023             if (p && n) {
12024                 if (num == TARGET_NR_getxattr) {
12025                     ret = get_errno(getxattr(p, n, v, arg4));
12026                 } else {
12027                     ret = get_errno(lgetxattr(p, n, v, arg4));
12028                 }
12029             } else {
12030                 ret = -TARGET_EFAULT;
12031             }
12032             unlock_user(p, arg1, 0);
12033             unlock_user(n, arg2, 0);
12034             unlock_user(v, arg3, arg4);
12035         }
12036         return ret;
12037     case TARGET_NR_fgetxattr:
12038         {
12039             void *n, *v = 0;
12040             if (arg3) {
12041                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12042                 if (!v) {
12043                     return -TARGET_EFAULT;
12044                 }
12045             }
12046             n = lock_user_string(arg2);
12047             if (n) {
12048                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12049             } else {
12050                 ret = -TARGET_EFAULT;
12051             }
12052             unlock_user(n, arg2, 0);
12053             unlock_user(v, arg3, arg4);
12054         }
12055         return ret;
12056     case TARGET_NR_removexattr:
12057     case TARGET_NR_lremovexattr:
12058         {
12059             void *p, *n;
12060             p = lock_user_string(arg1);
12061             n = lock_user_string(arg2);
12062             if (p && n) {
12063                 if (num == TARGET_NR_removexattr) {
12064                     ret = get_errno(removexattr(p, n));
12065                 } else {
12066                     ret = get_errno(lremovexattr(p, n));
12067                 }
12068             } else {
12069                 ret = -TARGET_EFAULT;
12070             }
12071             unlock_user(p, arg1, 0);
12072             unlock_user(n, arg2, 0);
12073         }
12074         return ret;
12075     case TARGET_NR_fremovexattr:
12076         {
12077             void *n;
12078             n = lock_user_string(arg2);
12079             if (n) {
12080                 ret = get_errno(fremovexattr(arg1, n));
12081             } else {
12082                 ret = -TARGET_EFAULT;
12083             }
12084             unlock_user(n, arg2, 0);
12085         }
12086         return ret;
12087 #endif
12088 #endif /* CONFIG_ATTR */
12089 #ifdef TARGET_NR_set_thread_area
12090     case TARGET_NR_set_thread_area:
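      /*
       * The TLS pointer lives in a different place on each target: a CPU
       * state field on MIPS and CRIS, the TaskState on M68K, or a GDT
       * entry managed by do_set_thread_area() on 32-bit x86; other
       * targets report ENOSYS.
       */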
12091 #if defined(TARGET_MIPS)
12092       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12093       return 0;
12094 #elif defined(TARGET_CRIS)
      if (arg1 & 0xff) {
          ret = -TARGET_EINVAL;
      } else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
12101       return ret;
12102 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12103       return do_set_thread_area(cpu_env, arg1);
12104 #elif defined(TARGET_M68K)
12105       {
12106           TaskState *ts = cpu->opaque;
12107           ts->tp_value = arg1;
12108           return 0;
12109       }
12110 #else
12111       return -TARGET_ENOSYS;
12112 #endif
12113 #endif
12114 #ifdef TARGET_NR_get_thread_area
12115     case TARGET_NR_get_thread_area:
12116 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12117         return do_get_thread_area(cpu_env, arg1);
12118 #elif defined(TARGET_M68K)
12119         {
12120             TaskState *ts = cpu->opaque;
12121             return ts->tp_value;
12122         }
12123 #else
12124         return -TARGET_ENOSYS;
12125 #endif
12126 #endif
12127 #ifdef TARGET_NR_getdomainname
12128     case TARGET_NR_getdomainname:
12129         return -TARGET_ENOSYS;
12130 #endif
12131 
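/*
 * clock_* syscalls: convert between the guest and host timespec layouts.
 * The *64/*_time64 variants use the 64-bit time_t layout that 32-bit
 * guests pass for the time64 syscalls.
 */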
12132 #ifdef TARGET_NR_clock_settime
12133     case TARGET_NR_clock_settime:
12134     {
12135         struct timespec ts;
12136 
12137         ret = target_to_host_timespec(&ts, arg2);
12138         if (!is_error(ret)) {
12139             ret = get_errno(clock_settime(arg1, &ts));
12140         }
12141         return ret;
12142     }
12143 #endif
12144 #ifdef TARGET_NR_clock_settime64
12145     case TARGET_NR_clock_settime64:
12146     {
12147         struct timespec ts;
12148 
12149         ret = target_to_host_timespec64(&ts, arg2);
12150         if (!is_error(ret)) {
12151             ret = get_errno(clock_settime(arg1, &ts));
12152         }
12153         return ret;
12154     }
12155 #endif
12156 #ifdef TARGET_NR_clock_gettime
12157     case TARGET_NR_clock_gettime:
12158     {
12159         struct timespec ts;
12160         ret = get_errno(clock_gettime(arg1, &ts));
12161         if (!is_error(ret)) {
12162             ret = host_to_target_timespec(arg2, &ts);
12163         }
12164         return ret;
12165     }
12166 #endif
12167 #ifdef TARGET_NR_clock_gettime64
12168     case TARGET_NR_clock_gettime64:
12169     {
12170         struct timespec ts;
12171         ret = get_errno(clock_gettime(arg1, &ts));
12172         if (!is_error(ret)) {
12173             ret = host_to_target_timespec64(arg2, &ts);
12174         }
12175         return ret;
12176     }
12177 #endif
12178 #ifdef TARGET_NR_clock_getres
12179     case TARGET_NR_clock_getres:
12180     {
12181         struct timespec ts;
12182         ret = get_errno(clock_getres(arg1, &ts));
12183         if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
12185         }
12186         return ret;
12187     }
12188 #endif
12189 #ifdef TARGET_NR_clock_getres_time64
12190     case TARGET_NR_clock_getres_time64:
12191     {
12192         struct timespec ts;
12193         ret = get_errno(clock_getres(arg1, &ts));
12194         if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
12196         }
12197         return ret;
12198     }
12199 #endif
12200 #ifdef TARGET_NR_clock_nanosleep
12201     case TARGET_NR_clock_nanosleep:
12202     {
12203         struct timespec ts;
12204         if (target_to_host_timespec(&ts, arg3)) {
12205             return -TARGET_EFAULT;
12206         }
12207         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12208                                              &ts, arg4 ? &ts : NULL));
        /*
         * If the call is interrupted by a signal handler, it fails with
         * -TARGET_EINTR. If arg4 is not NULL and arg2 is not TIMER_ABSTIME,
         * the remaining unslept time is returned in arg4.
         */
12214         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12215             host_to_target_timespec(arg4, &ts)) {
12216               return -TARGET_EFAULT;
12217         }
12218 
12219         return ret;
12220     }
12221 #endif
12222 #ifdef TARGET_NR_clock_nanosleep_time64
12223     case TARGET_NR_clock_nanosleep_time64:
12224     {
12225         struct timespec ts;
12226 
12227         if (target_to_host_timespec64(&ts, arg3)) {
12228             return -TARGET_EFAULT;
12229         }
12230 
12231         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12232                                              &ts, arg4 ? &ts : NULL));
12233 
12234         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12235             host_to_target_timespec64(arg4, &ts)) {
12236             return -TARGET_EFAULT;
12237         }
12238         return ret;
12239     }
12240 #endif
12241 
12242 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
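    /*
     * Pass the guest address straight through (translated with g2h()) so
     * that the host kernel can clear the TID word there when the thread
     * exits, as CLONE_CHILD_CLEARTID expects.
     */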
12243     case TARGET_NR_set_tid_address:
12244         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12245 #endif
12246 
12247     case TARGET_NR_tkill:
12248         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12249 
12250     case TARGET_NR_tgkill:
12251         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12252                          target_to_host_signal(arg3)));
12253 
12254 #ifdef TARGET_NR_set_robust_list
12255     case TARGET_NR_set_robust_list:
12256     case TARGET_NR_get_robust_list:
12257         /* The ABI for supporting robust futexes has userspace pass
12258          * the kernel a pointer to a linked list which is updated by
12259          * userspace after the syscall; the list is walked by the kernel
12260          * when the thread exits. Since the linked list in QEMU guest
12261          * memory isn't a valid linked list for the host and we have
12262          * no way to reliably intercept the thread-death event, we can't
12263          * support these. Silently return ENOSYS so that guest userspace
12264          * falls back to a non-robust futex implementation (which should
12265          * be OK except in the corner case of the guest crashing while
12266          * holding a mutex that is shared with another process via
12267          * shared memory).
12268          */
12269         return -TARGET_ENOSYS;
12270 #endif
12271 
12272 #if defined(TARGET_NR_utimensat)
12273     case TARGET_NR_utimensat:
12274         {
12275             struct timespec *tsp, ts[2];
12276             if (!arg3) {
12277                 tsp = NULL;
12278             } else {
12279                 if (target_to_host_timespec(ts, arg3)) {
12280                     return -TARGET_EFAULT;
12281                 }
12282                 if (target_to_host_timespec(ts + 1, arg3 +
12283                                             sizeof(struct target_timespec))) {
12284                     return -TARGET_EFAULT;
12285                 }
12286                 tsp = ts;
12287             }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
12291                 if (!(p = lock_user_string(arg2))) {
12292                     return -TARGET_EFAULT;
12293                 }
12294                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12295                 unlock_user(p, arg2, 0);
12296             }
12297         }
12298         return ret;
12299 #endif
12300 #ifdef TARGET_NR_utimensat_time64
12301     case TARGET_NR_utimensat_time64:
12302         {
12303             struct timespec *tsp, ts[2];
12304             if (!arg3) {
12305                 tsp = NULL;
12306             } else {
12307                 if (target_to_host_timespec64(ts, arg3)) {
12308                     return -TARGET_EFAULT;
12309                 }
12310                 if (target_to_host_timespec64(ts + 1, arg3 +
12311                                      sizeof(struct target__kernel_timespec))) {
12312                     return -TARGET_EFAULT;
12313                 }
12314                 tsp = ts;
12315             }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
12319                 p = lock_user_string(arg2);
12320                 if (!p) {
12321                     return -TARGET_EFAULT;
12322                 }
12323                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12324                 unlock_user(p, arg2, 0);
12325             }
12326         }
12327         return ret;
12328 #endif
12329 #ifdef TARGET_NR_futex
12330     case TARGET_NR_futex:
12331         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12332 #endif
12333 #ifdef TARGET_NR_futex_time64
12334     case TARGET_NR_futex_time64:
12335         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12336 #endif
12337 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12338     case TARGET_NR_inotify_init:
12339         ret = get_errno(sys_inotify_init());
12340         if (ret >= 0) {
12341             fd_trans_register(ret, &target_inotify_trans);
12342         }
12343         return ret;
12344 #endif
12345 #ifdef CONFIG_INOTIFY1
12346 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12347     case TARGET_NR_inotify_init1:
12348         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12349                                           fcntl_flags_tbl)));
12350         if (ret >= 0) {
12351             fd_trans_register(ret, &target_inotify_trans);
12352         }
12353         return ret;
12354 #endif
12355 #endif
12356 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12357     case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
12359         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12360         unlock_user(p, arg2, 0);
12361         return ret;
12362 #endif
12363 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12364     case TARGET_NR_inotify_rm_watch:
12365         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12366 #endif
12367 
12368 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12369     case TARGET_NR_mq_open:
12370         {
12371             struct mq_attr posix_mq_attr;
12372             struct mq_attr *pposix_mq_attr;
12373             int host_flags;
12374 
12375             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12376             pposix_mq_attr = NULL;
12377             if (arg4) {
12378                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12379                     return -TARGET_EFAULT;
12380                 }
12381                 pposix_mq_attr = &posix_mq_attr;
12382             }
12383             p = lock_user_string(arg1 - 1);
12384             if (!p) {
12385                 return -TARGET_EFAULT;
12386             }
12387             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
12389         }
12390         return ret;
12391 
12392     case TARGET_NR_mq_unlink:
12393         p = lock_user_string(arg1 - 1);
12394         if (!p) {
12395             return -TARGET_EFAULT;
12396         }
12397         ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
12399         return ret;
12400 
12401 #ifdef TARGET_NR_mq_timedsend
12402     case TARGET_NR_mq_timedsend:
12403         {
12404             struct timespec ts;
12405 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
12407             if (arg5 != 0) {
12408                 if (target_to_host_timespec(&ts, arg5)) {
12409                     return -TARGET_EFAULT;
12410                 }
12411                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12412                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12413                     return -TARGET_EFAULT;
12414                 }
12415             } else {
12416                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12417             }
            unlock_user(p, arg2, arg3);
12419         }
12420         return ret;
12421 #endif
12422 #ifdef TARGET_NR_mq_timedsend_time64
12423     case TARGET_NR_mq_timedsend_time64:
12424         {
12425             struct timespec ts;
12426 
12427             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12428             if (arg5 != 0) {
12429                 if (target_to_host_timespec64(&ts, arg5)) {
12430                     return -TARGET_EFAULT;
12431                 }
12432                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12433                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12434                     return -TARGET_EFAULT;
12435                 }
12436             } else {
12437                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12438             }
12439             unlock_user(p, arg2, arg3);
12440         }
12441         return ret;
12442 #endif
12443 
12444 #ifdef TARGET_NR_mq_timedreceive
12445     case TARGET_NR_mq_timedreceive:
12446         {
12447             struct timespec ts;
12448             unsigned int prio;
12449 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
12451             if (arg5 != 0) {
12452                 if (target_to_host_timespec(&ts, arg5)) {
12453                     return -TARGET_EFAULT;
12454                 }
12455                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12456                                                      &prio, &ts));
12457                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12458                     return -TARGET_EFAULT;
12459                 }
12460             } else {
12461                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12462                                                      &prio, NULL));
12463             }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
12467         }
12468         return ret;
12469 #endif
12470 #ifdef TARGET_NR_mq_timedreceive_time64
12471     case TARGET_NR_mq_timedreceive_time64:
12472         {
12473             struct timespec ts;
12474             unsigned int prio;
12475 
12476             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12477             if (arg5 != 0) {
12478                 if (target_to_host_timespec64(&ts, arg5)) {
12479                     return -TARGET_EFAULT;
12480                 }
12481                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12482                                                      &prio, &ts));
12483                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12484                     return -TARGET_EFAULT;
12485                 }
12486             } else {
12487                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12488                                                      &prio, NULL));
12489             }
12490             unlock_user(p, arg2, arg3);
12491             if (arg4 != 0) {
12492                 put_user_u32(prio, arg4);
12493             }
12494         }
12495         return ret;
12496 #endif
12497 
12498     /* Not implemented for now... */
12499 /*     case TARGET_NR_mq_notify: */
12500 /*         break; */
12501 
12502     case TARGET_NR_mq_getsetattr:
12503         {
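            /*
             * arg2, if non-zero, points at the new attributes to set;
             * arg3, if non-zero, receives the previous/current attributes.
             */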
12504             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12505             ret = 0;
12506             if (arg2 != 0) {
                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
12508                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12509                                            &posix_mq_attr_out));
12510             } else if (arg3 != 0) {
12511                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12512             }
12513             if (ret == 0 && arg3 != 0) {
12514                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12515             }
12516         }
12517         return ret;
12518 #endif
12519 
12520 #ifdef CONFIG_SPLICE
12521 #ifdef TARGET_NR_tee
12522     case TARGET_NR_tee:
12523         {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
12525         }
12526         return ret;
12527 #endif
12528 #ifdef TARGET_NR_splice
12529     case TARGET_NR_splice:
12530         {
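            /*
             * The optional in/out offsets are 64-bit values in guest
             * memory; read them before the host call and write the
             * updated values back afterwards.
             */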
12531             loff_t loff_in, loff_out;
12532             loff_t *ploff_in = NULL, *ploff_out = NULL;
12533             if (arg2) {
12534                 if (get_user_u64(loff_in, arg2)) {
12535                     return -TARGET_EFAULT;
12536                 }
12537                 ploff_in = &loff_in;
12538             }
12539             if (arg4) {
12540                 if (get_user_u64(loff_out, arg4)) {
12541                     return -TARGET_EFAULT;
12542                 }
12543                 ploff_out = &loff_out;
12544             }
12545             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12546             if (arg2) {
12547                 if (put_user_u64(loff_in, arg2)) {
12548                     return -TARGET_EFAULT;
12549                 }
12550             }
12551             if (arg4) {
12552                 if (put_user_u64(loff_out, arg4)) {
12553                     return -TARGET_EFAULT;
12554                 }
12555             }
12556         }
12557         return ret;
12558 #endif
12559 #ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
12561         {
12562             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12563             if (vec != NULL) {
12564                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12565                 unlock_iovec(vec, arg2, arg3, 0);
12566             } else {
12567                 ret = -host_to_target_errno(errno);
12568             }
12569         }
12570         return ret;
12571 #endif
12572 #endif /* CONFIG_SPLICE */
12573 #ifdef CONFIG_EVENTFD
12574 #if defined(TARGET_NR_eventfd)
12575     case TARGET_NR_eventfd:
12576         ret = get_errno(eventfd(arg1, 0));
12577         if (ret >= 0) {
12578             fd_trans_register(ret, &target_eventfd_trans);
12579         }
12580         return ret;
12581 #endif
12582 #if defined(TARGET_NR_eventfd2)
12583     case TARGET_NR_eventfd2:
12584     {
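        /*
         * TARGET_O_NONBLOCK and TARGET_O_CLOEXEC may have different values
         * from the host flags, so translate them explicitly.
         */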
12585         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12586         if (arg2 & TARGET_O_NONBLOCK) {
12587             host_flags |= O_NONBLOCK;
12588         }
12589         if (arg2 & TARGET_O_CLOEXEC) {
12590             host_flags |= O_CLOEXEC;
12591         }
12592         ret = get_errno(eventfd(arg1, host_flags));
12593         if (ret >= 0) {
12594             fd_trans_register(ret, &target_eventfd_trans);
12595         }
12596         return ret;
12597     }
12598 #endif
#endif /* CONFIG_EVENTFD */
12600 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12601     case TARGET_NR_fallocate:
12602 #if TARGET_ABI_BITS == 32
12603         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12604                                   target_offset64(arg5, arg6)));
12605 #else
12606         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12607 #endif
12608         return ret;
12609 #endif
12610 #if defined(CONFIG_SYNC_FILE_RANGE)
12611 #if defined(TARGET_NR_sync_file_range)
12612     case TARGET_NR_sync_file_range:
12613 #if TARGET_ABI_BITS == 32
12614 #if defined(TARGET_MIPS)
12615         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12616                                         target_offset64(arg5, arg6), arg7));
12617 #else
12618         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12619                                         target_offset64(arg4, arg5), arg6));
12620 #endif /* !TARGET_MIPS */
12621 #else
12622         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12623 #endif
12624         return ret;
12625 #endif
12626 #if defined(TARGET_NR_sync_file_range2) || \
12627     defined(TARGET_NR_arm_sync_file_range)
12628 #if defined(TARGET_NR_sync_file_range2)
12629     case TARGET_NR_sync_file_range2:
12630 #endif
12631 #if defined(TARGET_NR_arm_sync_file_range)
12632     case TARGET_NR_arm_sync_file_range:
12633 #endif
12634         /* This is like sync_file_range but the arguments are reordered */
12635 #if TARGET_ABI_BITS == 32
12636         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12637                                         target_offset64(arg5, arg6), arg2));
12638 #else
12639         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12640 #endif
12641         return ret;
12642 #endif
12643 #endif
12644 #if defined(TARGET_NR_signalfd4)
12645     case TARGET_NR_signalfd4:
12646         return do_signalfd4(arg1, arg2, arg4);
12647 #endif
12648 #if defined(TARGET_NR_signalfd)
12649     case TARGET_NR_signalfd:
12650         return do_signalfd4(arg1, arg2, 0);
12651 #endif
12652 #if defined(CONFIG_EPOLL)
12653 #if defined(TARGET_NR_epoll_create)
12654     case TARGET_NR_epoll_create:
12655         return get_errno(epoll_create(arg1));
12656 #endif
12657 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12658     case TARGET_NR_epoll_create1:
12659         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12660 #endif
12661 #if defined(TARGET_NR_epoll_ctl)
12662     case TARGET_NR_epoll_ctl:
12663     {
12664         struct epoll_event ep;
12665         struct epoll_event *epp = 0;
12666         if (arg4) {
12667             if (arg2 != EPOLL_CTL_DEL) {
12668                 struct target_epoll_event *target_ep;
12669                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12670                     return -TARGET_EFAULT;
12671                 }
12672                 ep.events = tswap32(target_ep->events);
12673                 /*
12674                  * The epoll_data_t union is just opaque data to the kernel,
12675                  * so we transfer all 64 bits across and need not worry what
12676                  * actual data type it is.
12677                  */
12678                 ep.data.u64 = tswap64(target_ep->data.u64);
12679                 unlock_user_struct(target_ep, arg4, 0);
12680             }
            /*
             * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
12686             epp = &ep;
12687         }
12688         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12689     }
12690 #endif
12691 
12692 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12693 #if defined(TARGET_NR_epoll_wait)
12694     case TARGET_NR_epoll_wait:
12695 #endif
12696 #if defined(TARGET_NR_epoll_pwait)
12697     case TARGET_NR_epoll_pwait:
12698 #endif
12699     {
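        /*
         * Events are gathered into a host-side array and then converted
         * (byte-swapped) into the guest's target_epoll_event buffer.
         */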
12700         struct target_epoll_event *target_ep;
12701         struct epoll_event *ep;
12702         int epfd = arg1;
12703         int maxevents = arg3;
12704         int timeout = arg4;
12705 
12706         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12707             return -TARGET_EINVAL;
12708         }
12709 
12710         target_ep = lock_user(VERIFY_WRITE, arg2,
12711                               maxevents * sizeof(struct target_epoll_event), 1);
12712         if (!target_ep) {
12713             return -TARGET_EFAULT;
12714         }
12715 
12716         ep = g_try_new(struct epoll_event, maxevents);
12717         if (!ep) {
12718             unlock_user(target_ep, arg2, 0);
12719             return -TARGET_ENOMEM;
12720         }
12721 
12722         switch (num) {
12723 #if defined(TARGET_NR_epoll_pwait)
12724         case TARGET_NR_epoll_pwait:
12725         {
12726             target_sigset_t *target_set;
12727             sigset_t _set, *set = &_set;
12728 
12729             if (arg5) {
12730                 if (arg6 != sizeof(target_sigset_t)) {
12731                     ret = -TARGET_EINVAL;
12732                     break;
12733                 }
12734 
12735                 target_set = lock_user(VERIFY_READ, arg5,
12736                                        sizeof(target_sigset_t), 1);
12737                 if (!target_set) {
12738                     ret = -TARGET_EFAULT;
12739                     break;
12740                 }
12741                 target_to_host_sigset(set, target_set);
12742                 unlock_user(target_set, arg5, 0);
12743             } else {
12744                 set = NULL;
12745             }
12746 
12747             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12748                                              set, SIGSET_T_SIZE));
12749             break;
12750         }
12751 #endif
12752 #if defined(TARGET_NR_epoll_wait)
12753         case TARGET_NR_epoll_wait:
12754             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12755                                              NULL, 0));
12756             break;
12757 #endif
12758         default:
12759             ret = -TARGET_ENOSYS;
12760         }
12761         if (!is_error(ret)) {
12762             int i;
12763             for (i = 0; i < ret; i++) {
12764                 target_ep[i].events = tswap32(ep[i].events);
12765                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12766             }
12767             unlock_user(target_ep, arg2,
12768                         ret * sizeof(struct target_epoll_event));
12769         } else {
12770             unlock_user(target_ep, arg2, 0);
12771         }
12772         g_free(ep);
12773         return ret;
12774     }
12775 #endif
12776 #endif
12777 #ifdef TARGET_NR_prlimit64
12778     case TARGET_NR_prlimit64:
12779     {
12780         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12781         struct target_rlimit64 *target_rnew, *target_rold;
12782         struct host_rlimit64 rnew, rold, *rnewp = 0;
12783         int resource = target_to_host_resource(arg2);
12784 
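        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
         * silently ignored (rnewp stays NULL), since applying them on the
         * host would also constrain QEMU itself rather than just the guest.
         */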
12785         if (arg3 && (resource != RLIMIT_AS &&
12786                      resource != RLIMIT_DATA &&
12787                      resource != RLIMIT_STACK)) {
12788             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12789                 return -TARGET_EFAULT;
12790             }
12791             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12792             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12793             unlock_user_struct(target_rnew, arg3, 0);
12794             rnewp = &rnew;
12795         }
12796 
12797         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12798         if (!is_error(ret) && arg4) {
12799             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12800                 return -TARGET_EFAULT;
12801             }
12802             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12803             target_rold->rlim_max = tswap64(rold.rlim_max);
12804             unlock_user_struct(target_rold, arg4, 1);
12805         }
12806         return ret;
12807     }
12808 #endif
12809 #ifdef TARGET_NR_gethostname
12810     case TARGET_NR_gethostname:
12811     {
12812         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12813         if (name) {
12814             ret = get_errno(gethostname(name, arg2));
12815             unlock_user(name, arg1, arg2);
12816         } else {
12817             ret = -TARGET_EFAULT;
12818         }
12819         return ret;
12820     }
12821 #endif
12822 #ifdef TARGET_NR_atomic_cmpxchg_32
12823     case TARGET_NR_atomic_cmpxchg_32:
12824     {
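        /*
         * m68k-specific helper: compare the 32-bit word at guest address
         * arg6 with arg2 and, if they match, store arg1 there; the value
         * that was read is returned.
         */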
12825         /* should use start_exclusive from main.c */
12826         abi_ulong mem_value;
12827         if (get_user_u32(mem_value, arg6)) {
12828             target_siginfo_t info;
12829             info.si_signo = SIGSEGV;
12830             info.si_errno = 0;
12831             info.si_code = TARGET_SEGV_MAPERR;
12832             info._sifields._sigfault._addr = arg6;
12833             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12834                          QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
12840         return mem_value;
12841     }
12842 #endif
12843 #ifdef TARGET_NR_atomic_barrier
12844     case TARGET_NR_atomic_barrier:
        /*
         * Like the kernel implementation and the QEMU ARM barrier,
         * treat this as a no-op.
         */
12847         return 0;
12848 #endif
12849 
12850 #ifdef TARGET_NR_timer_create
12851     case TARGET_NR_timer_create:
12852     {
12853         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12854 
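        /*
         * Host timers are tracked in the g_posix_timers[] table; the guest
         * is handed back the table index tagged with TIMER_MAGIC, which
         * get_timer_id() later validates and strips.
         */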
12855         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12856 
12857         int clkid = arg1;
12858         int timer_index = next_free_host_timer();
12859 
12860         if (timer_index < 0) {
12861             ret = -TARGET_EAGAIN;
12862         } else {
            timer_t *phtimer = g_posix_timers + timer_index;
12864 
12865             if (arg2) {
12866                 phost_sevp = &host_sevp;
12867                 ret = target_to_host_sigevent(phost_sevp, arg2);
12868                 if (ret != 0) {
12869                     return ret;
12870                 }
12871             }
12872 
12873             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12874             if (ret) {
12875                 phtimer = NULL;
12876             } else {
12877                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12878                     return -TARGET_EFAULT;
12879                 }
12880             }
12881         }
12882         return ret;
12883     }
12884 #endif
12885 
12886 #ifdef TARGET_NR_timer_settime
12887     case TARGET_NR_timer_settime:
12888     {
12889         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12890          * struct itimerspec * old_value */
12891         target_timer_t timerid = get_timer_id(arg1);
12892 
12893         if (timerid < 0) {
12894             ret = timerid;
12895         } else if (arg3 == 0) {
12896             ret = -TARGET_EINVAL;
12897         } else {
12898             timer_t htimer = g_posix_timers[timerid];
12899             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12900 
12901             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12902                 return -TARGET_EFAULT;
12903             }
12904             ret = get_errno(
12905                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12906             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12907                 return -TARGET_EFAULT;
12908             }
12909         }
12910         return ret;
12911     }
12912 #endif
12913 
12914 #ifdef TARGET_NR_timer_settime64
12915     case TARGET_NR_timer_settime64:
12916     {
12917         target_timer_t timerid = get_timer_id(arg1);
12918 
12919         if (timerid < 0) {
12920             ret = timerid;
12921         } else if (arg3 == 0) {
12922             ret = -TARGET_EINVAL;
12923         } else {
12924             timer_t htimer = g_posix_timers[timerid];
12925             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12926 
12927             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12928                 return -TARGET_EFAULT;
12929             }
12930             ret = get_errno(
12931                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12932             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12933                 return -TARGET_EFAULT;
12934             }
12935         }
12936         return ret;
12937     }
12938 #endif
12939 
12940 #ifdef TARGET_NR_timer_gettime
12941     case TARGET_NR_timer_gettime:
12942     {
12943         /* args: timer_t timerid, struct itimerspec *curr_value */
12944         target_timer_t timerid = get_timer_id(arg1);
12945 
12946         if (timerid < 0) {
12947             ret = timerid;
12948         } else if (!arg2) {
12949             ret = -TARGET_EFAULT;
12950         } else {
12951             timer_t htimer = g_posix_timers[timerid];
12952             struct itimerspec hspec;
12953             ret = get_errno(timer_gettime(htimer, &hspec));
12954 
12955             if (host_to_target_itimerspec(arg2, &hspec)) {
12956                 ret = -TARGET_EFAULT;
12957             }
12958         }
12959         return ret;
12960     }
12961 #endif
12962 
12963 #ifdef TARGET_NR_timer_gettime64
12964     case TARGET_NR_timer_gettime64:
12965     {
12966         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12967         target_timer_t timerid = get_timer_id(arg1);
12968 
12969         if (timerid < 0) {
12970             ret = timerid;
12971         } else if (!arg2) {
12972             ret = -TARGET_EFAULT;
12973         } else {
12974             timer_t htimer = g_posix_timers[timerid];
12975             struct itimerspec hspec;
12976             ret = get_errno(timer_gettime(htimer, &hspec));
12977 
12978             if (host_to_target_itimerspec64(arg2, &hspec)) {
12979                 ret = -TARGET_EFAULT;
12980             }
12981         }
12982         return ret;
12983     }
12984 #endif
12985 
12986 #ifdef TARGET_NR_timer_getoverrun
12987     case TARGET_NR_timer_getoverrun:
12988     {
12989         /* args: timer_t timerid */
12990         target_timer_t timerid = get_timer_id(arg1);
12991 
12992         if (timerid < 0) {
12993             ret = timerid;
12994         } else {
12995             timer_t htimer = g_posix_timers[timerid];
12996             ret = get_errno(timer_getoverrun(htimer));
12997         }
12998         return ret;
12999     }
13000 #endif
13001 
13002 #ifdef TARGET_NR_timer_delete
13003     case TARGET_NR_timer_delete:
13004     {
13005         /* args: timer_t timerid */
13006         target_timer_t timerid = get_timer_id(arg1);
13007 
13008         if (timerid < 0) {
13009             ret = timerid;
13010         } else {
13011             timer_t htimer = g_posix_timers[timerid];
13012             ret = get_errno(timer_delete(htimer));
13013             g_posix_timers[timerid] = 0;
13014         }
13015         return ret;
13016     }
13017 #endif
13018 
13019 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13020     case TARGET_NR_timerfd_create:
13021         return get_errno(timerfd_create(arg1,
13022                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13023 #endif
13024 
13025 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13026     case TARGET_NR_timerfd_gettime:
13027         {
13028             struct itimerspec its_curr;
13029 
13030             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13031 
13032             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13033                 return -TARGET_EFAULT;
13034             }
13035         }
13036         return ret;
13037 #endif
13038 
13039 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13040     case TARGET_NR_timerfd_gettime64:
13041         {
13042             struct itimerspec its_curr;
13043 
13044             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13045 
13046             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13047                 return -TARGET_EFAULT;
13048             }
13049         }
13050         return ret;
13051 #endif
13052 
13053 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13054     case TARGET_NR_timerfd_settime:
13055         {
13056             struct itimerspec its_new, its_old, *p_new;
13057 
13058             if (arg3) {
13059                 if (target_to_host_itimerspec(&its_new, arg3)) {
13060                     return -TARGET_EFAULT;
13061                 }
13062                 p_new = &its_new;
13063             } else {
13064                 p_new = NULL;
13065             }
13066 
13067             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13068 
13069             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13070                 return -TARGET_EFAULT;
13071             }
13072         }
13073         return ret;
13074 #endif
13075 
13076 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13077     case TARGET_NR_timerfd_settime64:
13078         {
13079             struct itimerspec its_new, its_old, *p_new;
13080 
13081             if (arg3) {
13082                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13083                     return -TARGET_EFAULT;
13084                 }
13085                 p_new = &its_new;
13086             } else {
13087                 p_new = NULL;
13088             }
13089 
13090             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13091 
13092             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13093                 return -TARGET_EFAULT;
13094             }
13095         }
13096         return ret;
13097 #endif
13098 
13099 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13100     case TARGET_NR_ioprio_get:
13101         return get_errno(ioprio_get(arg1, arg2));
13102 #endif
13103 
13104 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13105     case TARGET_NR_ioprio_set:
13106         return get_errno(ioprio_set(arg1, arg2, arg3));
13107 #endif
13108 
13109 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13110     case TARGET_NR_setns:
13111         return get_errno(setns(arg1, arg2));
13112 #endif
13113 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13114     case TARGET_NR_unshare:
13115         return get_errno(unshare(arg1));
13116 #endif
13117 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13118     case TARGET_NR_kcmp:
13119         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13120 #endif
13121 #ifdef TARGET_NR_swapcontext
13122     case TARGET_NR_swapcontext:
13123         /* PowerPC specific.  */
13124         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13125 #endif
13126 #ifdef TARGET_NR_memfd_create
13127     case TARGET_NR_memfd_create:
13128         p = lock_user_string(arg1);
13129         if (!p) {
13130             return -TARGET_EFAULT;
13131         }
13132         ret = get_errno(memfd_create(p, arg2));
13133         fd_trans_unregister(ret);
13134         unlock_user(p, arg1, 0);
13135         return ret;
13136 #endif
13137 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13138     case TARGET_NR_membarrier:
13139         return get_errno(membarrier(arg1, arg2));
13140 #endif
13141 
13142 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13143     case TARGET_NR_copy_file_range:
13144         {
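            /*
             * As with splice, the optional 64-bit in/out offsets live in
             * guest memory; copy them in, and write the updated values
             * back once the host call has made progress.
             */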
13145             loff_t inoff, outoff;
13146             loff_t *pinoff = NULL, *poutoff = NULL;
13147 
13148             if (arg2) {
13149                 if (get_user_u64(inoff, arg2)) {
13150                     return -TARGET_EFAULT;
13151                 }
13152                 pinoff = &inoff;
13153             }
13154             if (arg4) {
13155                 if (get_user_u64(outoff, arg4)) {
13156                     return -TARGET_EFAULT;
13157                 }
13158                 poutoff = &outoff;
13159             }
13160             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13161                                                  arg5, arg6));
13162             if (!is_error(ret) && ret > 0) {
13163                 if (arg2) {
13164                     if (put_user_u64(inoff, arg2)) {
13165                         return -TARGET_EFAULT;
13166                     }
13167                 }
13168                 if (arg4) {
13169                     if (put_user_u64(outoff, arg4)) {
13170                         return -TARGET_EFAULT;
13171                     }
13172                 }
13173             }
13174         }
13175         return ret;
13176 #endif
13177 
13178     default:
13179         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13180         return -TARGET_ENOSYS;
13181     }
13182     return ret;
13183 }
13184 
13185 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13186                     abi_long arg2, abi_long arg3, abi_long arg4,
13187                     abi_long arg5, abi_long arg6, abi_long arg7,
13188                     abi_long arg8)
13189 {
13190     CPUState *cpu = env_cpu(cpu_env);
13191     abi_long ret;
13192 
13193 #ifdef DEBUG_ERESTARTSYS
13194     /* Debug-only code for exercising the syscall-restart code paths
13195      * in the per-architecture cpu main loops: restart every syscall
13196      * the guest makes once before letting it through.
13197      */
13198     {
13199         static bool flag;
13200         flag = !flag;
13201         if (flag) {
13202             return -TARGET_ERESTARTSYS;
13203         }
13204     }
13205 #endif
13206 
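    /* Notify plugin/trace hooks that a syscall is about to be executed. */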
13207     record_syscall_start(cpu, num, arg1,
13208                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13209 
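    /* -strace logging: print the syscall before and its result after. */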
13210     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13211         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13212     }
13213 
13214     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13215                       arg5, arg6, arg7, arg8);
13216 
13217     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13218         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13219                           arg3, arg4, arg5, arg6);
13220     }
13221 
13222     record_syscall_return(cpu, num, ret);
13223     return ret;
13224 }
13225