xref: /openbmc/qemu/linux-user/syscall.c (revision 228168cb)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we emulate getdents with getdents if the host has it.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
368 static const bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
/* Fallback for hosts whose kernel/libc lacks utimensat(): always fail
 * with ENOSYS so the guest sees "syscall not implemented".
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
/* Fallback for hosts without the renameat2 syscall: a plain rename
 * (flags == 0) is forwarded to renameat(); any flags value (e.g.
 * RENAME_NOREPLACE / RENAME_EXCHANGE) cannot be emulated and fails
 * with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the host libc inotify_init() for the syscall layer. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over the host libc inotify_add_watch(). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over the host libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over the host libc inotify_init1(). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, } ;
495 
496 static inline int next_free_host_timer(void)
497 {
498     int k ;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
508 #endif
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 /* target_to_host_errno_table[] is initialized from
513  * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 };
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERKFILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
727 
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 #endif
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 #endif
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746               struct timespec *, tsp, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 #endif
749 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750               int, maxevents, int, timeout, const sigset_t *, sigmask,
751               size_t, sigsetsize)
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754               const struct timespec *,timeout,int *,uaddr2,int,val3)
755 #endif
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 #endif
760 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761 safe_syscall2(int, kill, pid_t, pid, int, sig)
762 safe_syscall2(int, tkill, int, tid, int, sig)
763 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767               unsigned long, pos_l, unsigned long, pos_h)
768 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769               unsigned long, pos_l, unsigned long, pos_h)
770 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771               socklen_t, addrlen)
772 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778 safe_syscall2(int, flock, int, fd, int, operation)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781               const struct timespec *, uts, size_t, sigsetsize)
782 #endif
783 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784               int, flags)
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep, const struct timespec *, req,
787               struct timespec *, rem)
788 #endif
789 #if defined(TARGET_NR_clock_nanosleep) || \
790     defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792               const struct timespec *, req, struct timespec *, rem)
793 #endif
794 #ifdef __NR_ipc
795 #ifdef __s390x__
796 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797               void *, ptr)
798 #else
799 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800               void *, ptr, long, fifth)
801 #endif
802 #endif
803 #ifdef __NR_msgsnd
804 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805               int, flags)
806 #endif
807 #ifdef __NR_msgrcv
808 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809               long, msgtype, int, flags)
810 #endif
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813               unsigned, nsops, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedsend) || \
816     defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818               size_t, len, unsigned, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_mq_timedreceive) || \
821     defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823               size_t, len, unsigned *, prio, const struct timespec *, timeout)
824 #endif
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827               int, outfd, loff_t *, poutoff, size_t, length,
828               unsigned int, flags)
829 #endif
830 
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
/* 32-bit hosts that provide fcntl64 get the 64-bit-offset variant. */
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
846 
847 static inline int host_to_target_sock_type(int host_type)
848 {
849     int target_type;
850 
851     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852     case SOCK_DGRAM:
853         target_type = TARGET_SOCK_DGRAM;
854         break;
855     case SOCK_STREAM:
856         target_type = TARGET_SOCK_STREAM;
857         break;
858     default:
859         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860         break;
861     }
862 
863 #if defined(SOCK_CLOEXEC)
864     if (host_type & SOCK_CLOEXEC) {
865         target_type |= TARGET_SOCK_CLOEXEC;
866     }
867 #endif
868 
869 #if defined(SOCK_NONBLOCK)
870     if (host_type & SOCK_NONBLOCK) {
871         target_type |= TARGET_SOCK_NONBLOCK;
872     }
873 #endif
874 
875     return target_type;
876 }
877 
/* Guest program-break state:
 *  target_brk          - the guest's current break
 *  target_original_brk - break at process start (the lower bound)
 *  brk_page            - host-page-aligned top of memory already reserved
 *                        for the guest heap
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;
881 
/* Record the initial guest break (called from the loader); the original
 * and current break both start at the host-page-aligned value. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
887 
/* Swap which definition is commented out to trace do_brk() decisions. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
890 
891 /* do_brk() must return target values and target errnos. */
892 abi_long do_brk(abi_ulong new_brk)
893 {
894     abi_long mapped_addr;
895     abi_ulong new_alloc_size;
896 
897     /* brk pointers are always untagged */
898 
899     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
900 
901     if (!new_brk) {
902         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
903         return target_brk;
904     }
905     if (new_brk < target_original_brk) {
906         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
907                    target_brk);
908         return target_brk;
909     }
910 
911     /* If the new brk is less than the highest page reserved to the
912      * target heap allocation, set it and we're almost done...  */
913     if (new_brk <= brk_page) {
914         /* Heap contents are initialized to zero, as for anonymous
915          * mapped pages.  */
916         if (new_brk > target_brk) {
917             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
918         }
919 	target_brk = new_brk;
920         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
921 	return target_brk;
922     }
923 
924     /* We need to allocate more memory after the brk... Note that
925      * we don't use MAP_FIXED because that will map over the top of
926      * any existing mapping (like the one with the host libc or qemu
927      * itself); instead we treat "mapped but at wrong address" as
928      * a failure and unmap again.
929      */
930     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
931     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
932                                         PROT_READ|PROT_WRITE,
933                                         MAP_ANON|MAP_PRIVATE, 0, 0));
934 
935     if (mapped_addr == brk_page) {
936         /* Heap contents are initialized to zero, as for anonymous
937          * mapped pages.  Technically the new pages are already
938          * initialized to zero since they *are* anonymous mapped
939          * pages, however we have to take care with the contents that
940          * come from the remaining part of the previous page: it may
941          * contains garbage data due to a previous heap usage (grown
942          * then shrunken).  */
943         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
944 
945         target_brk = new_brk;
946         brk_page = HOST_PAGE_ALIGN(target_brk);
947         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
948             target_brk);
949         return target_brk;
950     } else if (mapped_addr != -1) {
951         /* Mapped but at wrong address, meaning there wasn't actually
952          * enough space for this brk.
953          */
954         target_munmap(mapped_addr, new_alloc_size);
955         mapped_addr = -1;
956         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
957     }
958     else {
959         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
960     }
961 
962 #if defined(TARGET_ALPHA)
963     /* We (partially) emulate OSF/1 on Alpha, which requires we
964        return a proper errno, not an unchanged brk value.  */
965     return -TARGET_ENOMEM;
966 #endif
967     /* For everything else, return the previous break. */
968     return target_brk;
969 }
970 
971 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
972     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
973 static inline abi_long copy_from_user_fdset(fd_set *fds,
974                                             abi_ulong target_fds_addr,
975                                             int n)
976 {
977     int i, nw, j, k;
978     abi_ulong b, *target_fds;
979 
980     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
981     if (!(target_fds = lock_user(VERIFY_READ,
982                                  target_fds_addr,
983                                  sizeof(abi_ulong) * nw,
984                                  1)))
985         return -TARGET_EFAULT;
986 
987     FD_ZERO(fds);
988     k = 0;
989     for (i = 0; i < nw; i++) {
990         /* grab the abi_ulong */
991         __get_user(b, &target_fds[i]);
992         for (j = 0; j < TARGET_ABI_BITS; j++) {
993             /* check the bit inside the abi_ulong */
994             if ((b >> j) & 1)
995                 FD_SET(k, fds);
996             k++;
997         }
998     }
999 
1000     unlock_user(target_fds, target_fds_addr, 0);
1001 
1002     return 0;
1003 }
1004 
1005 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1006                                                  abi_ulong target_fds_addr,
1007                                                  int n)
1008 {
1009     if (target_fds_addr) {
1010         if (copy_from_user_fdset(fds, target_fds_addr, n))
1011             return -TARGET_EFAULT;
1012         *fds_ptr = fds;
1013     } else {
1014         *fds_ptr = NULL;
1015     }
1016     return 0;
1017 }
1018 
1019 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1020                                           const fd_set *fds,
1021                                           int n)
1022 {
1023     int i, nw, j, k;
1024     abi_long v;
1025     abi_ulong *target_fds;
1026 
1027     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1028     if (!(target_fds = lock_user(VERIFY_WRITE,
1029                                  target_fds_addr,
1030                                  sizeof(abi_ulong) * nw,
1031                                  0)))
1032         return -TARGET_EFAULT;
1033 
1034     k = 0;
1035     for (i = 0; i < nw; i++) {
1036         v = 0;
1037         for (j = 0; j < TARGET_ABI_BITS; j++) {
1038             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1039             k++;
1040         }
1041         __put_user(v, &target_fds[i]);
1042     }
1043 
1044     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1045 
1046     return 0;
1047 }
1048 #endif
1049 
/* Host clock-tick rate (ticks/second) for clock_t conversions: Alpha
 * hosts historically tick at 1024 Hz, everything else at 100 Hz. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1055 
/* Convert a host clock_t tick count to target ticks, rescaling from
 * HOST_HZ to TARGET_HZ when they differ (the 64-bit intermediate avoids
 * overflow in the multiply). */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1064 
1065 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1066                                              const struct rusage *rusage)
1067 {
1068     struct target_rusage *target_rusage;
1069 
1070     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1071         return -TARGET_EFAULT;
1072     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1073     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1074     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1075     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1076     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1077     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1078     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1079     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1080     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1081     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1082     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1083     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1084     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1085     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1086     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1087     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1088     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1089     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1090     unlock_user_struct(target_rusage, target_addr, 1);
1091 
1092     return 0;
1093 }
1094 
1095 #ifdef TARGET_NR_setrlimit
1096 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1097 {
1098     abi_ulong target_rlim_swap;
1099     rlim_t result;
1100 
1101     target_rlim_swap = tswapal(target_rlim);
1102     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1103         return RLIM_INFINITY;
1104 
1105     result = target_rlim_swap;
1106     if (target_rlim_swap != (rlim_t)result)
1107         return RLIM_INFINITY;
1108 
1109     return result;
1110 }
1111 #endif
1112 
1113 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1114 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1115 {
1116     abi_ulong target_rlim_swap;
1117     abi_ulong result;
1118 
1119     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1120         target_rlim_swap = TARGET_RLIM_INFINITY;
1121     else
1122         target_rlim_swap = rlim;
1123     result = tswapal(target_rlim_swap);
1124 
1125     return result;
1126 }
1127 #endif
1128 
/* Map a target RLIMIT_* resource code to the host's numbering; codes we
 * do not recognise are passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1166 
1167 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1168                                               abi_ulong target_tv_addr)
1169 {
1170     struct target_timeval *target_tv;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173         return -TARGET_EFAULT;
1174     }
1175 
1176     __get_user(tv->tv_sec, &target_tv->tv_sec);
1177     __get_user(tv->tv_usec, &target_tv->tv_usec);
1178 
1179     unlock_user_struct(target_tv, target_tv_addr, 0);
1180 
1181     return 0;
1182 }
1183 
1184 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1185                                             const struct timeval *tv)
1186 {
1187     struct target_timeval *target_tv;
1188 
1189     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1190         return -TARGET_EFAULT;
1191     }
1192 
1193     __put_user(tv->tv_sec, &target_tv->tv_sec);
1194     __put_user(tv->tv_usec, &target_tv->tv_usec);
1195 
1196     unlock_user_struct(target_tv, target_tv_addr, 1);
1197 
1198     return 0;
1199 }
1200 
1201 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1202 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1203                                                 abi_ulong target_tv_addr)
1204 {
1205     struct target__kernel_sock_timeval *target_tv;
1206 
1207     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1208         return -TARGET_EFAULT;
1209     }
1210 
1211     __get_user(tv->tv_sec, &target_tv->tv_sec);
1212     __get_user(tv->tv_usec, &target_tv->tv_usec);
1213 
1214     unlock_user_struct(target_tv, target_tv_addr, 0);
1215 
1216     return 0;
1217 }
1218 #endif
1219 
1220 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1221                                               const struct timeval *tv)
1222 {
1223     struct target__kernel_sock_timeval *target_tv;
1224 
1225     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1226         return -TARGET_EFAULT;
1227     }
1228 
1229     __put_user(tv->tv_sec, &target_tv->tv_sec);
1230     __put_user(tv->tv_usec, &target_tv->tv_usec);
1231 
1232     unlock_user_struct(target_tv, target_tv_addr, 1);
1233 
1234     return 0;
1235 }
1236 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read a target struct timespec from guest memory into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 *
 * Guard fix: the condition previously listed TARGET_NR_pselect6 twice;
 * do_pselect6() is compiled when either pselect6 or pselect6_time64 is
 * defined and references this helper, so the second term must be
 * TARGET_NR_pselect6_time64 for time64-only targets to build. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1260 
1261 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1262     defined(TARGET_NR_timer_settime64) || \
1263     defined(TARGET_NR_mq_timedsend_time64) || \
1264     defined(TARGET_NR_mq_timedreceive_time64) || \
1265     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1266     defined(TARGET_NR_clock_nanosleep_time64) || \
1267     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1268     defined(TARGET_NR_utimensat) || \
1269     defined(TARGET_NR_utimensat_time64) || \
1270     defined(TARGET_NR_semtimedop_time64) || \
1271     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/* Read a 64-bit (__kernel_timespec) struct timespec from guest memory
 * into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
1287 #endif
1288 
1289 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1290                                                struct timespec *host_ts)
1291 {
1292     struct target_timespec *target_ts;
1293 
1294     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1295         return -TARGET_EFAULT;
1296     }
1297     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1298     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1299     unlock_user_struct(target_ts, target_addr, 1);
1300     return 0;
1301 }
1302 
1303 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1304                                                  struct timespec *host_ts)
1305 {
1306     struct target__kernel_timespec *target_ts;
1307 
1308     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1309         return -TARGET_EFAULT;
1310     }
1311     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1312     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1313     unlock_user_struct(target_ts, target_addr, 1);
1314     return 0;
1315 }
1316 
1317 #if defined(TARGET_NR_gettimeofday)
/* Write a struct timezone out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 * NOTE(review): the lock_user_struct() copy argument here is 1, unlike
 * the other VERIFY_WRITE helpers in this file which pass 0; that copies
 * in the existing guest bytes first, which is harmless but looks
 * unintentional - confirm. */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
1334 #endif
1335 
1336 #if defined(TARGET_NR_settimeofday)
1337 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1338                                                abi_ulong target_tz_addr)
1339 {
1340     struct target_timezone *target_tz;
1341 
1342     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1343         return -TARGET_EFAULT;
1344     }
1345 
1346     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1347     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1348 
1349     unlock_user_struct(target_tz, target_tz_addr, 0);
1350 
1351     return 0;
1352 }
1353 #endif
1354 
1355 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1356 #include <mqueue.h>
1357 
1358 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1359                                               abi_ulong target_mq_attr_addr)
1360 {
1361     struct target_mq_attr *target_mq_attr;
1362 
1363     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1364                           target_mq_attr_addr, 1))
1365         return -TARGET_EFAULT;
1366 
1367     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1368     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1369     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1370     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1371 
1372     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1373 
1374     return 0;
1375 }
1376 
1377 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1378                                             const struct mq_attr *attr)
1379 {
1380     struct target_mq_attr *target_mq_attr;
1381 
1382     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1383                           target_mq_attr_addr, 0))
1384         return -TARGET_EFAULT;
1385 
1386     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1387     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1388     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1389     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1390 
1391     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1392 
1393     return 0;
1394 }
1395 #endif
1396 
1397 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1398 /* do_select() must return target values and target errnos. */
1399 static abi_long do_select(int n,
1400                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1401                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1402 {
1403     fd_set rfds, wfds, efds;
1404     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1405     struct timeval tv;
1406     struct timespec ts, *ts_ptr;
1407     abi_long ret;
1408 
1409     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1410     if (ret) {
1411         return ret;
1412     }
1413     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1414     if (ret) {
1415         return ret;
1416     }
1417     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1418     if (ret) {
1419         return ret;
1420     }
1421 
1422     if (target_tv_addr) {
1423         if (copy_from_user_timeval(&tv, target_tv_addr))
1424             return -TARGET_EFAULT;
1425         ts.tv_sec = tv.tv_sec;
1426         ts.tv_nsec = tv.tv_usec * 1000;
1427         ts_ptr = &ts;
1428     } else {
1429         ts_ptr = NULL;
1430     }
1431 
1432     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1433                                   ts_ptr, NULL));
1434 
1435     if (!is_error(ret)) {
1436         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1437             return -TARGET_EFAULT;
1438         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1439             return -TARGET_EFAULT;
1440         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1441             return -TARGET_EFAULT;
1442 
1443         if (target_tv_addr) {
1444             tv.tv_sec = ts.tv_sec;
1445             tv.tv_usec = ts.tv_nsec / 1000;
1446             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1447                 return -TARGET_EFAULT;
1448             }
1449         }
1450     }
1451 
1452     return ret;
1453 }
1454 
1455 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1456 static abi_long do_old_select(abi_ulong arg1)
1457 {
1458     struct target_sel_arg_struct *sel;
1459     abi_ulong inp, outp, exp, tvp;
1460     long nsel;
1461 
1462     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1463         return -TARGET_EFAULT;
1464     }
1465 
1466     nsel = tswapal(sel->n);
1467     inp = tswapal(sel->inp);
1468     outp = tswapal(sel->outp);
1469     exp = tswapal(sel->exp);
1470     tvp = tswapal(sel->tvp);
1471 
1472     unlock_user_struct(sel, arg1, 0);
1473 
1474     return do_select(nsel, inp, outp, exp, tvp);
1475 }
1476 #endif
1477 #endif
1478 
1479 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1480 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1481                             abi_long arg4, abi_long arg5, abi_long arg6,
1482                             bool time64)
1483 {
1484     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1485     fd_set rfds, wfds, efds;
1486     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1487     struct timespec ts, *ts_ptr;
1488     abi_long ret;
1489 
1490     /*
1491      * The 6th arg is actually two args smashed together,
1492      * so we cannot use the C library.
1493      */
1494     sigset_t set;
1495     struct {
1496         sigset_t *set;
1497         size_t size;
1498     } sig, *sig_ptr;
1499 
1500     abi_ulong arg_sigset, arg_sigsize, *arg7;
1501     target_sigset_t *target_sigset;
1502 
1503     n = arg1;
1504     rfd_addr = arg2;
1505     wfd_addr = arg3;
1506     efd_addr = arg4;
1507     ts_addr = arg5;
1508 
1509     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1510     if (ret) {
1511         return ret;
1512     }
1513     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1514     if (ret) {
1515         return ret;
1516     }
1517     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1518     if (ret) {
1519         return ret;
1520     }
1521 
1522     /*
1523      * This takes a timespec, and not a timeval, so we cannot
1524      * use the do_select() helper ...
1525      */
1526     if (ts_addr) {
1527         if (time64) {
1528             if (target_to_host_timespec64(&ts, ts_addr)) {
1529                 return -TARGET_EFAULT;
1530             }
1531         } else {
1532             if (target_to_host_timespec(&ts, ts_addr)) {
1533                 return -TARGET_EFAULT;
1534             }
1535         }
1536             ts_ptr = &ts;
1537     } else {
1538         ts_ptr = NULL;
1539     }
1540 
1541     /* Extract the two packed args for the sigset */
1542     if (arg6) {
1543         sig_ptr = &sig;
1544         sig.size = SIGSET_T_SIZE;
1545 
1546         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1547         if (!arg7) {
1548             return -TARGET_EFAULT;
1549         }
1550         arg_sigset = tswapal(arg7[0]);
1551         arg_sigsize = tswapal(arg7[1]);
1552         unlock_user(arg7, arg6, 0);
1553 
1554         if (arg_sigset) {
1555             sig.set = &set;
1556             if (arg_sigsize != sizeof(*target_sigset)) {
1557                 /* Like the kernel, we enforce correct size sigsets */
1558                 return -TARGET_EINVAL;
1559             }
1560             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1561                                       sizeof(*target_sigset), 1);
1562             if (!target_sigset) {
1563                 return -TARGET_EFAULT;
1564             }
1565             target_to_host_sigset(&set, target_sigset);
1566             unlock_user(target_sigset, arg_sigset, 0);
1567         } else {
1568             sig.set = NULL;
1569         }
1570     } else {
1571         sig_ptr = NULL;
1572     }
1573 
1574     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1575                                   ts_ptr, sig_ptr));
1576 
1577     if (!is_error(ret)) {
1578         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1579             return -TARGET_EFAULT;
1580         }
1581         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1582             return -TARGET_EFAULT;
1583         }
1584         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1585             return -TARGET_EFAULT;
1586         }
1587         if (time64) {
1588             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1589                 return -TARGET_EFAULT;
1590             }
1591         } else {
1592             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1593                 return -TARGET_EFAULT;
1594             }
1595         }
1596     }
1597     return ret;
1598 }
1599 #endif
1600 
1601 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1602     defined(TARGET_NR_ppoll_time64)
/* Implement poll()/ppoll()/ppoll_time64 for the guest.
 * arg1/arg2: guest pollfd array address and entry count.
 * ppoll: arg3 is a timespec address (time64 selects the 64-bit layout),
 *        arg4/arg5 are the sigset address and size.
 * plain poll: arg3 is a timeout in milliseconds (< 0 means infinite).
 * Returns the syscall result or a target errno.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Reject counts whose byte size would overflow an int. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        /* Build the host pollfd array from the guest's (byte-swapped). */
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Like the kernel, require an exactly-sized sigset. */
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        /* Write back the remaining timeout.
         * NOTE(review): these EFAULT paths return without unlocking
         * target_pfd (and target_set when arg4 is set), unlike the
         * earlier error paths above - confirm whether that is a leak. */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
          struct timespec ts, *pts;

          if (arg3 >= 0) {
              /* Convert ms to secs, ns */
              ts.tv_sec = arg3 / 1000;
              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
              pts = &ts;
          } else {
              /* -ve poll() timeout means "infinite" */
              pts = NULL;
          }
          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* Copy each revents field back out to the guest pollfd array. */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
1708 #endif
1709 
/* Thin wrapper around the host pipe2(); returns -ENOSYS when the host
 * libc does not provide pipe2 (CONFIG_PIPE2 unset). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1718 
1719 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1720                         int flags, int is_pipe2)
1721 {
1722     int host_pipe[2];
1723     abi_long ret;
1724     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1725 
1726     if (is_error(ret))
1727         return get_errno(ret);
1728 
1729     /* Several targets have special calling conventions for the original
1730        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1731     if (!is_pipe2) {
1732 #if defined(TARGET_ALPHA)
1733         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1734         return host_pipe[0];
1735 #elif defined(TARGET_MIPS)
1736         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1737         return host_pipe[0];
1738 #elif defined(TARGET_SH4)
1739         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1740         return host_pipe[0];
1741 #elif defined(TARGET_SPARC)
1742         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1743         return host_pipe[0];
1744 #endif
1745     }
1746 
1747     if (put_user_s32(host_pipe[0], pipedes)
1748         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1749         return -TARGET_EFAULT;
1750     return get_errno(ret);
1751 }
1752 
1753 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1754                                               abi_ulong target_addr,
1755                                               socklen_t len)
1756 {
1757     struct target_ip_mreqn *target_smreqn;
1758 
1759     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1760     if (!target_smreqn)
1761         return -TARGET_EFAULT;
1762     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1763     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1764     if (len == sizeof(struct target_ip_mreqn))
1765         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1766     unlock_user(target_smreqn, target_addr, 0);
1767 
1768     return 0;
1769 }
1770 
/*
 * Convert a guest sockaddr at @target_addr (length @len) into the host
 * sockaddr @addr, byte-swapping the family and any family-specific
 * fields (AF_NETLINK pid/groups, AF_PACKET ifindex/hatype).  Fds with a
 * registered fd_trans converter are delegated entirely to it.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is bad.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types carry their own address translation hook. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* If the last guest byte is not NUL but the one following
             * it is, extend len to take in that terminator.
             * NOTE(review): cp[len] reads one byte past the region the
             * guest declared -- presumably covered by the lock_user
             * mapping; confirm before changing this.
             */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        /* Never copy more than the host sockaddr_un can hold. */
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* already byte-swapped above */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1827 
1828 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1829                                                struct sockaddr *addr,
1830                                                socklen_t len)
1831 {
1832     struct target_sockaddr *target_saddr;
1833 
1834     if (len == 0) {
1835         return 0;
1836     }
1837     assert(addr);
1838 
1839     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1840     if (!target_saddr)
1841         return -TARGET_EFAULT;
1842     memcpy(target_saddr, addr, len);
1843     if (len >= offsetof(struct target_sockaddr, sa_family) +
1844         sizeof(target_saddr->sa_family)) {
1845         target_saddr->sa_family = tswap16(addr->sa_family);
1846     }
1847     if (addr->sa_family == AF_NETLINK &&
1848         len >= sizeof(struct target_sockaddr_nl)) {
1849         struct target_sockaddr_nl *target_nl =
1850                (struct target_sockaddr_nl *)target_saddr;
1851         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1852         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1853     } else if (addr->sa_family == AF_PACKET) {
1854         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1855         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1856         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1857     } else if (addr->sa_family == AF_INET6 &&
1858                len >= sizeof(struct target_sockaddr_in6)) {
1859         struct target_sockaddr_in6 *target_in6 =
1860                (struct target_sockaddr_in6 *)target_saddr;
1861         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1862     }
1863     unlock_user(target_saddr, target_addr, len);
1864 
1865     return 0;
1866 }
1867 
/*
 * Convert the ancillary (control) data attached to a guest msghdr into
 * the host's cmsg layout.  Payload types we understand (SCM_RIGHTS fd
 * arrays, SCM_CREDENTIALS) are converted field by field; anything else
 * is byte-copied with a LOG_UNIMP warning.  On return,
 * msgh->msg_controllen holds the host space actually used.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is bad.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;   /* host bytes consumed so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Not even room for one full guest header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the guest and host cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* Translate the level/type; only SOL_SOCKET needs remapping. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptors: an array of ints, each byte-swapped. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Process credentials: convert field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: best effort, raw byte copy. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1953 
/*
 * Convert ancillary (control) data from the host msghdr back into the
 * guest's cmsg layout (the receive direction).  Truncation caused by an
 * undersized guest buffer is reported to the guest via MSG_CTRUNC,
 * mirroring the kernel's put_cmsg() behaviour.  On return,
 * target_msgh->msg_controllen holds the guest space actually used.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is bad.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;       /* guest bytes still available */
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;           /* guest bytes consumed so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Guest buffer can't hold even one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the host and guest cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length for this control message. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* Translate the level/type; only SOL_SOCKET needs remapping. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* struct timeval may differ in size between ABIs. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Not enough guest space for the full payload: truncate and
         * tell the guest via MSG_CTRUNC. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* File descriptors: an array of ints, each swapped.
                 * numfds uses tgt_len, so a truncated buffer copies
                 * only whole fds. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Partial timevals are not converted field-wise. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                /* Process credentials: convert field by field. */
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                /* A single 32-bit value. */
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error report: sock_extended_err followed by
                 * the offending peer's address. */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                /* A single 32-bit value. */
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 variant of the extended error report. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra
             * guest space. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the guest space consumed; the final (possibly
         * truncated) message may take less than a full CMSG_SPACE. */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2185 
2186 /* do_setsockopt() Must return target values and target errnos. */
2187 static abi_long do_setsockopt(int sockfd, int level, int optname,
2188                               abi_ulong optval_addr, socklen_t optlen)
2189 {
2190     abi_long ret;
2191     int val;
2192     struct ip_mreqn *ip_mreq;
2193     struct ip_mreq_source *ip_mreq_source;
2194 
2195     switch(level) {
2196     case SOL_TCP:
2197     case SOL_UDP:
2198         /* TCP and UDP options all take an 'int' value.  */
2199         if (optlen < sizeof(uint32_t))
2200             return -TARGET_EINVAL;
2201 
2202         if (get_user_u32(val, optval_addr))
2203             return -TARGET_EFAULT;
2204         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2205         break;
2206     case SOL_IP:
2207         switch(optname) {
2208         case IP_TOS:
2209         case IP_TTL:
2210         case IP_HDRINCL:
2211         case IP_ROUTER_ALERT:
2212         case IP_RECVOPTS:
2213         case IP_RETOPTS:
2214         case IP_PKTINFO:
2215         case IP_MTU_DISCOVER:
2216         case IP_RECVERR:
2217         case IP_RECVTTL:
2218         case IP_RECVTOS:
2219 #ifdef IP_FREEBIND
2220         case IP_FREEBIND:
2221 #endif
2222         case IP_MULTICAST_TTL:
2223         case IP_MULTICAST_LOOP:
2224             val = 0;
2225             if (optlen >= sizeof(uint32_t)) {
2226                 if (get_user_u32(val, optval_addr))
2227                     return -TARGET_EFAULT;
2228             } else if (optlen >= 1) {
2229                 if (get_user_u8(val, optval_addr))
2230                     return -TARGET_EFAULT;
2231             }
2232             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2233             break;
2234         case IP_ADD_MEMBERSHIP:
2235         case IP_DROP_MEMBERSHIP:
2236             if (optlen < sizeof (struct target_ip_mreq) ||
2237                 optlen > sizeof (struct target_ip_mreqn))
2238                 return -TARGET_EINVAL;
2239 
2240             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2241             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2242             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2243             break;
2244 
2245         case IP_BLOCK_SOURCE:
2246         case IP_UNBLOCK_SOURCE:
2247         case IP_ADD_SOURCE_MEMBERSHIP:
2248         case IP_DROP_SOURCE_MEMBERSHIP:
2249             if (optlen != sizeof (struct target_ip_mreq_source))
2250                 return -TARGET_EINVAL;
2251 
2252             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2253             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2254             unlock_user (ip_mreq_source, optval_addr, 0);
2255             break;
2256 
2257         default:
2258             goto unimplemented;
2259         }
2260         break;
2261     case SOL_IPV6:
2262         switch (optname) {
2263         case IPV6_MTU_DISCOVER:
2264         case IPV6_MTU:
2265         case IPV6_V6ONLY:
2266         case IPV6_RECVPKTINFO:
2267         case IPV6_UNICAST_HOPS:
2268         case IPV6_MULTICAST_HOPS:
2269         case IPV6_MULTICAST_LOOP:
2270         case IPV6_RECVERR:
2271         case IPV6_RECVHOPLIMIT:
2272         case IPV6_2292HOPLIMIT:
2273         case IPV6_CHECKSUM:
2274         case IPV6_ADDRFORM:
2275         case IPV6_2292PKTINFO:
2276         case IPV6_RECVTCLASS:
2277         case IPV6_RECVRTHDR:
2278         case IPV6_2292RTHDR:
2279         case IPV6_RECVHOPOPTS:
2280         case IPV6_2292HOPOPTS:
2281         case IPV6_RECVDSTOPTS:
2282         case IPV6_2292DSTOPTS:
2283         case IPV6_TCLASS:
2284         case IPV6_ADDR_PREFERENCES:
2285 #ifdef IPV6_RECVPATHMTU
2286         case IPV6_RECVPATHMTU:
2287 #endif
2288 #ifdef IPV6_TRANSPARENT
2289         case IPV6_TRANSPARENT:
2290 #endif
2291 #ifdef IPV6_FREEBIND
2292         case IPV6_FREEBIND:
2293 #endif
2294 #ifdef IPV6_RECVORIGDSTADDR
2295         case IPV6_RECVORIGDSTADDR:
2296 #endif
2297             val = 0;
2298             if (optlen < sizeof(uint32_t)) {
2299                 return -TARGET_EINVAL;
2300             }
2301             if (get_user_u32(val, optval_addr)) {
2302                 return -TARGET_EFAULT;
2303             }
2304             ret = get_errno(setsockopt(sockfd, level, optname,
2305                                        &val, sizeof(val)));
2306             break;
2307         case IPV6_PKTINFO:
2308         {
2309             struct in6_pktinfo pki;
2310 
2311             if (optlen < sizeof(pki)) {
2312                 return -TARGET_EINVAL;
2313             }
2314 
2315             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2316                 return -TARGET_EFAULT;
2317             }
2318 
2319             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2320 
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        &pki, sizeof(pki)));
2323             break;
2324         }
2325         case IPV6_ADD_MEMBERSHIP:
2326         case IPV6_DROP_MEMBERSHIP:
2327         {
2328             struct ipv6_mreq ipv6mreq;
2329 
2330             if (optlen < sizeof(ipv6mreq)) {
2331                 return -TARGET_EINVAL;
2332             }
2333 
2334             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2335                 return -TARGET_EFAULT;
2336             }
2337 
2338             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2339 
2340             ret = get_errno(setsockopt(sockfd, level, optname,
2341                                        &ipv6mreq, sizeof(ipv6mreq)));
2342             break;
2343         }
2344         default:
2345             goto unimplemented;
2346         }
2347         break;
2348     case SOL_ICMPV6:
2349         switch (optname) {
2350         case ICMPV6_FILTER:
2351         {
2352             struct icmp6_filter icmp6f;
2353 
2354             if (optlen > sizeof(icmp6f)) {
2355                 optlen = sizeof(icmp6f);
2356             }
2357 
2358             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2359                 return -TARGET_EFAULT;
2360             }
2361 
2362             for (val = 0; val < 8; val++) {
2363                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2364             }
2365 
2366             ret = get_errno(setsockopt(sockfd, level, optname,
2367                                        &icmp6f, optlen));
2368             break;
2369         }
2370         default:
2371             goto unimplemented;
2372         }
2373         break;
2374     case SOL_RAW:
2375         switch (optname) {
2376         case ICMP_FILTER:
2377         case IPV6_CHECKSUM:
2378             /* those take an u32 value */
2379             if (optlen < sizeof(uint32_t)) {
2380                 return -TARGET_EINVAL;
2381             }
2382 
2383             if (get_user_u32(val, optval_addr)) {
2384                 return -TARGET_EFAULT;
2385             }
2386             ret = get_errno(setsockopt(sockfd, level, optname,
2387                                        &val, sizeof(val)));
2388             break;
2389 
2390         default:
2391             goto unimplemented;
2392         }
2393         break;
2394 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2395     case SOL_ALG:
2396         switch (optname) {
2397         case ALG_SET_KEY:
2398         {
2399             char *alg_key = g_malloc(optlen);
2400 
2401             if (!alg_key) {
2402                 return -TARGET_ENOMEM;
2403             }
2404             if (copy_from_user(alg_key, optval_addr, optlen)) {
2405                 g_free(alg_key);
2406                 return -TARGET_EFAULT;
2407             }
2408             ret = get_errno(setsockopt(sockfd, level, optname,
2409                                        alg_key, optlen));
2410             g_free(alg_key);
2411             break;
2412         }
2413         case ALG_SET_AEAD_AUTHSIZE:
2414         {
2415             ret = get_errno(setsockopt(sockfd, level, optname,
2416                                        NULL, optlen));
2417             break;
2418         }
2419         default:
2420             goto unimplemented;
2421         }
2422         break;
2423 #endif
2424     case TARGET_SOL_SOCKET:
2425         switch (optname) {
2426         case TARGET_SO_RCVTIMEO:
2427         {
2428                 struct timeval tv;
2429 
2430                 optname = SO_RCVTIMEO;
2431 
2432 set_timeout:
2433                 if (optlen != sizeof(struct target_timeval)) {
2434                     return -TARGET_EINVAL;
2435                 }
2436 
2437                 if (copy_from_user_timeval(&tv, optval_addr)) {
2438                     return -TARGET_EFAULT;
2439                 }
2440 
2441                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2442                                 &tv, sizeof(tv)));
2443                 return ret;
2444         }
2445         case TARGET_SO_SNDTIMEO:
2446                 optname = SO_SNDTIMEO;
2447                 goto set_timeout;
2448         case TARGET_SO_ATTACH_FILTER:
2449         {
2450                 struct target_sock_fprog *tfprog;
2451                 struct target_sock_filter *tfilter;
2452                 struct sock_fprog fprog;
2453                 struct sock_filter *filter;
2454                 int i;
2455 
2456                 if (optlen != sizeof(*tfprog)) {
2457                     return -TARGET_EINVAL;
2458                 }
2459                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2460                     return -TARGET_EFAULT;
2461                 }
2462                 if (!lock_user_struct(VERIFY_READ, tfilter,
2463                                       tswapal(tfprog->filter), 0)) {
2464                     unlock_user_struct(tfprog, optval_addr, 1);
2465                     return -TARGET_EFAULT;
2466                 }
2467 
2468                 fprog.len = tswap16(tfprog->len);
2469                 filter = g_try_new(struct sock_filter, fprog.len);
2470                 if (filter == NULL) {
2471                     unlock_user_struct(tfilter, tfprog->filter, 1);
2472                     unlock_user_struct(tfprog, optval_addr, 1);
2473                     return -TARGET_ENOMEM;
2474                 }
2475                 for (i = 0; i < fprog.len; i++) {
2476                     filter[i].code = tswap16(tfilter[i].code);
2477                     filter[i].jt = tfilter[i].jt;
2478                     filter[i].jf = tfilter[i].jf;
2479                     filter[i].k = tswap32(tfilter[i].k);
2480                 }
2481                 fprog.filter = filter;
2482 
2483                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2484                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2485                 g_free(filter);
2486 
2487                 unlock_user_struct(tfilter, tfprog->filter, 1);
2488                 unlock_user_struct(tfprog, optval_addr, 1);
2489                 return ret;
2490         }
2491 	case TARGET_SO_BINDTODEVICE:
2492 	{
2493 		char *dev_ifname, *addr_ifname;
2494 
2495 		if (optlen > IFNAMSIZ - 1) {
2496 		    optlen = IFNAMSIZ - 1;
2497 		}
2498 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2499 		if (!dev_ifname) {
2500 		    return -TARGET_EFAULT;
2501 		}
2502 		optname = SO_BINDTODEVICE;
2503 		addr_ifname = alloca(IFNAMSIZ);
2504 		memcpy(addr_ifname, dev_ifname, optlen);
2505 		addr_ifname[optlen] = 0;
2506 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2507                                            addr_ifname, optlen));
2508 		unlock_user (dev_ifname, optval_addr, 0);
2509 		return ret;
2510 	}
2511         case TARGET_SO_LINGER:
2512         {
2513                 struct linger lg;
2514                 struct target_linger *tlg;
2515 
2516                 if (optlen != sizeof(struct target_linger)) {
2517                     return -TARGET_EINVAL;
2518                 }
2519                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2520                     return -TARGET_EFAULT;
2521                 }
2522                 __get_user(lg.l_onoff, &tlg->l_onoff);
2523                 __get_user(lg.l_linger, &tlg->l_linger);
2524                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2525                                 &lg, sizeof(lg)));
2526                 unlock_user_struct(tlg, optval_addr, 0);
2527                 return ret;
2528         }
2529             /* Options with 'int' argument.  */
2530         case TARGET_SO_DEBUG:
2531 		optname = SO_DEBUG;
2532 		break;
2533         case TARGET_SO_REUSEADDR:
2534 		optname = SO_REUSEADDR;
2535 		break;
2536 #ifdef SO_REUSEPORT
2537         case TARGET_SO_REUSEPORT:
2538                 optname = SO_REUSEPORT;
2539                 break;
2540 #endif
2541         case TARGET_SO_TYPE:
2542 		optname = SO_TYPE;
2543 		break;
2544         case TARGET_SO_ERROR:
2545 		optname = SO_ERROR;
2546 		break;
2547         case TARGET_SO_DONTROUTE:
2548 		optname = SO_DONTROUTE;
2549 		break;
2550         case TARGET_SO_BROADCAST:
2551 		optname = SO_BROADCAST;
2552 		break;
2553         case TARGET_SO_SNDBUF:
2554 		optname = SO_SNDBUF;
2555 		break;
2556         case TARGET_SO_SNDBUFFORCE:
2557                 optname = SO_SNDBUFFORCE;
2558                 break;
2559         case TARGET_SO_RCVBUF:
2560 		optname = SO_RCVBUF;
2561 		break;
2562         case TARGET_SO_RCVBUFFORCE:
2563                 optname = SO_RCVBUFFORCE;
2564                 break;
2565         case TARGET_SO_KEEPALIVE:
2566 		optname = SO_KEEPALIVE;
2567 		break;
2568         case TARGET_SO_OOBINLINE:
2569 		optname = SO_OOBINLINE;
2570 		break;
2571         case TARGET_SO_NO_CHECK:
2572 		optname = SO_NO_CHECK;
2573 		break;
2574         case TARGET_SO_PRIORITY:
2575 		optname = SO_PRIORITY;
2576 		break;
2577 #ifdef SO_BSDCOMPAT
2578         case TARGET_SO_BSDCOMPAT:
2579 		optname = SO_BSDCOMPAT;
2580 		break;
2581 #endif
2582         case TARGET_SO_PASSCRED:
2583 		optname = SO_PASSCRED;
2584 		break;
2585         case TARGET_SO_PASSSEC:
2586                 optname = SO_PASSSEC;
2587                 break;
2588         case TARGET_SO_TIMESTAMP:
2589 		optname = SO_TIMESTAMP;
2590 		break;
2591         case TARGET_SO_RCVLOWAT:
2592 		optname = SO_RCVLOWAT;
2593 		break;
2594         default:
2595             goto unimplemented;
2596         }
2597 	if (optlen < sizeof(uint32_t))
2598             return -TARGET_EINVAL;
2599 
2600 	if (get_user_u32(val, optval_addr))
2601             return -TARGET_EFAULT;
2602 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2603         break;
2604 #ifdef SOL_NETLINK
2605     case SOL_NETLINK:
2606         switch (optname) {
2607         case NETLINK_PKTINFO:
2608         case NETLINK_ADD_MEMBERSHIP:
2609         case NETLINK_DROP_MEMBERSHIP:
2610         case NETLINK_BROADCAST_ERROR:
2611         case NETLINK_NO_ENOBUFS:
2612 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613         case NETLINK_LISTEN_ALL_NSID:
2614         case NETLINK_CAP_ACK:
2615 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617         case NETLINK_EXT_ACK:
2618 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620         case NETLINK_GET_STRICT_CHK:
2621 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2622             break;
2623         default:
2624             goto unimplemented;
2625         }
2626         val = 0;
2627         if (optlen < sizeof(uint32_t)) {
2628             return -TARGET_EINVAL;
2629         }
2630         if (get_user_u32(val, optval_addr)) {
2631             return -TARGET_EFAULT;
2632         }
2633         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2634                                    sizeof(val)));
2635         break;
2636 #endif /* SOL_NETLINK */
2637     default:
2638     unimplemented:
2639         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2640                       level, optname);
2641         ret = -TARGET_ENOPROTOOPT;
2642     }
2643     return ret;
2644 }
2645 
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates the guest's (level, optname) pair to host values, performs the
 * host getsockopt(), and converts the returned value and length back into
 * the guest's representation at optval_addr / optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

/* Shared tail: TARGET_SO_SNDTIMEO jumps here with optname = SO_SNDTIMEO. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            /* Never report more than the target struct's size. */
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* Copy field by field: target_ucred may differ from the host
             * struct in endianness and field widths. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            /* The security label is an opaque byte string, so the guest
             * buffer can be handed straight to the host call. */
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        /* Unknown SOL_SOCKET options are passed through untranslated as
         * plain-int options. */
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) is used where sizeof(val) is presumably
         * intended; both are 4 bytes on supported hosts so the result is
         * identical -- confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* Socket type constants differ per target; convert. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            /* Guest supplied a short buffer: store a single byte. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* When the guest buffer is smaller than an int and the value
             * fits in a byte, report it as a single byte. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same byte-vs-int handling as the SOL_IP path above. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* Swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): this return path leaves 'results' locked
             * (no unlock_user before returning) -- verify whether an
             * unlock is required here. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
3063 
3064 /* Convert target low/high pair representing file offset into the host
3065  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3066  * as the kernel doesn't handle them either.
3067  */
3068 static void target_to_host_low_high(abi_ulong tlow,
3069                                     abi_ulong thigh,
3070                                     unsigned long *hlow,
3071                                     unsigned long *hhigh)
3072 {
3073     uint64_t off = tlow |
3074         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3075         TARGET_LONG_BITS / 2;
3076 
3077     *hlow = off;
3078     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3079 }
3080 
/* Pin a guest iovec array into host memory.
 *
 * Reads 'count' target_iovec entries at target_addr, locks each referenced
 * guest buffer with access mode 'type' (VERIFY_READ/VERIFY_WRITE), and
 * returns a freshly allocated host iovec array describing them.  On failure
 * returns NULL with errno set; a NULL return with errno == 0 means count
 * was zero.  The caller must release the result with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address is bad, all later entries are zeroed too. */
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Undo the locks taken so far; entries recorded with len == 0 were
     * never locked (or were NULL), so they are skipped. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3168 
3169 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3170                          abi_ulong count, int copy)
3171 {
3172     struct target_iovec *target_vec;
3173     int i;
3174 
3175     target_vec = lock_user(VERIFY_READ, target_addr,
3176                            count * sizeof(struct target_iovec), 1);
3177     if (target_vec) {
3178         for (i = 0; i < count; i++) {
3179             abi_ulong base = tswapal(target_vec[i].iov_base);
3180             abi_long len = tswapal(target_vec[i].iov_len);
3181             if (len < 0) {
3182                 break;
3183             }
3184             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3185         }
3186         unlock_user(target_vec, target_addr, 0);
3187     }
3188 
3189     g_free(vec);
3190 }
3191 
3192 static inline int target_to_host_sock_type(int *type)
3193 {
3194     int host_type = 0;
3195     int target_type = *type;
3196 
3197     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3198     case TARGET_SOCK_DGRAM:
3199         host_type = SOCK_DGRAM;
3200         break;
3201     case TARGET_SOCK_STREAM:
3202         host_type = SOCK_STREAM;
3203         break;
3204     default:
3205         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3206         break;
3207     }
3208     if (target_type & TARGET_SOCK_CLOEXEC) {
3209 #if defined(SOCK_CLOEXEC)
3210         host_type |= SOCK_CLOEXEC;
3211 #else
3212         return -TARGET_EINVAL;
3213 #endif
3214     }
3215     if (target_type & TARGET_SOCK_NONBLOCK) {
3216 #if defined(SOCK_NONBLOCK)
3217         host_type |= SOCK_NONBLOCK;
3218 #elif !defined(O_NONBLOCK)
3219         return -TARGET_EINVAL;
3220 #endif
3221     }
3222     *type = host_type;
3223     return 0;
3224 }
3225 
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl() when the
 * guest asked for a non-blocking socket.  Returns 'fd' on success; on
 * failure the fd is closed and -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Check F_GETFL for failure: OR-ing O_NONBLOCK into -1 would
         * set every status flag on the socket. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3240 
3241 /* do_socket() Must return target values and target errnos. */
3242 static abi_long do_socket(int domain, int type, int protocol)
3243 {
3244     int target_type = type;
3245     int ret;
3246 
3247     ret = target_to_host_sock_type(&type);
3248     if (ret) {
3249         return ret;
3250     }
3251 
3252     if (domain == PF_NETLINK && !(
3253 #ifdef CONFIG_RTNETLINK
3254          protocol == NETLINK_ROUTE ||
3255 #endif
3256          protocol == NETLINK_KOBJECT_UEVENT ||
3257          protocol == NETLINK_AUDIT)) {
3258         return -TARGET_EPROTONOSUPPORT;
3259     }
3260 
3261     if (domain == AF_PACKET ||
3262         (domain == AF_INET && type == SOCK_PACKET)) {
3263         protocol = tswap16(protocol);
3264     }
3265 
3266     ret = get_errno(socket(domain, type, protocol));
3267     if (ret >= 0) {
3268         ret = sock_flags_fixup(ret, target_type);
3269         if (type == SOCK_PACKET) {
3270             /* Manage an obsolete case :
3271              * if socket type is SOCK_PACKET, bind by name
3272              */
3273             fd_trans_register(ret, &target_packet_trans);
3274         } else if (domain == PF_NETLINK) {
3275             switch (protocol) {
3276 #ifdef CONFIG_RTNETLINK
3277             case NETLINK_ROUTE:
3278                 fd_trans_register(ret, &target_netlink_route_trans);
3279                 break;
3280 #endif
3281             case NETLINK_KOBJECT_UEVENT:
3282                 /* nothing to do: messages are strings */
3283                 break;
3284             case NETLINK_AUDIT:
3285                 fd_trans_register(ret, &target_netlink_audit_trans);
3286                 break;
3287             default:
3288                 g_assert_not_reached();
3289             }
3290         }
3291     }
3292     return ret;
3293 }
3294 
3295 /* do_bind() Must return target values and target errnos. */
3296 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3297                         socklen_t addrlen)
3298 {
3299     void *addr;
3300     abi_long ret;
3301 
3302     if ((int)addrlen < 0) {
3303         return -TARGET_EINVAL;
3304     }
3305 
3306     addr = alloca(addrlen+1);
3307 
3308     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309     if (ret)
3310         return ret;
3311 
3312     return get_errno(bind(sockfd, addr, addrlen));
3313 }
3314 
3315 /* do_connect() Must return target values and target errnos. */
3316 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3317                            socklen_t addrlen)
3318 {
3319     void *addr;
3320     abi_long ret;
3321 
3322     if ((int)addrlen < 0) {
3323         return -TARGET_EINVAL;
3324     }
3325 
3326     addr = alloca(addrlen+1);
3327 
3328     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3329     if (ret)
3330         return ret;
3331 
3332     return get_errno(safe_connect(sockfd, addr, addrlen));
3333 }
3334 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Common implementation of sendmsg/recvmsg for an already-locked guest
 * target_msghdr.  Converts the name, control data and iovec to host form,
 * performs safe_sendmsg()/safe_recvmsg() according to 'send', and for
 * receives converts the results back into the guest structure.  Returns
 * the number of bytes transferred or a target errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): the host control buffer is sized at twice the guest's
     * msg_controllen, presumably because host cmsg structures can be
     * larger than the target's -- confirm this bound. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* A data translator is registered for this fd: run the first
             * iov element through it before sending. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* Skip the (void *)-1 sentinel set for a bad guest name. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3441 
3442 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3443                                int flags, int send)
3444 {
3445     abi_long ret;
3446     struct target_msghdr *msgp;
3447 
3448     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3449                           msgp,
3450                           target_msg,
3451                           send ? 1 : 0)) {
3452         return -TARGET_EFAULT;
3453     }
3454     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3455     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3456     return ret;
3457 }
3458 
3459 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460  * so it might not have this *mmsg-specific flag either.
3461  */
3462 #ifndef MSG_WAITFORONE
3463 #define MSG_WAITFORONE 0x10000
3464 #endif
3465 
3466 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3467                                 unsigned int vlen, unsigned int flags,
3468                                 int send)
3469 {
3470     struct target_mmsghdr *mmsgp;
3471     abi_long ret = 0;
3472     int i;
3473 
3474     if (vlen > UIO_MAXIOV) {
3475         vlen = UIO_MAXIOV;
3476     }
3477 
3478     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3479     if (!mmsgp) {
3480         return -TARGET_EFAULT;
3481     }
3482 
3483     for (i = 0; i < vlen; i++) {
3484         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3485         if (is_error(ret)) {
3486             break;
3487         }
3488         mmsgp[i].msg_len = tswap32(ret);
3489         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3490         if (flags & MSG_WAITFORONE) {
3491             flags |= MSG_DONTWAIT;
3492         }
3493     }
3494 
3495     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3496 
3497     /* Return number of datagrams sent if we sent any at all;
3498      * otherwise return the error.
3499      */
3500     if (i) {
3501         return i;
3502     }
3503     return ret;
3504 }
3505 
3506 /* do_accept4() Must return target values and target errnos. */
3507 static abi_long do_accept4(int fd, abi_ulong target_addr,
3508                            abi_ulong target_addrlen_addr, int flags)
3509 {
3510     socklen_t addrlen, ret_addrlen;
3511     void *addr;
3512     abi_long ret;
3513     int host_flags;
3514 
3515     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3516 
3517     if (target_addr == 0) {
3518         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3519     }
3520 
3521     /* linux returns EFAULT if addrlen pointer is invalid */
3522     if (get_user_u32(addrlen, target_addrlen_addr))
3523         return -TARGET_EFAULT;
3524 
3525     if ((int)addrlen < 0) {
3526         return -TARGET_EINVAL;
3527     }
3528 
3529     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3530         return -TARGET_EFAULT;
3531     }
3532 
3533     addr = alloca(addrlen);
3534 
3535     ret_addrlen = addrlen;
3536     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3537     if (!is_error(ret)) {
3538         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3539         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3540             ret = -TARGET_EFAULT;
3541         }
3542     }
3543     return ret;
3544 }
3545 
3546 /* do_getpeername() Must return target values and target errnos. */
3547 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3548                                abi_ulong target_addrlen_addr)
3549 {
3550     socklen_t addrlen, ret_addrlen;
3551     void *addr;
3552     abi_long ret;
3553 
3554     if (get_user_u32(addrlen, target_addrlen_addr))
3555         return -TARGET_EFAULT;
3556 
3557     if ((int)addrlen < 0) {
3558         return -TARGET_EINVAL;
3559     }
3560 
3561     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3562         return -TARGET_EFAULT;
3563     }
3564 
3565     addr = alloca(addrlen);
3566 
3567     ret_addrlen = addrlen;
3568     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3569     if (!is_error(ret)) {
3570         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3571         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3572             ret = -TARGET_EFAULT;
3573         }
3574     }
3575     return ret;
3576 }
3577 
3578 /* do_getsockname() Must return target values and target errnos. */
3579 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3580                                abi_ulong target_addrlen_addr)
3581 {
3582     socklen_t addrlen, ret_addrlen;
3583     void *addr;
3584     abi_long ret;
3585 
3586     if (get_user_u32(addrlen, target_addrlen_addr))
3587         return -TARGET_EFAULT;
3588 
3589     if ((int)addrlen < 0) {
3590         return -TARGET_EINVAL;
3591     }
3592 
3593     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3594         return -TARGET_EFAULT;
3595     }
3596 
3597     addr = alloca(addrlen);
3598 
3599     ret_addrlen = addrlen;
3600     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3601     if (!is_error(ret)) {
3602         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3603         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3604             ret = -TARGET_EFAULT;
3605         }
3606     }
3607     return ret;
3608 }
3609 
3610 /* do_socketpair() Must return target values and target errnos. */
3611 static abi_long do_socketpair(int domain, int type, int protocol,
3612                               abi_ulong target_tab_addr)
3613 {
3614     int tab[2];
3615     abi_long ret;
3616 
3617     target_to_host_sock_type(&type);
3618 
3619     ret = get_errno(socketpair(domain, type, protocol, tab));
3620     if (!is_error(ret)) {
3621         if (put_user_s32(tab[0], target_tab_addr)
3622             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3623             ret = -TARGET_EFAULT;
3624     }
3625     return ret;
3626 }
3627 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /*
     * If this fd has a target-to-host data translator registered,
     * translate a malloc'd copy so the guest's locked buffer is left
     * untouched.  copy_msg remembers the original locked pointer so it
     * can be restored (and host_msg freed) before unlocking.
     */
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /*
         * +1 presumably gives target_to_host_sockaddr() room to
         * NUL-terminate AF_UNIX paths -- confirm against that helper.
         */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Undo the buffer swap done for the translator before unlocking. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3671 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A NULL guest buffer is passed through (the kernel accepts it). */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        /* Let any registered fd translator rewrite the received data. */
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Truncate to the guest buffer, but report the true length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
        /*
         * Note: the fail label lives inside this else so that every error
         * path unlocks the buffer without copying any data back.
         */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3732 
3733 #ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    /* The guest passes an array of abi_longs at vptr. */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
3824 #endif
3825 
#define N_SHM_REGIONS	32

/*
 * Table of guest shared-memory mappings.  Presumably tracks SysV shm
 * attachments so they can be found again later; the users of this table
 * are outside this part of the file.  A slot is free when !in_use.
 */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3833 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  /* On 32-bit ABIs the time fields are padded out to 64 bits. */
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3852 
/*
 * Convert the guest ipc_perm embedded at the head of a guest
 * semid64_ds at target_addr into the host representation.
 * Returns 0 on success or -TARGET_EFAULT if the guest struct
 * cannot be mapped.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    /* mode is 32 bits wide on these targets, 16 bits elsewhere. */
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    /* Likewise __seq is 32 bits on PPC only. */
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3880 
/*
 * Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
 * guest semid64_ds at target_addr.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be mapped.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    /* mode is 32 bits wide on these targets, 16 bits elsewhere. */
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    /* Likewise __seq is 32 bits on PPC only. */
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3908 
3909 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3910                                                abi_ulong target_addr)
3911 {
3912     struct target_semid64_ds *target_sd;
3913 
3914     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3915         return -TARGET_EFAULT;
3916     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3917         return -TARGET_EFAULT;
3918     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3919     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3920     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3921     unlock_user_struct(target_sd, target_addr, 0);
3922     return 0;
3923 }
3924 
3925 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3926                                                struct semid_ds *host_sd)
3927 {
3928     struct target_semid64_ds *target_sd;
3929 
3930     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3931         return -TARGET_EFAULT;
3932     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3933         return -TARGET_EFAULT;
3934     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3935     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3936     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3937     unlock_user_struct(target_sd, target_addr, 1);
3938     return 0;
3939 }
3940 
/* Guest layout of struct seminfo (semctl IPC_INFO/SEM_INFO results). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3953 
3954 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3955                                               struct seminfo *host_seminfo)
3956 {
3957     struct target_seminfo *target_seminfo;
3958     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3959         return -TARGET_EFAULT;
3960     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3961     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3962     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3963     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3964     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3965     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3966     __put_user(host_seminfo->semume, &target_seminfo->semume);
3967     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3968     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3969     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3970     unlock_user_struct(target_seminfo, target_addr, 1);
3971     return 0;
3972 }
3973 
/* Host view of the semctl() value argument (see semctl(2)). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view: the pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3987 
3988 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3989                                                abi_ulong target_addr)
3990 {
3991     int nsems;
3992     unsigned short *array;
3993     union semun semun;
3994     struct semid_ds semid_ds;
3995     int i, ret;
3996 
3997     semun.buf = &semid_ds;
3998 
3999     ret = semctl(semid, 0, IPC_STAT, semun);
4000     if (ret == -1)
4001         return get_errno(ret);
4002 
4003     nsems = semid_ds.sem_nsems;
4004 
4005     *host_array = g_try_new(unsigned short, nsems);
4006     if (!*host_array) {
4007         return -TARGET_ENOMEM;
4008     }
4009     array = lock_user(VERIFY_READ, target_addr,
4010                       nsems*sizeof(unsigned short), 1);
4011     if (!array) {
4012         g_free(*host_array);
4013         return -TARGET_EFAULT;
4014     }
4015 
4016     for(i=0; i<nsems; i++) {
4017         __get_user((*host_array)[i], &array[i]);
4018     }
4019     unlock_user(array, target_addr, 0);
4020 
4021     return 0;
4022 }
4023 
4024 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4025                                                unsigned short **host_array)
4026 {
4027     int nsems;
4028     unsigned short *array;
4029     union semun semun;
4030     struct semid_ds semid_ds;
4031     int i, ret;
4032 
4033     semun.buf = &semid_ds;
4034 
4035     ret = semctl(semid, 0, IPC_STAT, semun);
4036     if (ret == -1)
4037         return get_errno(ret);
4038 
4039     nsems = semid_ds.sem_nsems;
4040 
4041     array = lock_user(VERIFY_WRITE, target_addr,
4042                       nsems*sizeof(unsigned short), 0);
4043     if (!array)
4044         return -TARGET_EFAULT;
4045 
4046     for(i=0; i<nsems; i++) {
4047         __put_user((*host_array)[i], &array[i]);
4048     }
4049     g_free(*host_array);
4050     unlock_user(array, target_addr, 1);
4051 
4052     return 0;
4053 }
4054 
/*
 * Emulate semctl(2): marshal the variadic semun argument between guest
 * and host representations for each command class.  Returns target
 * errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip version flags (e.g. IPC_64); only the low byte selects
     * the operation -- presumably matching the kernel ipc ABI. */
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            /* host_to_target_semarray() also frees the array buffer. */
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* These commands ignore the semun argument entirely. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
4124 
/* Guest layout of struct sembuf (same field order as the host's). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4130 
4131 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4132                                              abi_ulong target_addr,
4133                                              unsigned nsops)
4134 {
4135     struct target_sembuf *target_sembuf;
4136     int i;
4137 
4138     target_sembuf = lock_user(VERIFY_READ, target_addr,
4139                               nsops*sizeof(struct target_sembuf), 1);
4140     if (!target_sembuf)
4141         return -TARGET_EFAULT;
4142 
4143     for(i=0; i<nsops; i++) {
4144         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4145         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4146         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4147     }
4148 
4149     unlock_user(target_sembuf, target_addr, 0);
4150 
4151     return 0;
4152 }
4153 
4154 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4155     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4156 
4157 /*
4158  * This macro is required to handle the s390 variants, which passes the
4159  * arguments in a different order than default.
4160  */
4161 #ifdef __s390x__
4162 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4163   (__nsops), (__timeout), (__sops)
4164 #else
4165 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4166   (__nsops), 0, (__sops), (__timeout)
4167 #endif
4168 
4169 static inline abi_long do_semtimedop(int semid,
4170                                      abi_long ptr,
4171                                      unsigned nsops,
4172                                      abi_long timeout, bool time64)
4173 {
4174     struct sembuf *sops;
4175     struct timespec ts, *pts = NULL;
4176     abi_long ret;
4177 
4178     if (timeout) {
4179         pts = &ts;
4180         if (time64) {
4181             if (target_to_host_timespec64(pts, timeout)) {
4182                 return -TARGET_EFAULT;
4183             }
4184         } else {
4185             if (target_to_host_timespec(pts, timeout)) {
4186                 return -TARGET_EFAULT;
4187             }
4188         }
4189     }
4190 
4191     if (nsops > TARGET_SEMOPM) {
4192         return -TARGET_E2BIG;
4193     }
4194 
4195     sops = g_new(struct sembuf, nsops);
4196 
4197     if (target_to_host_sembuf(sops, ptr, nsops)) {
4198         g_free(sops);
4199         return -TARGET_EFAULT;
4200     }
4201 
4202     ret = -TARGET_ENOSYS;
4203 #ifdef __NR_semtimedop
4204     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4205 #endif
4206 #ifdef __NR_ipc
4207     if (ret == -TARGET_ENOSYS) {
4208         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4209                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4210     }
4211 #endif
4212     g_free(sops);
4213     return ret;
4214 }
4215 #endif
4216 
/* Guest layout of the asm-generic msqid64_ds structure. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    /* On 32-bit ABIs the time fields are padded out to 64 bits. */
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4240 
4241 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4242                                                abi_ulong target_addr)
4243 {
4244     struct target_msqid_ds *target_md;
4245 
4246     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4247         return -TARGET_EFAULT;
4248     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4249         return -TARGET_EFAULT;
4250     host_md->msg_stime = tswapal(target_md->msg_stime);
4251     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4252     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4253     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4254     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4255     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4256     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4257     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4258     unlock_user_struct(target_md, target_addr, 0);
4259     return 0;
4260 }
4261 
4262 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4263                                                struct msqid_ds *host_md)
4264 {
4265     struct target_msqid_ds *target_md;
4266 
4267     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4268         return -TARGET_EFAULT;
4269     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4270         return -TARGET_EFAULT;
4271     target_md->msg_stime = tswapal(host_md->msg_stime);
4272     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4273     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4274     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4275     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4276     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4277     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4278     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4279     unlock_user_struct(target_md, target_addr, 1);
4280     return 0;
4281 }
4282 
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO results). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4293 
4294 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4295                                               struct msginfo *host_msginfo)
4296 {
4297     struct target_msginfo *target_msginfo;
4298     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4299         return -TARGET_EFAULT;
4300     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4301     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4302     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4303     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4304     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4305     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4306     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4307     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4308     unlock_user_struct(target_msginfo, target_addr, 1);
4309     return 0;
4310 }
4311 
/*
 * Emulate msgctl(2): marshal the msqid_ds/msginfo argument between
 * guest and host for each command class.  Returns target errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip version flags (e.g. IPC_64); only the low byte selects
     * the operation -- presumably matching the kernel ipc ABI. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel ABI passes a msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4343 
/* Guest-ABI layout of struct msgbuf: the message type followed by the
 * message text.  mtext is declared with one element but is really
 * variable-length, sized by the msgsz syscall argument.
 */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4348 
/* Emulate msgsnd(2): copy the guest message at msgp into a host msgbuf
 * and send it on queue msqid.  Returns 0 on success or a negative
 * target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* Reject negative sizes before the allocation below would turn
     * them into a huge unsigned value.
     */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host msgbuf: a long mtype followed by msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Hosts without a dedicated msgsnd syscall multiplex through sys_ipc. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4389 
#ifdef __NR_ipc
/* Build the trailing argument(s) for sys_ipc(MSGRCV): the generic ABI
 * packs msgp and msgtyp into a two-element array passed by reference.
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4403 
/* Emulate msgrcv(2): receive into a host buffer, then copy the text and
 * mtype back out to the guest msgbuf at msgp.  Returns the number of
 * bytes received or a negative target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host msgbuf: a long mtype followed by up to msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Fall back to the multiplexed sys_ipc on hosts without msgrcv. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret bytes of text follow the mtype word in the guest buffer. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): on syscall failure host_mb->mtype is uninitialized,
     * so this writes indeterminate data into the guest mtype field —
     * confirm this matches the intended behaviour.
     */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4455 
4456 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4457                                                abi_ulong target_addr)
4458 {
4459     struct target_shmid_ds *target_sd;
4460 
4461     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4462         return -TARGET_EFAULT;
4463     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4464         return -TARGET_EFAULT;
4465     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4466     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4467     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4468     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4469     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4470     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4471     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4472     unlock_user_struct(target_sd, target_addr, 0);
4473     return 0;
4474 }
4475 
4476 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4477                                                struct shmid_ds *host_sd)
4478 {
4479     struct target_shmid_ds *target_sd;
4480 
4481     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4482         return -TARGET_EFAULT;
4483     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4484         return -TARGET_EFAULT;
4485     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4486     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4487     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4488     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4489     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4490     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4491     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4492     unlock_user_struct(target_sd, target_addr, 1);
4493     return 0;
4494 }
4495 
/* Guest-ABI layout of struct shminfo, returned by shmctl(IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4503 
4504 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4505                                               struct shminfo *host_shminfo)
4506 {
4507     struct target_shminfo *target_shminfo;
4508     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4509         return -TARGET_EFAULT;
4510     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4511     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4512     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4513     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4514     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4515     unlock_user_struct(target_shminfo, target_addr, 1);
4516     return 0;
4517 }
4518 
/* Guest-ABI layout of struct shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4527 
4528 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4529                                                struct shm_info *host_shm_info)
4530 {
4531     struct target_shm_info *target_shm_info;
4532     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4533         return -TARGET_EFAULT;
4534     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4535     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4536     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4537     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4538     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4539     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4540     unlock_user_struct(target_shm_info, target_addr, 1);
4541     return 0;
4542 }
4543 
4544 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4545 {
4546     struct shmid_ds dsarg;
4547     struct shminfo shminfo;
4548     struct shm_info shm_info;
4549     abi_long ret = -TARGET_EINVAL;
4550 
4551     cmd &= 0xff;
4552 
4553     switch(cmd) {
4554     case IPC_STAT:
4555     case IPC_SET:
4556     case SHM_STAT:
4557         if (target_to_host_shmid_ds(&dsarg, buf))
4558             return -TARGET_EFAULT;
4559         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4560         if (host_to_target_shmid_ds(buf, &dsarg))
4561             return -TARGET_EFAULT;
4562         break;
4563     case IPC_INFO:
4564         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4565         if (host_to_target_shminfo(buf, &shminfo))
4566             return -TARGET_EFAULT;
4567         break;
4568     case SHM_INFO:
4569         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4570         if (host_to_target_shm_info(buf, &shm_info))
4571             return -TARGET_EFAULT;
4572         break;
4573     case IPC_RMID:
4574     case SHM_LOCK:
4575     case SHM_UNLOCK:
4576         ret = get_errno(shmctl(shmid, cmd, NULL));
4577         break;
4578     }
4579 
4580     return ret;
4581 }
4582 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: the guest SHMLBA is simply the guest page size. */
    return TARGET_PAGE_SIZE;
}
#endif
4602 
/* Emulate shmat(2): attach shared memory segment shmid at guest address
 * shmaddr (or pick one if zero).  Returns the guest attach address or a
 * negative target errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Round down misaligned addresses when SHM_RND is set, as the
     * kernel does; otherwise reject them.
     */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma reserved the range, so replacing
             * the existing mapping is intended.
             */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Make the attached range accessible to the guest. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Record the attachment so do_shmdt() can clear page flags later.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the segment is
     * silently left untracked — confirm this is acceptable.
     */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4687 
4688 static inline abi_long do_shmdt(abi_ulong shmaddr)
4689 {
4690     int i;
4691     abi_long rv;
4692 
4693     /* shmdt pointers are always untagged */
4694 
4695     mmap_lock();
4696 
4697     for (i = 0; i < N_SHM_REGIONS; ++i) {
4698         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4699             shm_regions[i].in_use = false;
4700             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4701             break;
4702         }
4703     }
4704     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4705 
4706     mmap_unlock();
4707 
4708     return rv;
4709 }
4710 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/* Demultiplex the legacy sys_ipc syscall: the low 16 bits of 'call'
 * select the IPC operation, the high 16 bits carry a version number
 * used by some operations (msgrcv, shmat) to select an argument layout.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Version 0: ptr points to a kludge struct bundling the
                 * msgbuf pointer and the requested message type.
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            /* The attach address is returned through the pointer in 'third'. */
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4831 
/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum with one STRUCT_name
 * constant per known kernel structure, terminated by STRUCT_MAX.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type description (argtype array) for
 * each regular structure; STRUCT_SPECIAL entries expand to nothing here.
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Largest structure handled via the fixed-size ioctl conversion buffer. */
#define MAX_STRUCT_SIZE 4096
4850 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Handle the FS_IOC_FIEMAP ioctl, converting the variable-length
 * fiemap request/reply between guest and host layouts.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the output size below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4939 
4940 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4941                                 int fd, int cmd, abi_long arg)
4942 {
4943     const argtype *arg_type = ie->arg_type;
4944     int target_size;
4945     void *argptr;
4946     int ret;
4947     struct ifconf *host_ifconf;
4948     uint32_t outbufsz;
4949     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4950     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4951     int target_ifreq_size;
4952     int nb_ifreq;
4953     int free_buf = 0;
4954     int i;
4955     int target_ifc_len;
4956     abi_long target_ifc_buf;
4957     int host_ifc_len;
4958     char *host_ifc_buf;
4959 
4960     assert(arg_type[0] == TYPE_PTR);
4961     assert(ie->access == IOC_RW);
4962 
4963     arg_type++;
4964     target_size = thunk_type_size(arg_type, 0);
4965 
4966     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4967     if (!argptr)
4968         return -TARGET_EFAULT;
4969     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4970     unlock_user(argptr, arg, 0);
4971 
4972     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4973     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4974     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4975 
4976     if (target_ifc_buf != 0) {
4977         target_ifc_len = host_ifconf->ifc_len;
4978         nb_ifreq = target_ifc_len / target_ifreq_size;
4979         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4980 
4981         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4982         if (outbufsz > MAX_STRUCT_SIZE) {
4983             /*
4984              * We can't fit all the extents into the fixed size buffer.
4985              * Allocate one that is large enough and use it instead.
4986              */
4987             host_ifconf = malloc(outbufsz);
4988             if (!host_ifconf) {
4989                 return -TARGET_ENOMEM;
4990             }
4991             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4992             free_buf = 1;
4993         }
4994         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4995 
4996         host_ifconf->ifc_len = host_ifc_len;
4997     } else {
4998       host_ifc_buf = NULL;
4999     }
5000     host_ifconf->ifc_buf = host_ifc_buf;
5001 
5002     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5003     if (!is_error(ret)) {
5004 	/* convert host ifc_len to target ifc_len */
5005 
5006         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5007         target_ifc_len = nb_ifreq * target_ifreq_size;
5008         host_ifconf->ifc_len = target_ifc_len;
5009 
5010 	/* restore target ifc_buf */
5011 
5012         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5013 
5014 	/* copy struct ifconf to target user */
5015 
5016         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5017         if (!argptr)
5018             return -TARGET_EFAULT;
5019         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5020         unlock_user(argptr, arg, target_size);
5021 
5022         if (target_ifc_buf != 0) {
5023             /* copy ifreq[] to target user */
5024             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5025             for (i = 0; i < nb_ifreq ; i++) {
5026                 thunk_convert(argptr + i * target_ifreq_size,
5027                               host_ifc_buf + i * sizeof(struct ifreq),
5028                               ifreq_arg_type, THUNK_TARGET);
5029             }
5030             unlock_user(argptr, target_ifc_buf, target_ifc_len);
5031         }
5032     }
5033 
5034     if (free_buf) {
5035         free(host_ifconf);
5036     }
5037 
5038     return ret;
5039 }
5040 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Tracks one guest-submitted USB request block alongside its host copy.
 * target_urb_adr is the guest address of the URB and doubles as the
 * hashtable key (it is the first 8 bytes, matching g_int64_hash).
 */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
5051 
5052 static GHashTable *usbdevfs_urb_hashtable(void)
5053 {
5054     static GHashTable *urb_hashtable;
5055 
5056     if (!urb_hashtable) {
5057         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5058     }
5059     return urb_hashtable;
5060 }
5061 
/* Register a live URB; the urb pointer serves as both key and value
 * since target_urb_adr is its first field.
 */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
5067 
5068 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5069 {
5070     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5071     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5072 }
5073 
/* Unregister a live URB; does not free it. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
5079 
/* Handle USBDEVFS_REAPURB(NDELAY): reap a completed URB from the kernel,
 * copy its results back to the guest URB, and write the guest URB address
 * into the guest pointer at arg.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* The kernel returned a pointer to the host_urb member of the
     * live_urb we submitted; step back to recover the full struct.
     */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        /* NOTE(review): lurb is not freed on this path — confirm whether
         * that is intentional for a corrupted handle.
         */
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the data buffer, copying it back to the guest. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5139 
5140 static abi_long
5141 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5142                              uint8_t *buf_temp __attribute__((unused)),
5143                              int fd, int cmd, abi_long arg)
5144 {
5145     struct live_urb *lurb;
5146 
5147     /* map target address back to host URB with metadata. */
5148     lurb = urb_hashtable_lookup(arg);
5149     if (!lurb) {
5150         return -TARGET_EFAULT;
5151     }
5152     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5153 }
5154 
/* Handle USBDEVFS_SUBMITURB: build a host copy of the guest URB, pin the
 * guest data buffer, submit it, and track it for later reap/discard.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest URB address (the hashtable key) and the guest
     * data buffer address as converted into host_urb.buffer above.
     */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: unpin without copying anything back. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Ownership passes to the hashtable until reap/discard. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5215 #endif /* CONFIG_USBFS */
5216 
5217 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5218                             int cmd, abi_long arg)
5219 {
5220     void *argptr;
5221     struct dm_ioctl *host_dm;
5222     abi_long guest_data;
5223     uint32_t guest_data_size;
5224     int target_size;
5225     const argtype *arg_type = ie->arg_type;
5226     abi_long ret;
5227     void *big_buf = NULL;
5228     char *host_data;
5229 
5230     arg_type++;
5231     target_size = thunk_type_size(arg_type, 0);
5232     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5233     if (!argptr) {
5234         ret = -TARGET_EFAULT;
5235         goto out;
5236     }
5237     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5238     unlock_user(argptr, arg, 0);
5239 
5240     /* buf_temp is too small, so fetch things into a bigger buffer */
5241     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5242     memcpy(big_buf, buf_temp, target_size);
5243     buf_temp = big_buf;
5244     host_dm = big_buf;
5245 
5246     guest_data = arg + host_dm->data_start;
5247     if ((guest_data - arg) < 0) {
5248         ret = -TARGET_EINVAL;
5249         goto out;
5250     }
5251     guest_data_size = host_dm->data_size - host_dm->data_start;
5252     host_data = (char*)host_dm + host_dm->data_start;
5253 
5254     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5255     if (!argptr) {
5256         ret = -TARGET_EFAULT;
5257         goto out;
5258     }
5259 
5260     switch (ie->host_cmd) {
5261     case DM_REMOVE_ALL:
5262     case DM_LIST_DEVICES:
5263     case DM_DEV_CREATE:
5264     case DM_DEV_REMOVE:
5265     case DM_DEV_SUSPEND:
5266     case DM_DEV_STATUS:
5267     case DM_DEV_WAIT:
5268     case DM_TABLE_STATUS:
5269     case DM_TABLE_CLEAR:
5270     case DM_TABLE_DEPS:
5271     case DM_LIST_VERSIONS:
5272         /* no input data */
5273         break;
5274     case DM_DEV_RENAME:
5275     case DM_DEV_SET_GEOMETRY:
5276         /* data contains only strings */
5277         memcpy(host_data, argptr, guest_data_size);
5278         break;
5279     case DM_TARGET_MSG:
5280         memcpy(host_data, argptr, guest_data_size);
5281         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5282         break;
5283     case DM_TABLE_LOAD:
5284     {
5285         void *gspec = argptr;
5286         void *cur_data = host_data;
5287         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5288         int spec_size = thunk_type_size(arg_type, 0);
5289         int i;
5290 
5291         for (i = 0; i < host_dm->target_count; i++) {
5292             struct dm_target_spec *spec = cur_data;
5293             uint32_t next;
5294             int slen;
5295 
5296             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5297             slen = strlen((char*)gspec + spec_size) + 1;
5298             next = spec->next;
5299             spec->next = sizeof(*spec) + slen;
5300             strcpy((char*)&spec[1], gspec + spec_size);
5301             gspec += next;
5302             cur_data += spec->next;
5303         }
5304         break;
5305     }
5306     default:
5307         ret = -TARGET_EINVAL;
5308         unlock_user(argptr, guest_data, 0);
5309         goto out;
5310     }
5311     unlock_user(argptr, guest_data, 0);
5312 
5313     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5314     if (!is_error(ret)) {
5315         guest_data = arg + host_dm->data_start;
5316         guest_data_size = host_dm->data_size - host_dm->data_start;
5317         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5318         switch (ie->host_cmd) {
5319         case DM_REMOVE_ALL:
5320         case DM_DEV_CREATE:
5321         case DM_DEV_REMOVE:
5322         case DM_DEV_RENAME:
5323         case DM_DEV_SUSPEND:
5324         case DM_DEV_STATUS:
5325         case DM_TABLE_LOAD:
5326         case DM_TABLE_CLEAR:
5327         case DM_TARGET_MSG:
5328         case DM_DEV_SET_GEOMETRY:
5329             /* no return data */
5330             break;
5331         case DM_LIST_DEVICES:
5332         {
5333             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5334             uint32_t remaining_data = guest_data_size;
5335             void *cur_data = argptr;
5336             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5337             int nl_size = 12; /* can't use thunk_size due to alignment */
5338 
5339             while (1) {
5340                 uint32_t next = nl->next;
5341                 if (next) {
5342                     nl->next = nl_size + (strlen(nl->name) + 1);
5343                 }
5344                 if (remaining_data < nl->next) {
5345                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5346                     break;
5347                 }
5348                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5349                 strcpy(cur_data + nl_size, nl->name);
5350                 cur_data += nl->next;
5351                 remaining_data -= nl->next;
5352                 if (!next) {
5353                     break;
5354                 }
5355                 nl = (void*)nl + next;
5356             }
5357             break;
5358         }
5359         case DM_DEV_WAIT:
5360         case DM_TABLE_STATUS:
5361         {
5362             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5363             void *cur_data = argptr;
5364             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5365             int spec_size = thunk_type_size(arg_type, 0);
5366             int i;
5367 
5368             for (i = 0; i < host_dm->target_count; i++) {
5369                 uint32_t next = spec->next;
5370                 int slen = strlen((char*)&spec[1]) + 1;
5371                 spec->next = (cur_data - argptr) + spec_size + slen;
5372                 if (guest_data_size < spec->next) {
5373                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5374                     break;
5375                 }
5376                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5377                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5378                 cur_data = argptr + spec->next;
5379                 spec = (void*)host_dm + host_dm->data_start + next;
5380             }
5381             break;
5382         }
5383         case DM_TABLE_DEPS:
5384         {
5385             void *hdata = (void*)host_dm + host_dm->data_start;
5386             int count = *(uint32_t*)hdata;
5387             uint64_t *hdev = hdata + 8;
5388             uint64_t *gdev = argptr + 8;
5389             int i;
5390 
5391             *(uint32_t*)argptr = tswap32(count);
5392             for (i = 0; i < count; i++) {
5393                 *gdev = tswap64(*hdev);
5394                 gdev++;
5395                 hdev++;
5396             }
5397             break;
5398         }
5399         case DM_LIST_VERSIONS:
5400         {
5401             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5402             uint32_t remaining_data = guest_data_size;
5403             void *cur_data = argptr;
5404             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5405             int vers_size = thunk_type_size(arg_type, 0);
5406 
5407             while (1) {
5408                 uint32_t next = vers->next;
5409                 if (next) {
5410                     vers->next = vers_size + (strlen(vers->name) + 1);
5411                 }
5412                 if (remaining_data < vers->next) {
5413                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5414                     break;
5415                 }
5416                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5417                 strcpy(cur_data + vers_size, vers->name);
5418                 cur_data += vers->next;
5419                 remaining_data -= vers->next;
5420                 if (!next) {
5421                     break;
5422                 }
5423                 vers = (void*)vers + next;
5424             }
5425             break;
5426         }
5427         default:
5428             unlock_user(argptr, guest_data, 0);
5429             ret = -TARGET_EINVAL;
5430             goto out;
5431         }
5432         unlock_user(argptr, guest_data, guest_data_size);
5433 
5434         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5435         if (!argptr) {
5436             ret = -TARGET_EFAULT;
5437             goto out;
5438         }
5439         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5440         unlock_user(argptr, arg, target_size);
5441     }
5442 out:
5443     g_free(big_buf);
5444     return ret;
5445 }
5446 
5447 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5448                                int cmd, abi_long arg)
5449 {
5450     void *argptr;
5451     int target_size;
5452     const argtype *arg_type = ie->arg_type;
5453     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5454     abi_long ret;
5455 
5456     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5457     struct blkpg_partition host_part;
5458 
5459     /* Read and convert blkpg */
5460     arg_type++;
5461     target_size = thunk_type_size(arg_type, 0);
5462     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5463     if (!argptr) {
5464         ret = -TARGET_EFAULT;
5465         goto out;
5466     }
5467     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5468     unlock_user(argptr, arg, 0);
5469 
5470     switch (host_blkpg->op) {
5471     case BLKPG_ADD_PARTITION:
5472     case BLKPG_DEL_PARTITION:
5473         /* payload is struct blkpg_partition */
5474         break;
5475     default:
5476         /* Unknown opcode */
5477         ret = -TARGET_EINVAL;
5478         goto out;
5479     }
5480 
5481     /* Read and convert blkpg->data */
5482     arg = (abi_long)(uintptr_t)host_blkpg->data;
5483     target_size = thunk_type_size(part_arg_type, 0);
5484     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5485     if (!argptr) {
5486         ret = -TARGET_EFAULT;
5487         goto out;
5488     }
5489     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5490     unlock_user(argptr, arg, 0);
5491 
5492     /* Swizzle the data pointer to our local copy and call! */
5493     host_blkpg->data = &host_part;
5494     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5495 
5496 out:
5497     return ret;
5498 }
5499 
/*
 * Handler for ioctls taking a struct rtentry (routing-table requests).
 * rt_dev is a string pointer embedded in the struct, which the generic
 * thunk machinery cannot convert, so every field is converted here one
 * by one and the rt_dev string is locked in guest memory separately.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* This handler only supports write-only pointer-to-rtentry ioctls. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Non-NULL device name: lock the guest string in place and
                   hand the resulting host pointer to the kernel. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        /* Any other field goes through the generic converter. */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop must have visited rt_dev for a correct struct description;
       these asserts catch a mismatched STRUCT_rtentry definition. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5565 
5566 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5567                                      int fd, int cmd, abi_long arg)
5568 {
5569     int sig = target_to_host_signal(arg);
5570     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5571 }
5572 
5573 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5574                                     int fd, int cmd, abi_long arg)
5575 {
5576     struct timeval tv;
5577     abi_long ret;
5578 
5579     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5580     if (is_error(ret)) {
5581         return ret;
5582     }
5583 
5584     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5585         if (copy_to_user_timeval(arg, &tv)) {
5586             return -TARGET_EFAULT;
5587         }
5588     } else {
5589         if (copy_to_user_timeval64(arg, &tv)) {
5590             return -TARGET_EFAULT;
5591         }
5592     }
5593 
5594     return ret;
5595 }
5596 
5597 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5598                                       int fd, int cmd, abi_long arg)
5599 {
5600     struct timespec ts;
5601     abi_long ret;
5602 
5603     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5604     if (is_error(ret)) {
5605         return ret;
5606     }
5607 
5608     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5609         if (host_to_target_timespec(arg, &ts)) {
5610             return -TARGET_EFAULT;
5611         }
5612     } else{
5613         if (host_to_target_timespec64(arg, &ts)) {
5614             return -TARGET_EFAULT;
5615         }
5616     }
5617 
5618     return ret;
5619 }
5620 
#ifdef TIOCGPTPEER
/* TIOCGPTPEER takes open(2)-style flags; translate the guest's flag bits
 * to the host's encoding before issuing the call. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
5629 
5630 #ifdef HAVE_DRM_H
5631 
5632 static void unlock_drm_version(struct drm_version *host_ver,
5633                                struct target_drm_version *target_ver,
5634                                bool copy)
5635 {
5636     unlock_user(host_ver->name, target_ver->name,
5637                                 copy ? host_ver->name_len : 0);
5638     unlock_user(host_ver->date, target_ver->date,
5639                                 copy ? host_ver->date_len : 0);
5640     unlock_user(host_ver->desc, target_ver->desc,
5641                                 copy ? host_ver->desc_len : 0);
5642 }
5643 
/*
 * Prepare a host struct drm_version from the guest's: copy the three
 * string-buffer lengths and lock the corresponding guest buffers so the
 * kernel can write the name/date/desc strings straight into guest memory.
 * Returns 0 or -EFAULT; on failure, buffers locked so far are released
 * without copy-back.
 *
 * NOTE(review): the buffer addresses and lengths handed to lock_user are
 * read from target_ver without byte-swapping, while the *_len fields are
 * fetched via __get_user — looks inconsistent for cross-endian targets;
 * confirm against the definition of struct target_drm_version.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* 'false': nothing useful to copy back on the failure path. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5681 
5682 static inline void host_to_target_drmversion(
5683                                           struct target_drm_version *target_ver,
5684                                           struct drm_version *host_ver)
5685 {
5686     __put_user(host_ver->version_major, &target_ver->version_major);
5687     __put_user(host_ver->version_minor, &target_ver->version_minor);
5688     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5689     __put_user(host_ver->name_len, &target_ver->name_len);
5690     __put_user(host_ver->date_len, &target_ver->date_len);
5691     __put_user(host_ver->desc_len, &target_ver->desc_len);
5692     unlock_drm_version(host_ver, target_ver, true);
5693 }
5694 
5695 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5696                              int fd, int cmd, abi_long arg)
5697 {
5698     struct drm_version *ver;
5699     struct target_drm_version *target_ver;
5700     abi_long ret;
5701 
5702     switch (ie->host_cmd) {
5703     case DRM_IOCTL_VERSION:
5704         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5705             return -TARGET_EFAULT;
5706         }
5707         ver = (struct drm_version *)buf_temp;
5708         ret = target_to_host_drmversion(ver, target_ver);
5709         if (!is_error(ret)) {
5710             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5711             if (is_error(ret)) {
5712                 unlock_drm_version(ver, target_ver, false);
5713             } else {
5714                 host_to_target_drmversion(target_ver, ver);
5715             }
5716         }
5717         unlock_user_struct(target_ver, arg, 0);
5718         return ret;
5719     }
5720     return -TARGET_ENOSYS;
5721 }
5722 
5723 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5724                                            struct drm_i915_getparam *gparam,
5725                                            int fd, abi_long arg)
5726 {
5727     abi_long ret;
5728     int value;
5729     struct target_drm_i915_getparam *target_gparam;
5730 
5731     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5732         return -TARGET_EFAULT;
5733     }
5734 
5735     __get_user(gparam->param, &target_gparam->param);
5736     gparam->value = &value;
5737     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5738     put_user_s32(value, target_gparam->value);
5739 
5740     unlock_user_struct(target_gparam, arg, 0);
5741     return ret;
5742 }
5743 
5744 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5745                                   int fd, int cmd, abi_long arg)
5746 {
5747     switch (ie->host_cmd) {
5748     case DRM_IOCTL_I915_GETPARAM:
5749         return do_ioctl_drm_i915_getparam(ie,
5750                                           (struct drm_i915_getparam *)buf_temp,
5751                                           fd, arg);
5752     default:
5753         return -TARGET_ENOSYS;
5754     }
5755 }
5756 
5757 #endif
5758 
5759 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5760                                         int fd, int cmd, abi_long arg)
5761 {
5762     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5763     struct tun_filter *target_filter;
5764     char *target_addr;
5765 
5766     assert(ie->access == IOC_W);
5767 
5768     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5769     if (!target_filter) {
5770         return -TARGET_EFAULT;
5771     }
5772     filter->flags = tswap16(target_filter->flags);
5773     filter->count = tswap16(target_filter->count);
5774     unlock_user(target_filter, arg, 0);
5775 
5776     if (filter->count) {
5777         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5778             MAX_STRUCT_SIZE) {
5779             return -TARGET_EFAULT;
5780         }
5781 
5782         target_addr = lock_user(VERIFY_READ,
5783                                 arg + offsetof(struct tun_filter, addr),
5784                                 filter->count * ETH_ALEN, 1);
5785         if (!target_addr) {
5786             return -TARGET_EFAULT;
5787         }
5788         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5789         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5790     }
5791 
5792     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5793 }
5794 
/* Master ioctl table, expanded from ioctls.h.  Each entry maps a target
 * ioctl number to a host number, an access mode, and either a thunk
 * argument description or (IOCTL_SPECIAL) a dedicated do_ioctl_* handler;
 * IOCTL_IGNORE entries have host_cmd == 0 and are rejected in do_ioctl(). */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, }, /* terminator: do_ioctl() stops on target_cmd == 0 */
};
5805 
5806 /* ??? Implement proper locking for ioctls.  */
5807 /* do_ioctl() Must return target values and target errnos. */
5808 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5809 {
5810     const IOCTLEntry *ie;
5811     const argtype *arg_type;
5812     abi_long ret;
5813     uint8_t buf_temp[MAX_STRUCT_SIZE];
5814     int target_size;
5815     void *argptr;
5816 
5817     ie = ioctl_entries;
5818     for(;;) {
5819         if (ie->target_cmd == 0) {
5820             qemu_log_mask(
5821                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5822             return -TARGET_ENOSYS;
5823         }
5824         if (ie->target_cmd == cmd)
5825             break;
5826         ie++;
5827     }
5828     arg_type = ie->arg_type;
5829     if (ie->do_ioctl) {
5830         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5831     } else if (!ie->host_cmd) {
5832         /* Some architectures define BSD ioctls in their headers
5833            that are not implemented in Linux.  */
5834         return -TARGET_ENOSYS;
5835     }
5836 
5837     switch(arg_type[0]) {
5838     case TYPE_NULL:
5839         /* no argument */
5840         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5841         break;
5842     case TYPE_PTRVOID:
5843     case TYPE_INT:
5844     case TYPE_LONG:
5845     case TYPE_ULONG:
5846         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5847         break;
5848     case TYPE_PTR:
5849         arg_type++;
5850         target_size = thunk_type_size(arg_type, 0);
5851         switch(ie->access) {
5852         case IOC_R:
5853             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5854             if (!is_error(ret)) {
5855                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5856                 if (!argptr)
5857                     return -TARGET_EFAULT;
5858                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5859                 unlock_user(argptr, arg, target_size);
5860             }
5861             break;
5862         case IOC_W:
5863             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5864             if (!argptr)
5865                 return -TARGET_EFAULT;
5866             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5867             unlock_user(argptr, arg, 0);
5868             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5869             break;
5870         default:
5871         case IOC_RW:
5872             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5873             if (!argptr)
5874                 return -TARGET_EFAULT;
5875             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5876             unlock_user(argptr, arg, 0);
5877             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5878             if (!is_error(ret)) {
5879                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5880                 if (!argptr)
5881                     return -TARGET_EFAULT;
5882                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5883                 unlock_user(argptr, arg, target_size);
5884             }
5885             break;
5886         }
5887         break;
5888     default:
5889         qemu_log_mask(LOG_UNIMP,
5890                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5891                       (long)cmd, arg_type[0]);
5892         ret = -TARGET_ENOSYS;
5893         break;
5894     }
5895     return ret;
5896 }
5897 
/* termios c_iflag translation table, consumed by target_to_host_bitmask()
 * / host_to_target_bitmask() below.  Presumably each row pairs a target
 * mask/bits with the matching host mask/bits — confirm against the
 * bitmask_transtbl definition.  Zero row terminates the table. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5916 
5917 static const bitmask_transtbl oflag_tbl[] = {
5918 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5919 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5920 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5921 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5922 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5923 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5924 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5925 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5926 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5927 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5928 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5929 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5930 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5931 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5932 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5933 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5934 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5935 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5936 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5937 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5938 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5939 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5940 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5941 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5942 	{ 0, 0, 0, 0 }
5943 };
5944 
5945 static const bitmask_transtbl cflag_tbl[] = {
5946 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5947 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5948 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5949 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5950 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5951 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5952 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5953 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5954 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5955 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5956 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5957 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5958 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5959 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5960 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5961 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5962 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5963 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5964 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5965 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5966 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5967 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5968 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5969 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5970 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5971 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5972 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5973 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5974 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5975 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5976 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5977 	{ 0, 0, 0, 0 }
5978 };
5979 
5980 static const bitmask_transtbl lflag_tbl[] = {
5981   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5982   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5983   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5984   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5985   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5986   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5987   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5988   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5989   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5990   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5991   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5992   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5993   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5994   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5995   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5996   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5997   { 0, 0, 0, 0 }
5998 };
5999 
6000 static void target_to_host_termios (void *dst, const void *src)
6001 {
6002     struct host_termios *host = dst;
6003     const struct target_termios *target = src;
6004 
6005     host->c_iflag =
6006         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
6007     host->c_oflag =
6008         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
6009     host->c_cflag =
6010         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
6011     host->c_lflag =
6012         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
6013     host->c_line = target->c_line;
6014 
6015     memset(host->c_cc, 0, sizeof(host->c_cc));
6016     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
6017     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
6018     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
6019     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6020     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6021     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6022     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6023     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6024     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6025     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6026     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6027     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6028     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6029     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6030     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6031     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6032     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6033 }
6034 
/*
 * Convert a host struct termios into the guest's layout.
 * Flag words are translated bit-by-bit through the *flag_tbl tables and
 * byte-swapped to guest endianness; control characters are copied one by
 * one because the V* indices differ between host and target.
 * NOTE(review): on targets where some TARGET_V* indices alias each other
 * the assignment order below determines which value wins — keep it as-is;
 * confirm before reordering.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    /* Translate each mode word through its bitmask table, then swap. */
    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    /* Line discipline is a single byte; no swapping needed. */
    target->c_line = host->c_line;

    /* Clear first so any target slots without a host equivalent read as 0. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
6069 
/*
 * Marshalling descriptor for termios-carrying ioctls (TCGETS/TCSETS etc.):
 * records the converter pair, and the size/alignment of the structure on
 * each side so the generic ioctl code can copy it correctly.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
6076 
/*
 * Guest-to-host translation table for mmap(2) flags.
 * Each entry is { target_mask, target_bits, host_mask, host_bits };
 * a zero host side (as for MAP_STACK) accepts the guest bit but emits
 * nothing to the host.  The all-zero entry terminates the table.
 */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6099 
6100 /*
6101  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6102  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6103  */
6104 #if defined(TARGET_I386)
6105 
6106 /* NOTE: there is really one LDT for all the threads */
6107 static uint8_t *ldt_table;
6108 
6109 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6110 {
6111     int size;
6112     void *p;
6113 
6114     if (!ldt_table)
6115         return 0;
6116     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6117     if (size > bytecount)
6118         size = bytecount;
6119     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6120     if (!p)
6121         return -TARGET_EFAULT;
6122     /* ??? Should this by byteswapped?  */
6123     memcpy(p, ldt_table, size);
6124     unlock_user(p, ptr, size);
6125     return size;
6126 }
6127 
6128 /* XXX: add locking support */
/*
 * Install or clear one LDT entry on behalf of the guest (modify_ldt
 * func 1 and 0x11).  'oldmode' selects the legacy semantics: the
 * 'useable' bit is not encoded and contents==3 (conforming code) is
 * rejected.  The descriptor words are assembled exactly as the Linux
 * kernel does.  Returns 0 on success or a -TARGET_* errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Pull the user_desc fields out of guest memory, fixing endianness. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Decode the packed flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents==3 (conforming code) is only valid for a not-present,
       non-oldmode entry. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the two 32-bit descriptor words (base/limit split across
       both, attribute bits in entry_2; 0x7000 sets DPL=3 + present-type
       base bits). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6219 
6220 /* specific and weird i386 syscalls */
6221 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6222                               unsigned long bytecount)
6223 {
6224     abi_long ret;
6225 
6226     switch (func) {
6227     case 0:
6228         ret = read_ldt(ptr, bytecount);
6229         break;
6230     case 1:
6231         ret = write_ldt(env, ptr, bytecount, 1);
6232         break;
6233     case 0x11:
6234         ret = write_ldt(env, ptr, bytecount, 0);
6235         break;
6236     default:
6237         ret = -TARGET_ENOSYS;
6238         break;
6239     }
6240     return ret;
6241 }
6242 
6243 #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2) for 32-bit x86 guests: install a TLS
 * descriptor into the emulated GDT.  An entry_number of -1 asks us to
 * pick the first free TLS slot and report it back to the guest.
 * Returns 0 on success, -TARGET_EFAULT or -TARGET_EINVAL on error.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Copy the guest user_desc, fixing endianness. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot for me"; write the chosen
       index back into the guest structure. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Decode the packed flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the descriptor words; 0x7000 sets DPL=3 plus the base
       type/present bits (see write_ldt above). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6328 
/*
 * Emulate get_thread_area(2): read back a TLS descriptor from the
 * emulated GDT and re-pack it into the guest's user_desc layout
 * (the inverse of do_set_thread_area above).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the TLS range of the GDT may be inspected. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the attribute bits from the second descriptor word
       (inverse of the encoding in do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Re-pack into the user_desc flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    /* Reassemble limit and base from their split fields. */
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6375 
/* arch_prctl is not provided to 32-bit ABI guests: always ENOSYS. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6380 #else
6381 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6382 {
6383     abi_long ret = 0;
6384     abi_ulong val;
6385     int idx;
6386 
6387     switch(code) {
6388     case TARGET_ARCH_SET_GS:
6389     case TARGET_ARCH_SET_FS:
6390         if (code == TARGET_ARCH_SET_GS)
6391             idx = R_GS;
6392         else
6393             idx = R_FS;
6394         cpu_x86_load_seg(env, idx, 0);
6395         env->segs[idx].base = addr;
6396         break;
6397     case TARGET_ARCH_GET_GS:
6398     case TARGET_ARCH_GET_FS:
6399         if (code == TARGET_ARCH_GET_GS)
6400             idx = R_GS;
6401         else
6402             idx = R_FS;
6403         val = env->segs[idx].base;
6404         if (put_user(val, addr, abi_ulong))
6405             ret = -TARGET_EFAULT;
6406         break;
6407     default:
6408         ret = -TARGET_EINVAL;
6409         break;
6410     }
6411     return ret;
6412 }
#endif /* defined(TARGET_ABI32) */
6414 
6415 #endif /* defined(TARGET_I386) */
6416 
6417 #define NEW_STACK_SIZE 0x40000
6418 
6419 
/* Held across thread setup in do_fork(); the child also briefly takes it
   in clone_func() so it cannot run before the parent finishes TLS setup. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Hand-off block between do_fork() (parent) and clone_func() (child). */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects the cond handshake below */
    pthread_cond_t cond;      /* child signals readiness on this */
    pthread_t thread;         /* host pthread handle of the child */
    uint32_t tid;             /* child's host TID, filled by clone_func() */
    abi_ulong child_tidptr;   /* CLONE_CHILD_SETTID target, or 0 */
    abi_ulong parent_tidptr;  /* CLONE_PARENT_SETTID target, or 0 */
    sigset_t sigmask;         /* signal mask to install in the child */
} new_thread_info;
6431 
/*
 * Entry point of a host pthread backing a CLONE_VM guest thread.
 * Registers with RCU/TCG, publishes its TID where requested, unblocks
 * signals, signals readiness to the parent, then waits for the parent
 * to release clone_lock before entering the guest CPU loop.
 * The statement order here implements the parent/child handshake —
 * do not reorder.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Publish the TID wherever CLONE_*_SETTID asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6465 
6466 /* do_fork() Must return host values and target errnos (unlike most
6467    do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Strip clone flags that this emulation deliberately ignores. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* CLONE_VM: create the guest thread as a host pthread. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Only full pthread-like thread creation is supported. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Set up the parent/child handshake block (see clone_func). */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the return values of the three pthread_attr_*
           calls below are assigned to 'ret' but never checked before
           being overwritten — confirm whether failures can be ignored. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our own mask; the child installs info.sigmask itself. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6610 
6611 /* warning : doesn't handle linux specific flags... */
/*
 * Map a guest fcntl command number to the host's, or -TARGET_EINVAL for
 * commands we do not translate.  Record-lock commands are mapped to the
 * 64-bit variants so do_fcntl() can always work with struct flock64.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands share numbering between guest and host. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    /* Always use the 64-bit lock commands on the host side. */
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* 32-bit ABIs expose separate *64 commands; fold them in too. */
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6717 
/*
 * Expands to a switch over 'type' covering the three flock lock types.
 * Each user defines TRANSTBL_CONVERT to select the conversion direction
 * (target->host or host->target) before expanding this macro.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6724 
6725 static int target_to_host_flock(int type)
6726 {
6727 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6728     FLOCK_TRANSTBL
6729 #undef  TRANSTBL_CONVERT
6730     return -TARGET_EINVAL;
6731 }
6732 
6733 static int host_to_target_flock(int type)
6734 {
6735 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6736     FLOCK_TRANSTBL
6737 #undef  TRANSTBL_CONVERT
6738     /* if we don't know how to convert the value coming
6739      * from the host we copy to the target field as-is
6740      */
6741     return type;
6742 }
6743 
6744 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6745                                             abi_ulong target_flock_addr)
6746 {
6747     struct target_flock *target_fl;
6748     int l_type;
6749 
6750     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6751         return -TARGET_EFAULT;
6752     }
6753 
6754     __get_user(l_type, &target_fl->l_type);
6755     l_type = target_to_host_flock(l_type);
6756     if (l_type < 0) {
6757         return l_type;
6758     }
6759     fl->l_type = l_type;
6760     __get_user(fl->l_whence, &target_fl->l_whence);
6761     __get_user(fl->l_start, &target_fl->l_start);
6762     __get_user(fl->l_len, &target_fl->l_len);
6763     __get_user(fl->l_pid, &target_fl->l_pid);
6764     unlock_user_struct(target_fl, target_flock_addr, 0);
6765     return 0;
6766 }
6767 
/*
 * Convert a host struct flock64 into the guest's struct flock layout and
 * write it to guest memory.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* l_type is translated; the remaining fields copy straight across. */
    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6787 
/* Function types for the flock64 copy-in/copy-out helpers below, so a
   caller can select a variant (e.g. the ARM OABI ones) at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6790 
6791 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6792 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6793                                                    abi_ulong target_flock_addr)
6794 {
6795     struct target_oabi_flock64 *target_fl;
6796     int l_type;
6797 
6798     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6799         return -TARGET_EFAULT;
6800     }
6801 
6802     __get_user(l_type, &target_fl->l_type);
6803     l_type = target_to_host_flock(l_type);
6804     if (l_type < 0) {
6805         return l_type;
6806     }
6807     fl->l_type = l_type;
6808     __get_user(fl->l_whence, &target_fl->l_whence);
6809     __get_user(fl->l_start, &target_fl->l_start);
6810     __get_user(fl->l_len, &target_fl->l_len);
6811     __get_user(fl->l_pid, &target_fl->l_pid);
6812     unlock_user_struct(target_fl, target_flock_addr, 0);
6813     return 0;
6814 }
6815 
/*
 * ARM OABI variant of copy_to_user_flock64: identical logic, but writes
 * the old-ABI guest layout (struct target_oabi_flock64).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6835 #endif
6836 
6837 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6838                                               abi_ulong target_flock_addr)
6839 {
6840     struct target_flock64 *target_fl;
6841     int l_type;
6842 
6843     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6844         return -TARGET_EFAULT;
6845     }
6846 
6847     __get_user(l_type, &target_fl->l_type);
6848     l_type = target_to_host_flock(l_type);
6849     if (l_type < 0) {
6850         return l_type;
6851     }
6852     fl->l_type = l_type;
6853     __get_user(fl->l_whence, &target_fl->l_whence);
6854     __get_user(fl->l_start, &target_fl->l_start);
6855     __get_user(fl->l_len, &target_fl->l_len);
6856     __get_user(fl->l_pid, &target_fl->l_pid);
6857     unlock_user_struct(target_fl, target_flock_addr, 0);
6858     return 0;
6859 }
6860 
/*
 * Convert a host struct flock64 into the guest's struct flock64 layout
 * and write it to guest memory.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* l_type is translated; the remaining fields copy straight across. */
    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6880 
/*
 * Emulate fcntl(2)/fcntl64(2) for the guest: translate the target command
 * number to the host's, convert any pointed-to structure or flag argument
 * in each direction, and return the host result (or a negative target
 * errno).  'arg' is a guest address for the lock/owner commands and a
 * plain integer for the rest.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* On success GETLK writes the conflicting lock back to the guest */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Returned open flags use host bit values; convert to target's */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* Signal numbers differ between guest and host ABIs */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument with identical semantics on host and target */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass it through unchanged */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6999 
7000 #ifdef USE_UID16
7001 
/* Clamp a 32-bit uid into the 16-bit range; overflowing ids become 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
7009 
/* Clamp a 32-bit gid into the 16-bit range; overflowing ids become 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
7017 
/* Widen a 16-bit uid; the 16-bit "no change" sentinel maps to -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
7025 
/* Widen a 16-bit gid; the 16-bit "no change" sentinel maps to -1. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Byte-swap an id between host and target order; UID16 targets store
 * ids as 16-bit values in guest memory. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store an id into guest memory using the UID16 16-bit representation. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7039 
7040 #else /* !USE_UID16 */
/* On targets with native 32-bit uids/gids the narrowing/widening
 * conversions are all identity mappings. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Ids are stored as 32-bit values in guest memory on these targets. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7063 
7064 #endif /* USE_UID16 */
7065 
7066 /* We must do direct syscalls for setting UID/GID, because we want to
7067  * implement the Linux system call semantics of "change only for this thread",
7068  * not the libc/POSIX semantics of "change for all threads in process".
7069  * (See http://ewontfix.com/17/ for more details.)
7070  * We use the 32-bit version of the syscalls if present; if it is not
7071  * then either the host architecture supports 32-bit UIDs natively with
7072  * the standard syscall, or the 16-bit UID is the best we can do.
7073  */
7074 #ifdef __NR_setuid32
7075 #define __NR_sys_setuid __NR_setuid32
7076 #else
7077 #define __NR_sys_setuid __NR_setuid
7078 #endif
7079 #ifdef __NR_setgid32
7080 #define __NR_sys_setgid __NR_setgid32
7081 #else
7082 #define __NR_sys_setgid __NR_setgid
7083 #endif
7084 #ifdef __NR_setresuid32
7085 #define __NR_sys_setresuid __NR_setresuid32
7086 #else
7087 #define __NR_sys_setresuid __NR_setresuid
7088 #endif
7089 #ifdef __NR_setresgid32
7090 #define __NR_sys_setresgid __NR_setresgid32
7091 #else
7092 #define __NR_sys_setresgid __NR_setresgid
7093 #endif
7094 
/* Thin wrappers invoking the raw syscalls so credential changes apply
 * only to the calling thread (Linux semantics, not the libc/POSIX
 * process-wide ones). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7099 
/*
 * One-time initialisation of the syscall layer: register the struct
 * marshalling thunks, build the target->host errno mapping, and patch
 * ioctl request numbers whose size field depends on the target ABI.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones placeholder with the real target size
             * of the pointed-to argument. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7151 
7152 #ifdef TARGET_NR_truncate64
7153 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7154                                          abi_long arg2,
7155                                          abi_long arg3,
7156                                          abi_long arg4)
7157 {
7158     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7159         arg2 = arg3;
7160         arg3 = arg4;
7161     }
7162     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7163 }
7164 #endif
7165 
7166 #ifdef TARGET_NR_ftruncate64
7167 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7168                                           abi_long arg2,
7169                                           abi_long arg3,
7170                                           abi_long arg4)
7171 {
7172     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7173         arg2 = arg3;
7174         arg3 = arg4;
7175     }
7176     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7177 }
7178 #endif
7179 
7180 #if defined(TARGET_NR_timer_settime) || \
7181     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7182 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7183                                                  abi_ulong target_addr)
7184 {
7185     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7186                                 offsetof(struct target_itimerspec,
7187                                          it_interval)) ||
7188         target_to_host_timespec(&host_its->it_value, target_addr +
7189                                 offsetof(struct target_itimerspec,
7190                                          it_value))) {
7191         return -TARGET_EFAULT;
7192     }
7193 
7194     return 0;
7195 }
7196 #endif
7197 
7198 #if defined(TARGET_NR_timer_settime64) || \
7199     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7200 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7201                                                    abi_ulong target_addr)
7202 {
7203     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7204                                   offsetof(struct target__kernel_itimerspec,
7205                                            it_interval)) ||
7206         target_to_host_timespec64(&host_its->it_value, target_addr +
7207                                   offsetof(struct target__kernel_itimerspec,
7208                                            it_value))) {
7209         return -TARGET_EFAULT;
7210     }
7211 
7212     return 0;
7213 }
7214 #endif
7215 
7216 #if ((defined(TARGET_NR_timerfd_gettime) || \
7217       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7218       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7219 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7220                                                  struct itimerspec *host_its)
7221 {
7222     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7223                                                        it_interval),
7224                                 &host_its->it_interval) ||
7225         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7226                                                        it_value),
7227                                 &host_its->it_value)) {
7228         return -TARGET_EFAULT;
7229     }
7230     return 0;
7231 }
7232 #endif
7233 
7234 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7235       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7236       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7237 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7238                                                    struct itimerspec *host_its)
7239 {
7240     if (host_to_target_timespec64(target_addr +
7241                                   offsetof(struct target__kernel_itimerspec,
7242                                            it_interval),
7243                                   &host_its->it_interval) ||
7244         host_to_target_timespec64(target_addr +
7245                                   offsetof(struct target__kernel_itimerspec,
7246                                            it_value),
7247                                   &host_its->it_value)) {
7248         return -TARGET_EFAULT;
7249     }
7250     return 0;
7251 }
7252 #endif
7253 
7254 #if defined(TARGET_NR_adjtimex) || \
7255     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest target_timex at target_addr into the host *host_tx,
 * byte-swapping each field.  Returns 0 on success, -TARGET_EFAULT if
 * the guest struct is unreadable.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7290 
/*
 * Copy the host *host_tx back to the guest target_timex at target_addr,
 * byte-swapping each field.  Returns 0 on success, -TARGET_EFAULT if
 * the guest struct is unwritable.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7325 #endif
7326 
7327 
7328 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a guest target__kernel_timex (64-bit time variant) at target_addr
 * into the host *host_tx.  The embedded 'time' member uses the 64-bit
 * timeval layout and is converted separately via copy_from_user_timeval64()
 * before the remaining fields are swapped in.  Returns 0 on success,
 * -TARGET_EFAULT on an unreadable guest struct.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7367 
/*
 * Copy the host *host_tx back to a guest target__kernel_timex (64-bit
 * time variant) at target_addr.  The embedded 'time' member is converted
 * separately via copy_to_user_timeval64() before the remaining fields
 * are swapped out.  Returns 0 on success, -TARGET_EFAULT on an
 * unwritable guest struct.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7406 #endif
7407 
7408 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7409                                                abi_ulong target_addr)
7410 {
7411     struct target_sigevent *target_sevp;
7412 
7413     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7414         return -TARGET_EFAULT;
7415     }
7416 
7417     /* This union is awkward on 64 bit systems because it has a 32 bit
7418      * integer and a pointer in it; we follow the conversion approach
7419      * used for handling sigval types in signal.c so the guest should get
7420      * the correct value back even if we did a 64 bit byteswap and it's
7421      * using the 32 bit integer.
7422      */
7423     host_sevp->sigev_value.sival_ptr =
7424         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7425     host_sevp->sigev_signo =
7426         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7427     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7428     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7429 
7430     unlock_user_struct(target_sevp, target_addr, 1);
7431     return 0;
7432 }
7433 
7434 #if defined(TARGET_NR_mlockall)
7435 static inline int target_to_host_mlockall_arg(int arg)
7436 {
7437     int result = 0;
7438 
7439     if (arg & TARGET_MCL_CURRENT) {
7440         result |= MCL_CURRENT;
7441     }
7442     if (arg & TARGET_MCL_FUTURE) {
7443         result |= MCL_FUTURE;
7444     }
7445 #ifdef MCL_ONFAULT
7446     if (arg & TARGET_MCL_ONFAULT) {
7447         result |= MCL_ONFAULT;
7448     }
7449 #endif
7450 
7451     return result;
7452 }
7453 #endif
7454 
7455 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7456      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7457      defined(TARGET_NR_newfstatat))
/*
 * Write host struct stat data to the guest's 64-bit stat structure at
 * target_addr.  On 32-bit ARM the EABI layout differs from the OABI one,
 * so the correct target structure is chosen at run time from the CPU
 * state; other targets use target_stat64 (or target_stat if they have
 * no separate 64-bit layout).  Returns 0 on success, -TARGET_EFAULT if
 * the guest buffer is unwritable.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode in a second, differently-sized field */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        /* Nanosecond timestamp members only exist on recent POSIX hosts */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7530 #endif
7531 
7532 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result to the guest's target_statx buffer at target_addr,
 * byte-swapping every field.  The buffer is zeroed first so reserved
 * fields read back as 0.  Returns 0 on success, -TARGET_EFAULT if the
 * guest buffer is unwritable.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7571 #endif
7572 
/*
 * Invoke the host futex syscall directly, picking between the classic
 * futex entry point and futex_time64 according to what the host kernel
 * provides and the width of the host timespec.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Only reachable if the host defines neither futex syscall number. */
    g_assert_not_reached();
}
7597 
/*
 * Like do_sys_futex() but goes through the signal-safe safe_futex
 * wrappers and converts the host result to a target errno.  Falls back
 * to -TARGET_ENOSYS when the host has no usable futex syscall.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7622 
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  In any case they're probably useless, because guest atomic
   operations won't work either.  */
7628 #if defined(TARGET_NR_futex)
7629 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7630                     target_ulong timeout, target_ulong uaddr2, int val3)
7631 {
7632     struct timespec ts, *pts;
7633     int base_op;
7634 
7635     /* ??? We assume FUTEX_* constants are the same on both host
7636        and target.  */
7637 #ifdef FUTEX_CMD_MASK
7638     base_op = op & FUTEX_CMD_MASK;
7639 #else
7640     base_op = op;
7641 #endif
7642     switch (base_op) {
7643     case FUTEX_WAIT:
7644     case FUTEX_WAIT_BITSET:
7645         if (timeout) {
7646             pts = &ts;
7647             target_to_host_timespec(pts, timeout);
7648         } else {
7649             pts = NULL;
7650         }
7651         return do_safe_futex(g2h(cpu, uaddr),
7652                              op, tswap32(val), pts, NULL, val3);
7653     case FUTEX_WAKE:
7654         return do_safe_futex(g2h(cpu, uaddr),
7655                              op, val, NULL, NULL, 0);
7656     case FUTEX_FD:
7657         return do_safe_futex(g2h(cpu, uaddr),
7658                              op, val, NULL, NULL, 0);
7659     case FUTEX_REQUEUE:
7660     case FUTEX_CMP_REQUEUE:
7661     case FUTEX_WAKE_OP:
7662         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7663            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7664            But the prototype takes a `struct timespec *'; insert casts
7665            to satisfy the compiler.  We do not need to tswap TIMEOUT
7666            since it's not compared to guest memory.  */
7667         pts = (struct timespec *)(uintptr_t) timeout;
7668         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7669                              (base_op == FUTEX_CMP_REQUEUE
7670                               ? tswap32(val3) : val3));
7671     default:
7672         return -TARGET_ENOSYS;
7673     }
7674 }
7675 #endif
7676 
7677 #if defined(TARGET_NR_futex_time64)
/*
 * Emulate the futex_time64 syscall (64-bit timespec on 32-bit guests).
 * Mirrors do_futex() except that timeouts are read with the 64-bit
 * timespec converter.  Returns a target errno, -TARGET_ENOSYS for
 * unsupported operations, or -TARGET_EFAULT on an unreadable timeout.
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
7725 #endif
7726 
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  The guest struct file_handle has the
 * same layout as the host's; only the two 32-bit header fields need
 * byte-swapping.  The opaque f_handle payload is copied verbatim.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the leading field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /*
     * SIZE is guest-controlled: refuse values that would make
     * TOTAL_SIZE wrap around.  (The host kernel rejects anything
     * above MAX_HANDLE_SZ with EINVAL anyway.)
     */
    if (size > UINT_MAX - sizeof(struct file_handle)) {
        return -TARGET_EINVAL;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    /* The mount ID is returned through a separate out-parameter. */
    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
7780 
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2).  The guest handle is copied to a host
 * buffer with the header fields byte-swapped; the opaque payload is
 * passed through unchanged.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the leading field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /*
     * SIZE is guest-controlled: refuse values that would make
     * TOTAL_SIZE wrap around.  (The host kernel rejects anything
     * above MAX_HANDLE_SZ with EINVAL anyway.)
     */
    if (size > UINT_MAX - sizeof(struct file_handle)) {
        return -TARGET_EINVAL;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7814 
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Common implementation for signalfd and signalfd4: convert the guest
 * signal mask and flags to host encoding and create the host signalfd.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    target_sigset_t *target_mask;
    sigset_t host_mask;
    int host_flags;
    abi_long ret;

    /* Only O_NONBLOCK and O_CLOEXEC are meaningful signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd produce signalfd_siginfo that needs
           host-to-target translation. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);
    return ret;
}
#endif
7845 
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        int sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        int sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    /* Exited / continued: the encoding carries no signal number. */
    return status;
}
7859 
7860 static int open_self_cmdline(void *cpu_env, int fd)
7861 {
7862     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7863     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7864     int i;
7865 
7866     for (i = 0; i < bprm->argc; i++) {
7867         size_t len = strlen(bprm->argv[i]) + 1;
7868 
7869         if (write(fd, bprm->argv[i], len) != len) {
7870             return -1;
7871         }
7872     }
7873 
7874     return 0;
7875 }
7876 
/*
 * Fill FD with the emulation of /proc/self/maps: walk the *host*
 * mappings and report only those ranges that are visible in the guest
 * address space, printed with guest (h2g) addresses.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last host address representable in
               the guest address space. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip ranges the guest page table doesn't actually map
               with these permissions. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* The guest stack is a host anonymous mapping; label it. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            /* Note PAGE_WRITE_ORG: report the original writability,
               not any temporary write-protection (e.g. for TB pages). */
            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the pathname starts at column 73, matching the
                   kernel's /proc/pid/maps layout. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
7938 
7939 static int open_self_stat(void *cpu_env, int fd)
7940 {
7941     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7942     TaskState *ts = cpu->opaque;
7943     g_autoptr(GString) buf = g_string_new(NULL);
7944     int i;
7945 
7946     for (i = 0; i < 44; i++) {
7947         if (i == 0) {
7948             /* pid */
7949             g_string_printf(buf, FMT_pid " ", getpid());
7950         } else if (i == 1) {
7951             /* app name */
7952             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7953             bin = bin ? bin + 1 : ts->bprm->argv[0];
7954             g_string_printf(buf, "(%.15s) ", bin);
7955         } else if (i == 27) {
7956             /* stack bottom */
7957             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7958         } else {
7959             /* for the rest, there is MasterCard */
7960             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7961         }
7962 
7963         if (write(fd, buf->str, buf->len) != buf->len) {
7964             return -1;
7965         }
7966     }
7967 
7968     return 0;
7969 }
7970 
7971 static int open_self_auxv(void *cpu_env, int fd)
7972 {
7973     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7974     TaskState *ts = cpu->opaque;
7975     abi_ulong auxv = ts->info->saved_auxv;
7976     abi_ulong len = ts->info->auxv_len;
7977     char *ptr;
7978 
7979     /*
7980      * Auxiliary vector is stored in target process stack.
7981      * read in whole auxv vector and copy it to file
7982      */
7983     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7984     if (ptr != NULL) {
7985         while (len > 0) {
7986             ssize_t r;
7987             r = write(fd, ptr, len);
7988             if (r <= 0) {
7989                 break;
7990             }
7991             len -= r;
7992             ptr += r;
7993         }
7994         lseek(fd, 0, SEEK_SET);
7995         unlock_user(ptr, auxv, len);
7996     }
7997 
7998     return 0;
7999 }
8000 
/*
 * Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory, i.e. "/proc/self/ENTRY" or "/proc/<own-pid>/ENTRY".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    /* Must live under /proc/. */
    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory only matches our own pid. */
        char pid_prefix[80];
        size_t prefix_len;

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        prefix_len = strlen(pid_prefix);
        if (strncmp(rest, pid_prefix, prefix_len) != 0) {
            return 0;
        }
        rest += prefix_len;
    } else {
        return 0;
    }

    /* The remainder must be exactly ENTRY. */
    return strcmp(rest, entry) == 0;
}
8024 
8025 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8026     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match check, unlike the prefix logic in is_proc_myself(). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8031 #endif
8032 
8033 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8034 static int open_net_route(void *cpu_env, int fd)
8035 {
8036     FILE *fp;
8037     char *line = NULL;
8038     size_t len = 0;
8039     ssize_t read;
8040 
8041     fp = fopen("/proc/net/route", "r");
8042     if (fp == NULL) {
8043         return -1;
8044     }
8045 
8046     /* read header */
8047 
8048     read = getline(&line, &len, fp);
8049     dprintf(fd, "%s", line);
8050 
8051     /* read routes */
8052 
8053     while ((read = getline(&line, &len, fp)) != -1) {
8054         char iface[16];
8055         uint32_t dest, gw, mask;
8056         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8057         int fields;
8058 
8059         fields = sscanf(line,
8060                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8061                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8062                         &mask, &mtu, &window, &irtt);
8063         if (fields != 11) {
8064             continue;
8065         }
8066         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8067                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8068                 metric, tswap32(mask), mtu, window, irtt);
8069     }
8070 
8071     free(line);
8072     fclose(fp);
8073 
8074     return 0;
8075 }
8076 #endif
8077 
8078 #if defined(TARGET_SPARC)
/* Fake /proc/cpuinfo for SPARC guests: report a generic sun4u box. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
8084 #endif
8085 
8086 #if defined(TARGET_HPPA)
/* Fake /proc/cpuinfo for HPPA guests, describing the emulated B160L. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd,
            "cpu family\t: PA-RISC 1.1e\n"
            "cpu\t\t: PA7300LC (PCX-L2)\n"
            "capabilities\t: os32\n"
            "model\t\t: 9000/778/B160L\n"
            "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
8096 #endif
8097 
8098 #if defined(TARGET_M68K)
/* Fake /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "%s", "Model:\t\tqemu-m68k\n");
    return 0;
}
8104 #endif
8105 
8106 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8107 {
8108     struct fake_open {
8109         const char *filename;
8110         int (*fill)(void *cpu_env, int fd);
8111         int (*cmp)(const char *s1, const char *s2);
8112     };
8113     const struct fake_open *fake_open;
8114     static const struct fake_open fakes[] = {
8115         { "maps", open_self_maps, is_proc_myself },
8116         { "stat", open_self_stat, is_proc_myself },
8117         { "auxv", open_self_auxv, is_proc_myself },
8118         { "cmdline", open_self_cmdline, is_proc_myself },
8119 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8120         { "/proc/net/route", open_net_route, is_proc },
8121 #endif
8122 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8123         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8124 #endif
8125 #if defined(TARGET_M68K)
8126         { "/proc/hardware", open_hardware, is_proc },
8127 #endif
8128         { NULL, NULL, NULL }
8129     };
8130 
8131     if (is_proc_myself(pathname, "exe")) {
8132         int execfd = qemu_getauxval(AT_EXECFD);
8133         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8134     }
8135 
8136     for (fake_open = fakes; fake_open->filename; fake_open++) {
8137         if (fake_open->cmp(pathname, fake_open->filename)) {
8138             break;
8139         }
8140     }
8141 
8142     if (fake_open->filename) {
8143         const char *tmpdir;
8144         char filename[PATH_MAX];
8145         int fd, r;
8146 
8147         /* create temporary file to map stat to */
8148         tmpdir = getenv("TMPDIR");
8149         if (!tmpdir)
8150             tmpdir = "/tmp";
8151         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8152         fd = mkstemp(filename);
8153         if (fd < 0) {
8154             return fd;
8155         }
8156         unlink(filename);
8157 
8158         if ((r = fake_open->fill(cpu_env, fd))) {
8159             int e = errno;
8160             close(fd);
8161             errno = e;
8162             return r;
8163         }
8164         lseek(fd, 0, SEEK_SET);
8165 
8166         return fd;
8167     }
8168 
8169     return safe_openat(dirfd, path(pathname), flags, mode);
8170 }
8171 
8172 #define TIMER_MAGIC 0x0caf0000
8173 #define TIMER_MAGIC_MASK 0xffff0000
8174 
8175 /* Convert QEMU provided timer ID back to internal 16bit index format */
8176 static target_timer_t get_timer_id(abi_long arg)
8177 {
8178     target_timer_t timerid = arg;
8179 
8180     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8181         return -TARGET_EINVAL;
8182     }
8183 
8184     timerid &= 0xffff;
8185 
8186     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8187         return -TARGET_EINVAL;
8188     }
8189 
8190     return timerid;
8191 }
8192 
8193 static int target_to_host_cpu_mask(unsigned long *host_mask,
8194                                    size_t host_size,
8195                                    abi_ulong target_addr,
8196                                    size_t target_size)
8197 {
8198     unsigned target_bits = sizeof(abi_ulong) * 8;
8199     unsigned host_bits = sizeof(*host_mask) * 8;
8200     abi_ulong *target_mask;
8201     unsigned i, j;
8202 
8203     assert(host_size >= target_size);
8204 
8205     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8206     if (!target_mask) {
8207         return -TARGET_EFAULT;
8208     }
8209     memset(host_mask, 0, host_size);
8210 
8211     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8212         unsigned bit = i * target_bits;
8213         abi_ulong val;
8214 
8215         __get_user(val, &target_mask[i]);
8216         for (j = 0; j < target_bits; j++, bit++) {
8217             if (val & (1UL << j)) {
8218                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8219             }
8220         }
8221     }
8222 
8223     unlock_user(target_mask, target_addr, 0);
8224     return 0;
8225 }
8226 
8227 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8228                                    size_t host_size,
8229                                    abi_ulong target_addr,
8230                                    size_t target_size)
8231 {
8232     unsigned target_bits = sizeof(abi_ulong) * 8;
8233     unsigned host_bits = sizeof(*host_mask) * 8;
8234     abi_ulong *target_mask;
8235     unsigned i, j;
8236 
8237     assert(host_size >= target_size);
8238 
8239     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8240     if (!target_mask) {
8241         return -TARGET_EFAULT;
8242     }
8243 
8244     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8245         unsigned bit = i * target_bits;
8246         abi_ulong val = 0;
8247 
8248         for (j = 0; j < target_bits; j++, bit++) {
8249             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8250                 val |= 1UL << j;
8251             }
8252         }
8253         __put_user(val, &target_mask[i]);
8254     }
8255 
8256     unlock_user(target_mask, target_addr, target_size);
8257     return 0;
8258 }
8259 
8260 /* This is an internal helper for do_syscall so that it is easier
8261  * to have a single return point, so that actions, such as logging
8262  * of syscall results, can be performed.
8263  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8264  */
8265 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8266                             abi_long arg2, abi_long arg3, abi_long arg4,
8267                             abi_long arg5, abi_long arg6, abi_long arg7,
8268                             abi_long arg8)
8269 {
8270     CPUState *cpu = env_cpu(cpu_env);
8271     abi_long ret;
8272 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8273     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8274     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8275     || defined(TARGET_NR_statx)
8276     struct stat st;
8277 #endif
8278 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8279     || defined(TARGET_NR_fstatfs)
8280     struct statfs stfs;
8281 #endif
8282     void *p;
8283 
8284     switch(num) {
8285     case TARGET_NR_exit:
8286         /* In old applications this may be used to implement _exit(2).
8287            However in threaded applications it is used for thread termination,
8288            and _exit_group is used for application termination.
8289            Do thread termination if we have more then one thread.  */
8290 
8291         if (block_signals()) {
8292             return -TARGET_ERESTARTSYS;
8293         }
8294 
8295         pthread_mutex_lock(&clone_lock);
8296 
8297         if (CPU_NEXT(first_cpu)) {
8298             TaskState *ts = cpu->opaque;
8299 
8300             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8301             object_unref(OBJECT(cpu));
8302             /*
8303              * At this point the CPU should be unrealized and removed
8304              * from cpu lists. We can clean-up the rest of the thread
8305              * data without the lock held.
8306              */
8307 
8308             pthread_mutex_unlock(&clone_lock);
8309 
8310             if (ts->child_tidptr) {
8311                 put_user_u32(0, ts->child_tidptr);
8312                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8313                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8314             }
8315             thread_cpu = NULL;
8316             g_free(ts);
8317             rcu_unregister_thread();
8318             pthread_exit(NULL);
8319         }
8320 
8321         pthread_mutex_unlock(&clone_lock);
8322         preexit_cleanup(cpu_env, arg1);
8323         _exit(arg1);
8324         return 0; /* avoid warning */
8325     case TARGET_NR_read:
8326         if (arg2 == 0 && arg3 == 0) {
8327             return get_errno(safe_read(arg1, 0, 0));
8328         } else {
8329             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8330                 return -TARGET_EFAULT;
8331             ret = get_errno(safe_read(arg1, p, arg3));
8332             if (ret >= 0 &&
8333                 fd_trans_host_to_target_data(arg1)) {
8334                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8335             }
8336             unlock_user(p, arg2, ret);
8337         }
8338         return ret;
8339     case TARGET_NR_write:
8340         if (arg2 == 0 && arg3 == 0) {
8341             return get_errno(safe_write(arg1, 0, 0));
8342         }
8343         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8344             return -TARGET_EFAULT;
8345         if (fd_trans_target_to_host_data(arg1)) {
8346             void *copy = g_malloc(arg3);
8347             memcpy(copy, p, arg3);
8348             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8349             if (ret >= 0) {
8350                 ret = get_errno(safe_write(arg1, copy, ret));
8351             }
8352             g_free(copy);
8353         } else {
8354             ret = get_errno(safe_write(arg1, p, arg3));
8355         }
8356         unlock_user(p, arg2, 0);
8357         return ret;
8358 
8359 #ifdef TARGET_NR_open
8360     case TARGET_NR_open:
8361         if (!(p = lock_user_string(arg1)))
8362             return -TARGET_EFAULT;
8363         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8364                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8365                                   arg3));
8366         fd_trans_unregister(ret);
8367         unlock_user(p, arg1, 0);
8368         return ret;
8369 #endif
8370     case TARGET_NR_openat:
8371         if (!(p = lock_user_string(arg2)))
8372             return -TARGET_EFAULT;
8373         ret = get_errno(do_openat(cpu_env, arg1, p,
8374                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8375                                   arg4));
8376         fd_trans_unregister(ret);
8377         unlock_user(p, arg2, 0);
8378         return ret;
8379 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8380     case TARGET_NR_name_to_handle_at:
8381         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8382         return ret;
8383 #endif
8384 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8385     case TARGET_NR_open_by_handle_at:
8386         ret = do_open_by_handle_at(arg1, arg2, arg3);
8387         fd_trans_unregister(ret);
8388         return ret;
8389 #endif
8390     case TARGET_NR_close:
8391         fd_trans_unregister(arg1);
8392         return get_errno(close(arg1));
8393 
8394     case TARGET_NR_brk:
8395         return do_brk(arg1);
8396 #ifdef TARGET_NR_fork
8397     case TARGET_NR_fork:
8398         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8399 #endif
8400 #ifdef TARGET_NR_waitpid
8401     case TARGET_NR_waitpid:
8402         {
8403             int status;
8404             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8405             if (!is_error(ret) && arg2 && ret
8406                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8407                 return -TARGET_EFAULT;
8408         }
8409         return ret;
8410 #endif
8411 #ifdef TARGET_NR_waitid
8412     case TARGET_NR_waitid:
8413         {
8414             siginfo_t info;
8415             info.si_pid = 0;
8416             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8417             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8418                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8419                     return -TARGET_EFAULT;
8420                 host_to_target_siginfo(p, &info);
8421                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8422             }
8423         }
8424         return ret;
8425 #endif
8426 #ifdef TARGET_NR_creat /* not on alpha */
8427     case TARGET_NR_creat:
8428         if (!(p = lock_user_string(arg1)))
8429             return -TARGET_EFAULT;
8430         ret = get_errno(creat(p, arg2));
8431         fd_trans_unregister(ret);
8432         unlock_user(p, arg1, 0);
8433         return ret;
8434 #endif
8435 #ifdef TARGET_NR_link
8436     case TARGET_NR_link:
8437         {
8438             void * p2;
8439             p = lock_user_string(arg1);
8440             p2 = lock_user_string(arg2);
8441             if (!p || !p2)
8442                 ret = -TARGET_EFAULT;
8443             else
8444                 ret = get_errno(link(p, p2));
8445             unlock_user(p2, arg2, 0);
8446             unlock_user(p, arg1, 0);
8447         }
8448         return ret;
8449 #endif
8450 #if defined(TARGET_NR_linkat)
8451     case TARGET_NR_linkat:
8452         {
8453             void * p2 = NULL;
8454             if (!arg2 || !arg4)
8455                 return -TARGET_EFAULT;
8456             p  = lock_user_string(arg2);
8457             p2 = lock_user_string(arg4);
8458             if (!p || !p2)
8459                 ret = -TARGET_EFAULT;
8460             else
8461                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8462             unlock_user(p, arg2, 0);
8463             unlock_user(p2, arg4, 0);
8464         }
8465         return ret;
8466 #endif
8467 #ifdef TARGET_NR_unlink
8468     case TARGET_NR_unlink:
8469         if (!(p = lock_user_string(arg1)))
8470             return -TARGET_EFAULT;
8471         ret = get_errno(unlink(p));
8472         unlock_user(p, arg1, 0);
8473         return ret;
8474 #endif
8475 #if defined(TARGET_NR_unlinkat)
8476     case TARGET_NR_unlinkat:
8477         if (!(p = lock_user_string(arg2)))
8478             return -TARGET_EFAULT;
8479         ret = get_errno(unlinkat(arg1, p, arg3));
8480         unlock_user(p, arg2, 0);
8481         return ret;
8482 #endif
8483     case TARGET_NR_execve:
8484         {
8485             char **argp, **envp;
8486             int argc, envc;
8487             abi_ulong gp;
8488             abi_ulong guest_argp;
8489             abi_ulong guest_envp;
8490             abi_ulong addr;
8491             char **q;
8492             int total_size = 0;
8493 
8494             argc = 0;
8495             guest_argp = arg2;
8496             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8497                 if (get_user_ual(addr, gp))
8498                     return -TARGET_EFAULT;
8499                 if (!addr)
8500                     break;
8501                 argc++;
8502             }
8503             envc = 0;
8504             guest_envp = arg3;
8505             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8506                 if (get_user_ual(addr, gp))
8507                     return -TARGET_EFAULT;
8508                 if (!addr)
8509                     break;
8510                 envc++;
8511             }
8512 
8513             argp = g_new0(char *, argc + 1);
8514             envp = g_new0(char *, envc + 1);
8515 
8516             for (gp = guest_argp, q = argp; gp;
8517                   gp += sizeof(abi_ulong), q++) {
8518                 if (get_user_ual(addr, gp))
8519                     goto execve_efault;
8520                 if (!addr)
8521                     break;
8522                 if (!(*q = lock_user_string(addr)))
8523                     goto execve_efault;
8524                 total_size += strlen(*q) + 1;
8525             }
8526             *q = NULL;
8527 
8528             for (gp = guest_envp, q = envp; gp;
8529                   gp += sizeof(abi_ulong), q++) {
8530                 if (get_user_ual(addr, gp))
8531                     goto execve_efault;
8532                 if (!addr)
8533                     break;
8534                 if (!(*q = lock_user_string(addr)))
8535                     goto execve_efault;
8536                 total_size += strlen(*q) + 1;
8537             }
8538             *q = NULL;
8539 
8540             if (!(p = lock_user_string(arg1)))
8541                 goto execve_efault;
8542             /* Although execve() is not an interruptible syscall it is
8543              * a special case where we must use the safe_syscall wrapper:
8544              * if we allow a signal to happen before we make the host
8545              * syscall then we will 'lose' it, because at the point of
8546              * execve the process leaves QEMU's control. So we use the
8547              * safe syscall wrapper to ensure that we either take the
8548              * signal as a guest signal, or else it does not happen
8549              * before the execve completes and makes it the other
8550              * program's problem.
8551              */
8552             ret = get_errno(safe_execve(p, argp, envp));
8553             unlock_user(p, arg1, 0);
8554 
8555             goto execve_end;
8556 
8557         execve_efault:
8558             ret = -TARGET_EFAULT;
8559 
8560         execve_end:
8561             for (gp = guest_argp, q = argp; *q;
8562                   gp += sizeof(abi_ulong), q++) {
8563                 if (get_user_ual(addr, gp)
8564                     || !addr)
8565                     break;
8566                 unlock_user(*q, addr, 0);
8567             }
8568             for (gp = guest_envp, q = envp; *q;
8569                   gp += sizeof(abi_ulong), q++) {
8570                 if (get_user_ual(addr, gp)
8571                     || !addr)
8572                     break;
8573                 unlock_user(*q, addr, 0);
8574             }
8575 
8576             g_free(argp);
8577             g_free(envp);
8578         }
8579         return ret;
8580     case TARGET_NR_chdir:
8581         if (!(p = lock_user_string(arg1)))
8582             return -TARGET_EFAULT;
8583         ret = get_errno(chdir(p));
8584         unlock_user(p, arg1, 0);
8585         return ret;
8586 #ifdef TARGET_NR_time
8587     case TARGET_NR_time:
8588         {
8589             time_t host_time;
8590             ret = get_errno(time(&host_time));
8591             if (!is_error(ret)
8592                 && arg1
8593                 && put_user_sal(host_time, arg1))
8594                 return -TARGET_EFAULT;
8595         }
8596         return ret;
8597 #endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        /* Create a filesystem node at the guest path with mode/dev args. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        /* As mknod, but relative to the directory fd passed in arg1. */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        /* Change the mode of the file named by the guest path. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        /* Straight pass-through of fd, offset and whence. */
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        /* Alpha delivers the parent pid in a second return register. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            /* arg1 (source) may legitimately be NULL, e.g. for pseudo
             * filesystems, so only lock it when present. */
            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            /* arg2 (mount target directory) is mandatory. */
            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            /* arg3 (filesystem type) is also optional, e.g. for bind
             * mounts and remounts. */
            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                /* Pass the fs-specific data buffer through untranslated. */
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            /* Unlock in the reverse pattern of the locking above. */
            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        /* Both syscall numbers map onto the plain host umount(). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* Emulated via clock_settime(CLOCK_REALTIME); the host's
             * stime() is obsolete. */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm() cannot fail; return the remaining seconds directly. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        /* pause() always reports EINTR once a signal has been handled. */
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            /* A NULL times pointer means "set to the current time". */
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            /* arg2, when non-NULL, points at two consecutive target
             * timevals: access time then modification time. */
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            /* Same timeval-pair convention as utimes, from arg3. */
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() applies the optional guest sysroot prefix. */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() remaps the name into the guest sysroot if one is set. */
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* The host libc wrapper is invoked with flags == 0; presumably
         * this matches the kernel's flag-less faccessat syscall --
         * NOTE(review): confirm no guest flags argument is being lost. */
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync() never fails and returns nothing. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Translate the target signal number before delivering. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            /* Lock both paths first; on a locking failure we fall through
             * to the unlocks, which presumably tolerate a NULL host
             * pointer -- NOTE(review): confirm unlock_user(NULL,...) is
             * a no-op. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            /* Same pattern as rename, with dirfd-relative paths. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            /* renameat2 adds a flags argument (arg5); forwarded via the
             * sys_renameat2 wrapper. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        /* Create a directory at the guest path with mode arg2. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        /* As mkdir, but relative to the directory fd in arg1. */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        /* Remove the (empty) directory named by the guest path. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Propagate any fd translator attached to the old fd. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        /* do_pipe writes the two fds back into guest memory at arg1. */
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* Same as pipe, with the flags word translated to host bits. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
8905     case TARGET_NR_times:
8906         {
8907             struct target_tms *tmsp;
8908             struct tms tms;
8909             ret = get_errno(times(&tms));
8910             if (arg1) {
8911                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8912                 if (!tmsp)
8913                     return -TARGET_EFAULT;
8914                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8915                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8916                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8917                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8918             }
8919             if (!is_error(ret))
8920                 ret = host_to_target_clock_t(ret);
8921         }
8922         return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            /* A NULL pathname disables process accounting. */
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        /* umount2 carries an extra flags word (arg2), e.g. MNT_DETACH. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        /* ioctl translation is handled by its own dispatch table. */
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Carry over any fd translator onto the new descriptor. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        /* dup3(oldfd, newfd, flags): like dup2 but accepts O_CLOEXEC.
         * The kernel rejects any flag other than O_CLOEXEC with EINVAL. */
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            /* Return a *target* errno, consistent with every other error
             * return in this switch (the original returned host -EINVAL). */
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Carry over any fd translator installed on the old fd. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        /* Start a new session; the new session id is the return value. */
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /* Legacy (old-style) sigaction: translate the target's old
             * sigaction layout to the internal target_sigaction, install
             * it via do_sigaction(), and convert the previous action
             * back for the guest. */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		/* MIPS sa_mask has four words; only word 0 is meaningful
		 * for the old-style call, the rest are cleared. */
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                /* Old-style sa_mask is a single word. */
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel requires an exact sigset size for rt_ calls. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                /* Read-only lock: nothing to copy back. */
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Return the current blocked-signal mask as an old-style
             * (single-word) sigset in the return value. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Replace the blocked-signal mask wholesale; the previous
             * mask is the return value. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /* Old-style sigprocmask.  Alpha passes the mask by value and
             * returns the old mask in the return register pair; other
             * targets pass pointers to old-style sigsets. */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            /* 'how' is only validated when a new mask is supplied; with
             * a NULL new-set pointer the call just queries the old mask. */
            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* rt_ variant requires the exact sigset size in arg4. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            /* 'how' is only validated when a new mask is supplied. */
            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            /* Write the previous mask back only on success and when the
             * guest asked for it (arg3 non-NULL). */
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Report the set of pending (blocked, delivered) signals to
             * guest memory in old-sigset format. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                /* NOTE(review): a full target_sigset_t is locked and
                 * written even when arg2 is smaller -- confirm that is
                 * intended rather than writing only arg2 bytes. */
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            /* Suspend until a signal arrives, with a temporary mask.
             * The mask is stashed in the TaskState so signal delivery
             * code can restore the original mask afterwards. */
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            /* Alpha passes the old-style mask by value in arg1. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* in_sigsuspend tells the signal path to restore the mask,
             * unless the syscall is about to be restarted. */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /* Wait for one of the signals in the set, with an optional
             * timeout; on success the host siginfo is translated back
             * to the guest and the signal number is converted too. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            /* A NULL timeout pointer means wait indefinitely. */
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Identical to rt_sigtimedwait except the timeout is a
             * 64-bit timespec (time64 ABI). */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* Queue a signal with caller-supplied siginfo to a process. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* Thread-directed variant: (tgid, tid, sig, siginfo). */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Signals must be blocked while the guest signal frame is being
         * unwound; retry the syscall if that is not currently possible. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: pretend success, see comment above. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                /* Convert host rlim values (incl. RLIM_INFINITY) back to
                 * the target's representation. */
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* Marshal host rusage into the guest struct at arg2. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        /* Both output pointers (arg1 tv, arg2 tz) are optional. */
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        /* Either pointer may be NULL; only copy in what the guest gave. */
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* old_select passes all arguments via a struct pointed at by arg1 */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* final flag selects 32-bit vs 64-bit time_t timespec layout */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            /* Two guest path strings: target (arg1) and linkpath (arg2). */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            /* arg2 is the directory fd; paths are arg1 and arg3. */
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Emulate /proc/self/exe: report the guest executable's
                 * resolved path rather than the QEMU binary's.
                 */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /*
         * Same as TARGET_NR_readlink but with a directory fd (arg1);
         * pathname is arg2, buffer is arg3, buffer size is arg4.
         *
         * Fixes over the previous version, aligning with the readlink
         * case above:
         *  - do not read 'real' when realpath() failed (it was passed
         *    to snprintf() even though it may be uninitialized);
         *  - never report more bytes than fit in the guest buffer
         *    (ret could exceed arg4 and was then fed to unlock_user(),
         *    over-copying back into guest memory);
         *  - follow readlink(2) semantics: no NUL terminator, return
         *    the number of bytes placed in the buffer (snprintf
         *    truncated and NUL-terminated instead);
         *  - short-circuit a zero-length buffer with EINVAL before the
         *    magic exe check, as the readlink case does.
         */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Emulated /proc/self/exe: report the guest executable. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* Only RESTART2 carries a string argument (the restart command). */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        /*
         * These targets pass the six mmap arguments packed in a guest
         * memory block pointed to by arg1 rather than in registers.
         */
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the file offset in units of 2^MMAP_SHIFT bytes. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Expand the request to cover the whole stack region. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* Shared tail: fstatfs jumps here after filling 'stfs'. */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        /* 64-bit variant: output struct pointer is arg3, not arg2. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* Shared tail: fstatfs64 jumps here after filling 'stfs'. */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point used by older ABIs. */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept is accept4 with zero flags */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv is recvfrom with NULL address arguments */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* last argument: 0 = receive, 1 = send */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send is sendto with NULL address arguments */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* Fill guest buffer (arg1, length arg2) with host randomness. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            /* Actions below take no buffer; pass NULL through. */
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            /* Actions below read log data into the guest buffer at arg2. */
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2 (new value) is optional; NULL queries/keeps current. */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* arg3 (old value out-pointer) is also optional. */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        /* Shared tail: stat/lstat jump here after filling 'st'. */
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamp fields, when both sides have them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter the dispatcher with shifted args. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* rusage output is optional; only collect it if requested. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back if a child was actually reaped. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        /* Field-by-field conversion into the guest's sysinfo layout. */
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point used by older ABIs. */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop is semtimedop with no timeout */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        /* final flag selects 32-bit vs 64-bit time_t timespec layout */
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style cleanup before the whole thread group exits. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        /* Convert guest timex in, call host, convert result back out. */
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                /* adjtimex updates the struct in place on success. */
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
10290 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10291     case TARGET_NR_clock_adjtime:
10292         {
10293             struct timex htx, *phtx = &htx;
10294 
10295             if (target_to_host_timex(phtx, arg2) != 0) {
10296                 return -TARGET_EFAULT;
10297             }
10298             ret = get_errno(clock_adjtime(arg1, phtx));
10299             if (!is_error(ret) && phtx) {
10300                 if (host_to_target_timex(arg2, phtx) != 0) {
10301                     return -TARGET_EFAULT;
10302                 }
10303             }
10304         }
10305         return ret;
10306 #endif
10307 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10308     case TARGET_NR_clock_adjtime64:
10309         {
10310             struct timex htx;
10311 
10312             if (target_to_host_timex64(&htx, arg2) != 0) {
10313                 return -TARGET_EFAULT;
10314             }
10315             ret = get_errno(clock_adjtime(arg1, &htx));
10316             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10317                     return -TARGET_EFAULT;
10318             }
10319         }
10320         return ret;
10321 #endif
10322     case TARGET_NR_getpgid:
10323         return get_errno(getpgid(arg1));
10324     case TARGET_NR_fchdir:
10325         return get_errno(fchdir(arg1));
10326     case TARGET_NR_personality:
10327         return get_errno(personality(arg1));
10328 #ifdef TARGET_NR__llseek /* Not on alpha */
10329     case TARGET_NR__llseek:
10330         {
10331             int64_t res;
10332 #if !defined(__NR_llseek)
10333             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10334             if (res == -1) {
10335                 ret = get_errno(res);
10336             } else {
10337                 ret = 0;
10338             }
10339 #else
10340             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10341 #endif
10342             if ((ret == 0) && put_user_s64(res, arg4)) {
10343                 return -TARGET_EFAULT;
10344             }
10345         }
10346         return ret;
10347 #endif
10348 #ifdef TARGET_NR_getdents
10349     case TARGET_NR_getdents:
10350 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10351 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10352         {
10353             struct target_dirent *target_dirp;
10354             struct linux_dirent *dirp;
10355             abi_long count = arg3;
10356 
10357             dirp = g_try_malloc(count);
10358             if (!dirp) {
10359                 return -TARGET_ENOMEM;
10360             }
10361 
10362             ret = get_errno(sys_getdents(arg1, dirp, count));
10363             if (!is_error(ret)) {
10364                 struct linux_dirent *de;
10365 		struct target_dirent *tde;
10366                 int len = ret;
10367                 int reclen, treclen;
10368 		int count1, tnamelen;
10369 
10370 		count1 = 0;
10371                 de = dirp;
10372                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10373                     return -TARGET_EFAULT;
10374 		tde = target_dirp;
10375                 while (len > 0) {
10376                     reclen = de->d_reclen;
10377                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10378                     assert(tnamelen >= 0);
10379                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10380                     assert(count1 + treclen <= count);
10381                     tde->d_reclen = tswap16(treclen);
10382                     tde->d_ino = tswapal(de->d_ino);
10383                     tde->d_off = tswapal(de->d_off);
10384                     memcpy(tde->d_name, de->d_name, tnamelen);
10385                     de = (struct linux_dirent *)((char *)de + reclen);
10386                     len -= reclen;
10387                     tde = (struct target_dirent *)((char *)tde + treclen);
10388 		    count1 += treclen;
10389                 }
10390 		ret = count1;
10391                 unlock_user(target_dirp, arg2, ret);
10392             }
10393             g_free(dirp);
10394         }
10395 #else
10396         {
10397             struct linux_dirent *dirp;
10398             abi_long count = arg3;
10399 
10400             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10401                 return -TARGET_EFAULT;
10402             ret = get_errno(sys_getdents(arg1, dirp, count));
10403             if (!is_error(ret)) {
10404                 struct linux_dirent *de;
10405                 int len = ret;
10406                 int reclen;
10407                 de = dirp;
10408                 while (len > 0) {
10409                     reclen = de->d_reclen;
10410                     if (reclen > len)
10411                         break;
10412                     de->d_reclen = tswap16(reclen);
10413                     tswapls(&de->d_ino);
10414                     tswapls(&de->d_off);
10415                     de = (struct linux_dirent *)((char *)de + reclen);
10416                     len -= reclen;
10417                 }
10418             }
10419             unlock_user(dirp, arg2, ret);
10420         }
10421 #endif
10422 #else
10423         /* Implement getdents in terms of getdents64 */
10424         {
10425             struct linux_dirent64 *dirp;
10426             abi_long count = arg3;
10427 
10428             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10429             if (!dirp) {
10430                 return -TARGET_EFAULT;
10431             }
10432             ret = get_errno(sys_getdents64(arg1, dirp, count));
10433             if (!is_error(ret)) {
10434                 /* Convert the dirent64 structs to target dirent.  We do this
10435                  * in-place, since we can guarantee that a target_dirent is no
10436                  * larger than a dirent64; however this means we have to be
10437                  * careful to read everything before writing in the new format.
10438                  */
10439                 struct linux_dirent64 *de;
10440                 struct target_dirent *tde;
10441                 int len = ret;
10442                 int tlen = 0;
10443 
10444                 de = dirp;
10445                 tde = (struct target_dirent *)dirp;
10446                 while (len > 0) {
10447                     int namelen, treclen;
10448                     int reclen = de->d_reclen;
10449                     uint64_t ino = de->d_ino;
10450                     int64_t off = de->d_off;
10451                     uint8_t type = de->d_type;
10452 
10453                     namelen = strlen(de->d_name);
10454                     treclen = offsetof(struct target_dirent, d_name)
10455                         + namelen + 2;
10456                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10457 
10458                     memmove(tde->d_name, de->d_name, namelen + 1);
10459                     tde->d_ino = tswapal(ino);
10460                     tde->d_off = tswapal(off);
10461                     tde->d_reclen = tswap16(treclen);
10462                     /* The target_dirent type is in what was formerly a padding
10463                      * byte at the end of the structure:
10464                      */
10465                     *(((char *)tde) + treclen - 1) = type;
10466 
10467                     de = (struct linux_dirent64 *)((char *)de + reclen);
10468                     tde = (struct target_dirent *)((char *)tde + treclen);
10469                     len -= reclen;
10470                     tlen += treclen;
10471                 }
10472                 ret = tlen;
10473             }
10474             unlock_user(dirp, arg2, ret);
10475         }
10476 #endif
10477         return ret;
10478 #endif /* TARGET_NR_getdents */
10479 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            /* The guest buffer is filled directly by the host syscall,
             * then fixed up for endianness in place below. */
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;       /* bytes actually returned by the kernel */
                int reclen;
                de = dirp;
                /* Walk the packed records, byteswapping the fixed-width
                 * fields of each one.  struct linux_dirent64 has the same
                 * layout on host and target, so only endianness differs. */
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;   /* truncated/corrupt record: stop early */
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            /* ret (byte count) doubles as the length to copy back out. */
            unlock_user(dirp, arg2, ret);
        }
        return ret;
10506 #endif /* TARGET_NR_getdents64 */
10507 #if defined(TARGET_NR__newselect)
10508     case TARGET_NR__newselect:
10509         return do_select(arg1, arg2, arg3, arg4, arg5);
10510 #endif
10511 #ifdef TARGET_NR_poll
10512     case TARGET_NR_poll:
10513         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10514 #endif
10515 #ifdef TARGET_NR_ppoll
10516     case TARGET_NR_ppoll:
10517         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10518 #endif
10519 #ifdef TARGET_NR_ppoll_time64
10520     case TARGET_NR_ppoll_time64:
10521         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10522 #endif
10523     case TARGET_NR_flock:
10524         /* NOTE: the flock constant seems to be the same for every
10525            Linux platform */
10526         return get_errno(safe_flock(arg1, arg2));
10527     case TARGET_NR_readv:
10528         {
10529             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10530             if (vec != NULL) {
10531                 ret = get_errno(safe_readv(arg1, vec, arg3));
10532                 unlock_iovec(vec, arg2, arg3, 1);
10533             } else {
10534                 ret = -host_to_target_errno(errno);
10535             }
10536         }
10537         return ret;
10538     case TARGET_NR_writev:
10539         {
10540             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10541             if (vec != NULL) {
10542                 ret = get_errno(safe_writev(arg1, vec, arg3));
10543                 unlock_iovec(vec, arg2, arg3, 0);
10544             } else {
10545                 ret = -host_to_target_errno(errno);
10546             }
10547         }
10548         return ret;
10549 #if defined(TARGET_NR_preadv)
10550     case TARGET_NR_preadv:
10551         {
10552             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10553             if (vec != NULL) {
10554                 unsigned long low, high;
10555 
10556                 target_to_host_low_high(arg4, arg5, &low, &high);
10557                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10558                 unlock_iovec(vec, arg2, arg3, 1);
10559             } else {
10560                 ret = -host_to_target_errno(errno);
10561            }
10562         }
10563         return ret;
10564 #endif
10565 #if defined(TARGET_NR_pwritev)
10566     case TARGET_NR_pwritev:
10567         {
10568             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10569             if (vec != NULL) {
10570                 unsigned long low, high;
10571 
10572                 target_to_host_low_high(arg4, arg5, &low, &high);
10573                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10574                 unlock_iovec(vec, arg2, arg3, 0);
10575             } else {
10576                 ret = -host_to_target_errno(errno);
10577            }
10578         }
10579         return ret;
10580 #endif
10581     case TARGET_NR_getsid:
10582         return get_errno(getsid(arg1));
10583 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10584     case TARGET_NR_fdatasync:
10585         return get_errno(fdatasync(arg1));
10586 #endif
10587     case TARGET_NR_sched_getaffinity:
10588         {
10589             unsigned int mask_size;
10590             unsigned long *mask;
10591 
10592             /*
10593              * sched_getaffinity needs multiples of ulong, so need to take
10594              * care of mismatches between target ulong and host ulong sizes.
10595              */
10596             if (arg2 & (sizeof(abi_ulong) - 1)) {
10597                 return -TARGET_EINVAL;
10598             }
10599             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10600 
10601             mask = alloca(mask_size);
10602             memset(mask, 0, mask_size);
10603             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10604 
10605             if (!is_error(ret)) {
10606                 if (ret > arg2) {
10607                     /* More data returned than the caller's buffer will fit.
10608                      * This only happens if sizeof(abi_long) < sizeof(long)
10609                      * and the caller passed us a buffer holding an odd number
10610                      * of abi_longs. If the host kernel is actually using the
10611                      * extra 4 bytes then fail EINVAL; otherwise we can just
10612                      * ignore them and only copy the interesting part.
10613                      */
10614                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10615                     if (numcpus > arg2 * 8) {
10616                         return -TARGET_EINVAL;
10617                     }
10618                     ret = arg2;
10619                 }
10620 
10621                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10622                     return -TARGET_EFAULT;
10623                 }
10624             }
10625         }
10626         return ret;
10627     case TARGET_NR_sched_setaffinity:
10628         {
10629             unsigned int mask_size;
10630             unsigned long *mask;
10631 
10632             /*
10633              * sched_setaffinity needs multiples of ulong, so need to take
10634              * care of mismatches between target ulong and host ulong sizes.
10635              */
10636             if (arg2 & (sizeof(abi_ulong) - 1)) {
10637                 return -TARGET_EINVAL;
10638             }
10639             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10640             mask = alloca(mask_size);
10641 
10642             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10643             if (ret) {
10644                 return ret;
10645             }
10646 
10647             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10648         }
10649     case TARGET_NR_getcpu:
10650         {
10651             unsigned cpu, node;
10652             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10653                                        arg2 ? &node : NULL,
10654                                        NULL));
10655             if (is_error(ret)) {
10656                 return ret;
10657             }
10658             if (arg1 && put_user_u32(cpu, arg1)) {
10659                 return -TARGET_EFAULT;
10660             }
10661             if (arg2 && put_user_u32(node, arg2)) {
10662                 return -TARGET_EFAULT;
10663             }
10664         }
10665         return ret;
10666     case TARGET_NR_sched_setparam:
10667         {
10668             struct sched_param *target_schp;
10669             struct sched_param schp;
10670 
10671             if (arg2 == 0) {
10672                 return -TARGET_EINVAL;
10673             }
10674             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10675                 return -TARGET_EFAULT;
10676             schp.sched_priority = tswap32(target_schp->sched_priority);
10677             unlock_user_struct(target_schp, arg2, 0);
10678             return get_errno(sched_setparam(arg1, &schp));
10679         }
10680     case TARGET_NR_sched_getparam:
10681         {
10682             struct sched_param *target_schp;
10683             struct sched_param schp;
10684 
10685             if (arg2 == 0) {
10686                 return -TARGET_EINVAL;
10687             }
10688             ret = get_errno(sched_getparam(arg1, &schp));
10689             if (!is_error(ret)) {
10690                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10691                     return -TARGET_EFAULT;
10692                 target_schp->sched_priority = tswap32(schp.sched_priority);
10693                 unlock_user_struct(target_schp, arg2, 1);
10694             }
10695         }
10696         return ret;
10697     case TARGET_NR_sched_setscheduler:
10698         {
10699             struct sched_param *target_schp;
10700             struct sched_param schp;
10701             if (arg3 == 0) {
10702                 return -TARGET_EINVAL;
10703             }
10704             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10705                 return -TARGET_EFAULT;
10706             schp.sched_priority = tswap32(target_schp->sched_priority);
10707             unlock_user_struct(target_schp, arg3, 0);
10708             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10709         }
10710     case TARGET_NR_sched_getscheduler:
10711         return get_errno(sched_getscheduler(arg1));
10712     case TARGET_NR_sched_yield:
10713         return get_errno(sched_yield());
10714     case TARGET_NR_sched_get_priority_max:
10715         return get_errno(sched_get_priority_max(arg1));
10716     case TARGET_NR_sched_get_priority_min:
10717         return get_errno(sched_get_priority_min(arg1));
10718 #ifdef TARGET_NR_sched_rr_get_interval
10719     case TARGET_NR_sched_rr_get_interval:
10720         {
10721             struct timespec ts;
10722             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10723             if (!is_error(ret)) {
10724                 ret = host_to_target_timespec(arg2, &ts);
10725             }
10726         }
10727         return ret;
10728 #endif
10729 #ifdef TARGET_NR_sched_rr_get_interval_time64
10730     case TARGET_NR_sched_rr_get_interval_time64:
10731         {
10732             struct timespec ts;
10733             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10734             if (!is_error(ret)) {
10735                 ret = host_to_target_timespec64(arg2, &ts);
10736             }
10737         }
10738         return ret;
10739 #endif
10740 #if defined(TARGET_NR_nanosleep)
10741     case TARGET_NR_nanosleep:
10742         {
10743             struct timespec req, rem;
10744             target_to_host_timespec(&req, arg1);
10745             ret = get_errno(safe_nanosleep(&req, &rem));
10746             if (is_error(ret) && arg2) {
10747                 host_to_target_timespec(arg2, &rem);
10748             }
10749         }
10750         return ret;
10751 #endif
10752     case TARGET_NR_prctl:
10753         switch (arg1) {
10754         case PR_GET_PDEATHSIG:
10755         {
10756             int deathsig;
10757             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10758             if (!is_error(ret) && arg2
10759                 && put_user_s32(deathsig, arg2)) {
10760                 return -TARGET_EFAULT;
10761             }
10762             return ret;
10763         }
10764 #ifdef PR_GET_NAME
10765         case PR_GET_NAME:
10766         {
10767             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10768             if (!name) {
10769                 return -TARGET_EFAULT;
10770             }
10771             ret = get_errno(prctl(arg1, (unsigned long)name,
10772                                   arg3, arg4, arg5));
10773             unlock_user(name, arg2, 16);
10774             return ret;
10775         }
10776         case PR_SET_NAME:
10777         {
10778             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10779             if (!name) {
10780                 return -TARGET_EFAULT;
10781             }
10782             ret = get_errno(prctl(arg1, (unsigned long)name,
10783                                   arg3, arg4, arg5));
10784             unlock_user(name, arg2, 0);
10785             return ret;
10786         }
10787 #endif
10788 #ifdef TARGET_MIPS
10789         case TARGET_PR_GET_FP_MODE:
10790         {
10791             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10792             ret = 0;
10793             if (env->CP0_Status & (1 << CP0St_FR)) {
10794                 ret |= TARGET_PR_FP_MODE_FR;
10795             }
10796             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10797                 ret |= TARGET_PR_FP_MODE_FRE;
10798             }
10799             return ret;
10800         }
10801         case TARGET_PR_SET_FP_MODE:
10802         {
10803             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10804             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10805             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10806             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10807             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10808 
10809             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10810                                             TARGET_PR_FP_MODE_FRE;
10811 
10812             /* If nothing to change, return right away, successfully.  */
10813             if (old_fr == new_fr && old_fre == new_fre) {
10814                 return 0;
10815             }
10816             /* Check the value is valid */
10817             if (arg2 & ~known_bits) {
10818                 return -TARGET_EOPNOTSUPP;
10819             }
10820             /* Setting FRE without FR is not supported.  */
10821             if (new_fre && !new_fr) {
10822                 return -TARGET_EOPNOTSUPP;
10823             }
10824             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10825                 /* FR1 is not supported */
10826                 return -TARGET_EOPNOTSUPP;
10827             }
10828             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10829                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10830                 /* cannot set FR=0 */
10831                 return -TARGET_EOPNOTSUPP;
10832             }
10833             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10834                 /* Cannot set FRE=1 */
10835                 return -TARGET_EOPNOTSUPP;
10836             }
10837 
10838             int i;
10839             fpr_t *fpr = env->active_fpu.fpr;
10840             for (i = 0; i < 32 ; i += 2) {
10841                 if (!old_fr && new_fr) {
10842                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10843                 } else if (old_fr && !new_fr) {
10844                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10845                 }
10846             }
10847 
10848             if (new_fr) {
10849                 env->CP0_Status |= (1 << CP0St_FR);
10850                 env->hflags |= MIPS_HFLAG_F64;
10851             } else {
10852                 env->CP0_Status &= ~(1 << CP0St_FR);
10853                 env->hflags &= ~MIPS_HFLAG_F64;
10854             }
10855             if (new_fre) {
10856                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10857                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10858                     env->hflags |= MIPS_HFLAG_FRE;
10859                 }
10860             } else {
10861                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10862                 env->hflags &= ~MIPS_HFLAG_FRE;
10863             }
10864 
10865             return 0;
10866         }
10867 #endif /* MIPS */
10868 #ifdef TARGET_AARCH64
10869         case TARGET_PR_SVE_SET_VL:
10870             /*
10871              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10872              * PR_SVE_VL_INHERIT.  Note the kernel definition
10873              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10874              * even though the current architectural maximum is VQ=16.
10875              */
10876             ret = -TARGET_EINVAL;
10877             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10878                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10879                 CPUARMState *env = cpu_env;
10880                 ARMCPU *cpu = env_archcpu(env);
10881                 uint32_t vq, old_vq;
10882 
10883                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10884                 vq = MAX(arg2 / 16, 1);
10885                 vq = MIN(vq, cpu->sve_max_vq);
10886 
10887                 if (vq < old_vq) {
10888                     aarch64_sve_narrow_vq(env, vq);
10889                 }
10890                 env->vfp.zcr_el[1] = vq - 1;
10891                 arm_rebuild_hflags(env);
10892                 ret = vq * 16;
10893             }
10894             return ret;
10895         case TARGET_PR_SVE_GET_VL:
10896             ret = -TARGET_EINVAL;
10897             {
10898                 ARMCPU *cpu = env_archcpu(cpu_env);
10899                 if (cpu_isar_feature(aa64_sve, cpu)) {
10900                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10901                 }
10902             }
10903             return ret;
10904         case TARGET_PR_PAC_RESET_KEYS:
10905             {
10906                 CPUARMState *env = cpu_env;
10907                 ARMCPU *cpu = env_archcpu(env);
10908 
10909                 if (arg3 || arg4 || arg5) {
10910                     return -TARGET_EINVAL;
10911                 }
10912                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10913                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10914                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10915                                TARGET_PR_PAC_APGAKEY);
10916                     int ret = 0;
10917                     Error *err = NULL;
10918 
10919                     if (arg2 == 0) {
10920                         arg2 = all;
10921                     } else if (arg2 & ~all) {
10922                         return -TARGET_EINVAL;
10923                     }
10924                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10925                         ret |= qemu_guest_getrandom(&env->keys.apia,
10926                                                     sizeof(ARMPACKey), &err);
10927                     }
10928                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10929                         ret |= qemu_guest_getrandom(&env->keys.apib,
10930                                                     sizeof(ARMPACKey), &err);
10931                     }
10932                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10933                         ret |= qemu_guest_getrandom(&env->keys.apda,
10934                                                     sizeof(ARMPACKey), &err);
10935                     }
10936                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10937                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10938                                                     sizeof(ARMPACKey), &err);
10939                     }
10940                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10941                         ret |= qemu_guest_getrandom(&env->keys.apga,
10942                                                     sizeof(ARMPACKey), &err);
10943                     }
10944                     if (ret != 0) {
10945                         /*
10946                          * Some unknown failure in the crypto.  The best
10947                          * we can do is log it and fail the syscall.
10948                          * The real syscall cannot fail this way.
10949                          */
10950                         qemu_log_mask(LOG_UNIMP,
10951                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10952                                       error_get_pretty(err));
10953                         error_free(err);
10954                         return -TARGET_EIO;
10955                     }
10956                     return 0;
10957                 }
10958             }
10959             return -TARGET_EINVAL;
10960         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10961             {
10962                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10963                 CPUARMState *env = cpu_env;
10964                 ARMCPU *cpu = env_archcpu(env);
10965 
10966                 if (cpu_isar_feature(aa64_mte, cpu)) {
10967                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10968                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10969                 }
10970 
10971                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10972                     return -TARGET_EINVAL;
10973                 }
10974                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10975 
10976                 if (cpu_isar_feature(aa64_mte, cpu)) {
10977                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10978                     case TARGET_PR_MTE_TCF_NONE:
10979                     case TARGET_PR_MTE_TCF_SYNC:
10980                     case TARGET_PR_MTE_TCF_ASYNC:
10981                         break;
10982                     default:
10983                         return -EINVAL;
10984                     }
10985 
10986                     /*
10987                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10988                      * Note that the syscall values are consistent with hw.
10989                      */
10990                     env->cp15.sctlr_el[1] =
10991                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10992                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10993 
10994                     /*
10995                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10996                      * Note that the syscall uses an include mask,
10997                      * and hardware uses an exclude mask -- invert.
10998                      */
10999                     env->cp15.gcr_el1 =
11000                         deposit64(env->cp15.gcr_el1, 0, 16,
11001                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
11002                     arm_rebuild_hflags(env);
11003                 }
11004                 return 0;
11005             }
        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
            {
                /*
                 * Report the current tagged-address ABI state: bit 0 is
                 * the enable flag; with MTE, the TCF mode and the tag
                 * include mask are folded into the return value.
                 */
                abi_long ret = 0;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                /* The unused arguments must all be zero. */
                if (arg2 || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (env->tagged_addr_enable) {
                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
                }
                if (cpu_isar_feature(aa64_mte, cpu)) {
                    /* See above. */
                    /* Read the TCF mode back from SCTLR_EL1[TCF0]. */
                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
                            << TARGET_PR_MTE_TCF_SHIFT);
                    /* GCR_EL1 holds an exclude mask; invert to include. */
                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
                                    ~env->cp15.gcr_el1);
                }
                return ret;
            }
11027 #endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need.  A guest-installed filter would apply to QEMU's own
             * host syscalls and could break the emulation itself. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
11036         }
11037         break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        /* x86-specific prctl (segment bases etc.); handled per-target. */
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /*
         * The 64-bit offset is passed as a 32-bit register pair; some
         * ABIs align such pairs, inserting a padding register, in which
         * case the offset actually lives in arg5/arg6.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back only the bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        /* Same register-pair shuffling as pread64 above. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Nothing to copy back for a write. */
        unlock_user(p, arg2, 0);
        return ret;
#endif
11078     case TARGET_NR_getcwd:
11079         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11080             return -TARGET_EFAULT;
11081         ret = get_errno(sys_getcwd1(p, arg2));
11082         unlock_user(p, arg1, ret);
11083         return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        /*
         * capget/capset share their marshalling: a header that is always
         * read and written back (the kernel updates its version field),
         * plus 1 or 2 data structs depending on the capability ABI
         * version requested by the guest.
         */
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            /* capget writes the data array; capset reads it. */
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* Copy the capability sets back to the guest. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                /* capset writes nothing back to guest memory. */
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        /* Alternate signal stack setup needs the CPU state; see helper. */
        return do_sigaltstack(arg1, arg2, cpu_env);
11156 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        /* sendfile: offset pointer holds a target abi_long ("sal"). */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Write the updated offset back to the guest. */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        /* sendfile64: same as above but with an explicit 64-bit offset. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Route vfork through the common fork path with vfork's flags. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /* Fetch the host limit and convert it to the target layout. */
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* Truncate path to a 64-bit length split across registers. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        /* 64-bit ftruncate; register-pair assembly is done per-target. */
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() may redirect the filename into the emulated sysroot. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* Like stat64, but does not follow a final symlink. */
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat-style: dirfd in arg1, path in arg2, flags in arg4. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            /*
             * Try the host statx syscall first; if the host kernel lacks
             * it, fall back to fstatat and synthesize the statx fields
             * we can recover from a plain struct stat.
             */
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only ENOSYS falls through to the fstatat emulation. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero first: fields stat cannot provide stay cleared. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                /* Only whole-second timestamps are available here. */
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* low2high*: convert the target's legacy 16-bit ids to host ids. */
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* high2low*: narrow host ids to the target's legacy 16-bit ids. */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11372     case TARGET_NR_getgroups:
11373         {
11374             int gidsetsize = arg1;
11375             target_id *target_grouplist;
11376             gid_t *grouplist;
11377             int i;
11378 
11379             grouplist = alloca(gidsetsize * sizeof(gid_t));
11380             ret = get_errno(getgroups(gidsetsize, grouplist));
11381             if (gidsetsize == 0)
11382                 return ret;
11383             if (!is_error(ret)) {
11384                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11385                 if (!target_grouplist)
11386                     return -TARGET_EFAULT;
11387                 for(i = 0;i < ret; i++)
11388                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11389                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11390             }
11391         }
11392         return ret;
11393     case TARGET_NR_setgroups:
11394         {
11395             int gidsetsize = arg1;
11396             target_id *target_grouplist;
11397             gid_t *grouplist = NULL;
11398             int i;
11399             if (gidsetsize) {
11400                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11401                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11402                 if (!target_grouplist) {
11403                     return -TARGET_EFAULT;
11404                 }
11405                 for (i = 0; i < gidsetsize; i++) {
11406                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11407                 }
11408                 unlock_user(target_grouplist, arg2, 0);
11409             }
11410             return get_errno(setgroups(gidsetsize, grouplist));
11411         }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* dirfd-relative chown; arg5 carries the AT_* flags. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* Set real/effective/saved uids, widening legacy 16-bit ids. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy all three ids out; fault if any pointer is bad. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /*
         * Set real/effective/saved gids, widening legacy 16-bit ids.
         * Guard on TARGET_NR_setresgid, the syscall actually handled
         * here (previously this was wrongly gated on TARGET_NR_getresgid).
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Copy all three ids out; fault if any pointer is bad. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; the old id is returned as-is. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
11479 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* *32 variants take full 32-bit ids: no high/low conversion. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
11492 
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            /* Effective uid is delivered in register a4 (second result). */
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        /* Real uid is the primary return value. */
        return get_errno(getuid());
#endif
11502 #endif
11503 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11504    /* Alpha specific */
11505     case TARGET_NR_getxgid:
11506          {
11507             uid_t egid;
11508             egid=getegid();
11509             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11510          }
11511         return get_errno(getgid());
11512 #endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                /*
                 * Report the software FP control word: trap enables live
                 * in env->swcr, while the live status bits are refreshed
                 * from the hardware FPCR (status field at bits 35+).
                 */
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                /* Install a new software FP control word from the guest. */
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Keep only the rounding-mode field, then merge in swcr. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                /*
                 * Raise the FP exceptions named in *arg2, delivering
                 * SIGFPE for any newly-raised, trap-enabled exception.
                 */
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Successive ifs: the last matching bit wins. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            /*
             * OSF-style sigprocmask: the mask is passed by value and the
             * previous mask is returned as the syscall result instead of
             * through a pointer.
             */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* Hand the old mask back as the return value. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11669 
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* *32 variants use full 32-bit ids: no high/low conversion. */
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* 32-bit-id variant of getgroups. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /*
             * Reject negative sizes and bound the alloca() below at
             * NGROUPS_MAX (the kernel never reports more than that).
             */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size 0 just queries the group count; nothing to copy. */
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* 32-bit-id variant of setgroups. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            /* Match the kernel's EINVAL check and bound the alloca(). */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            /*
             * Skip the copy-in entirely for size 0, as the non-32
             * setgroups does: setgroups(0, NULL) validly clears the
             * supplementary group list and must not fault on arg2.
             */
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* *32 variants use full 32-bit ids: no high/low conversion. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy all three ids out; fault if any pointer is bad. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        /* *32 variants use full 32-bit ids: no high/low conversion. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(addr, length, vec): per-page residency query. */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /*
             * NOTE(review): arg3 points to the output vector (one byte
             * per page), not a C string, so lock_user_string looks
             * suspicious here -- the needed length would be
             * (arg2 + pagesize - 1) / pagesize writable bytes.  Also,
             * on success ret == 0, so unlock_user(p, arg3, ret) copies
             * nothing back when the mapping is remapped (DEBUG_REMAP).
             * TODO: confirm and fix separately.
             */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise() returns the error number directly rather than
         * setting errno, hence the explicit negate-and-translate.
         */
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x numbers the POSIX_FADV_* advice values differently from
         * other targets; remap them to the host's numbering.
         */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /* 32-bit fcntl64: like fcntl but lock commands take struct
         * flock64. Non-lock commands fall through to do_fcntl().
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI (non-EABI) ARM lays out struct flock64 differently. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* The 64-bit offset is split across a 32-bit register pair; some
         * ABIs align that pair to an even register, inserting a padding
         * argument, so shift the arguments down when that applies.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        /* list/llistxattr(2): enumerate extended attribute names of a
         * path (llistxattr does not dereference a trailing symlink).
         * A zero arg2 queries only the required buffer size.
         */
        void *p, *b = 0; /* this local 'p' shadows the outer scratch pointer */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
11997     case TARGET_NR_flistxattr:
11998     {
11999         void *b = 0;
12000         if (arg2) {
12001             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12002             if (!b) {
12003                 return -TARGET_EFAULT;
12004             }
12005         }
12006         ret = get_errno(flistxattr(arg1, b, arg3));
12007         unlock_user(b, arg2, arg3);
12008         return ret;
12009     }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* set/lsetxattr(2): set one extended attribute on a path.
             * arg3/arg4 are the value buffer and its length; arg5 flags.
             */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user() tolerates a NULL host pointer from a failed lock. */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            /* fsetxattr(2): same as setxattr but on an open fd (arg1). */
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            /* get/lgetxattr(2): read one attribute's value; a zero arg3
             * queries only the value's size.
             */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
12081     case TARGET_NR_fgetxattr:
12082         {
12083             void *n, *v = 0;
12084             if (arg3) {
12085                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12086                 if (!v) {
12087                     return -TARGET_EFAULT;
12088                 }
12089             }
12090             n = lock_user_string(arg2);
12091             if (n) {
12092                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12093             } else {
12094                 ret = -TARGET_EFAULT;
12095             }
12096             unlock_user(n, arg2, 0);
12097             unlock_user(v, arg3, arg4);
12098         }
12099         return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            /* remove/lremovexattr(2): delete one attribute from a path. */
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            /* fremovexattr(2): delete one attribute from an open fd. */
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* Per-target TLS pointer setup; unsupported targets get ENOSYS. */
#if defined(TARGET_MIPS)
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* The low byte must be clear; CRIS stores the value in PR_PID. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; guests fall back to uname() for the domain name. */
        return -TARGET_ENOSYS;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        /* Same as clock_settime but the guest passes a 64-bit timespec. */
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        /* Same as clock_gettime but returns a 64-bit guest timespec. */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        /* clock_getres(2): report a clock's resolution. A NULL res
         * pointer (arg2 == 0) is valid and means "don't store", so only
         * treat a failed copy-out as EFAULT when arg2 is non-NULL --
         * the old code silently ignored copy-out failures.
         */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2) {
            if (host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        /* 64-bit-time variant of clock_getres. As above, a NULL res
         * pointer is permitted; report EFAULT only when a non-NULL
         * guest pointer cannot be written (previously ignored).
         */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2) {
            if (host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        /* The request and the remaining-time result share one host
         * timespec: &ts is passed for both parameters when arg4 != 0.
         */
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* 64-bit-time variant; behavior mirrors clock_nanosleep above. */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12285 
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* The kernel clears this guest address on thread exit; pass the
         * host view of the guest pointer straight through.
         */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif

#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* arg3, if non-NULL, points to a guest array of two
             * timespecs (atime, mtime); arg2 NULL means "operate on the
             * fd arg1 itself", per utimensat(2)'s NULL-path extension.
             */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* 64-bit-time variant; same logic with 64-bit guest timespecs. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        /* Register a translator so inotify events read from this fd get
         * byte-swapped/laid out for the guest.
         */
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Fail with EFAULT when the pathname cannot be read from guest
         * memory instead of handing a NULL path to the host syscall
         * (the old code did not check the lock_user_string() result).
         */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the string is locked starting one byte
             * BEFORE arg1 -- presumably to recover the leading '/' of
             * the queue name that the guest's libc stripped off before
             * making the syscall, since the host mq_open() requires the
             * slash-prefixed form. This assumes the byte at arg1 - 1 is
             * readable guest memory containing that '/' -- confirm.
             */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* Same arg1 - 1 name trick as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
12444 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* The old code did not check the message-buffer lock and
             * leaked the lock on the timespec-copy failure paths.
             */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            /* 64-bit-time variant; same lock-check and no-leak handling
             * as mq_timedsend above.
             */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12487 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* The old code did not check the buffer lock, leaked it on
             * the timespec-copy failure paths, and ignored the result of
             * writing the priority back to the guest.
             */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            /* 64-bit-time variant; same lock-check, no-leak, and
             * checked priority write-back as mq_timedreceive above.
             */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12541 
12542     /* Not implemented for now... */
12543 /*     case TARGET_NR_mq_notify: */
12544 /*         break; */
12545 
12546     case TARGET_NR_mq_getsetattr:
12547         {
12548             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12549             ret = 0;
12550             if (arg2 != 0) {
12551                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12552                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12553                                            &posix_mq_attr_out));
12554             } else if (arg3 != 0) {
12555                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12556             }
12557             if (ret == 0 && arg3 != 0) {
12558                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12559             }
12560         }
12561         return ret;
12562 #endif
12563 
12564 #ifdef CONFIG_SPLICE
12565 #ifdef TARGET_NR_tee
12566     case TARGET_NR_tee:
12567         {
12568             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12569         }
12570         return ret;
12571 #endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* splice(2): move data between fds; arg2/arg4 are optional
             * guest pointers to 64-bit offsets, read before the call and
             * written back (possibly updated) afterwards.
             */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            /* vmsplice(2): splice the guest iovec's pages into pipe fd
             * arg1. (The case label previously used a hard tab against
             * the file's four-space style.)
             */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() failed and left the cause in errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Register a translator so the 64-bit counter read from/written
         * to this fd is byte-swapped for the guest.
         */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate the guest's O_NONBLOCK/O_CLOEXEC flag bits, which
         * may differ numerically from the host's, by hand.
         */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs the two 64-bit values (offset, len) each arrive
         * as a pair of 32-bit registers.
         */
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a padding argument before the first 64-bit
         * pair, shifting everything up by one register.
         */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        /* Argument order is fd, flags, offset, nbytes. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* signalfd4(fd, mask, sizemask, flags): mask conversion and fd
         * handling are done inside do_signalfd4(). */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd() is signalfd4() with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* Pass the historical size hint straight through to the host. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* Translate target open-flag bits (e.g. CLOEXEC) to host bits. */
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    /* epoll_ctl(epfd, op, fd, event): convert the guest epoll_event
     * (if any) to host layout before calling the host syscall. */
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             * For DEL we therefore pass &ep without filling it in.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12735 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    /*
     * Shared implementation for epoll_wait and epoll_pwait: collect
     * events into a host-side array, then byte-swap them back into the
     * guest's event buffer at arg2.
     */
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the allocations below stay sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-layout staging buffer for the kernel to fill in. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            /* Optional signal mask: convert from target layout. */
            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait == epoll_pwait with a NULL signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of events; swap each back to the guest. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /*
         * New limits for RLIMIT_AS/DATA/STACK are deliberately not
         * passed to the host (rnewp stays NULL) -- presumably so the
         * guest cannot constrain QEMU's own address space; confirm
         * against the matching setrlimit handling elsewhere in this file.
         */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back out to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Write the host name directly into the guest buffer at arg1,
         * which must be writable for arg2 bytes. */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!buf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(buf, arg2));
        unlock_user(buf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /*
         * Emulated compare-and-exchange on a 32-bit guest word at arg6:
         * if *arg6 == arg2 (expected), store arg1 (new value); always
         * return the value originally read.
         * should use start_exclusive from main.c -- as written this is
         * not atomic with respect to other guest CPUs.
         */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /*
             * Unreadable guest address: queue SIGSEGV and bail out.
             * Returning here also avoids comparing/returning the
             * uninitialized mem_value (the old code fell through).
             */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this?  Returns success unconditionally. */
        return 0;
#endif
12893 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Claim a slot in the g_posix_timers table for this timer. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* Table full: no free host timer slots. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            /* Optional guest sigevent: convert to host layout. */
            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an opaque id: magic tag + table index. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12929 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        /* get_timer_id() validates the magic tag and yields the table index. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* A new_value pointer is mandatory for settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Optionally copy the previous setting back to the guest. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12957 
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as timer_settime but with 64-bit time_t itimerspecs
         * (the *_itimerspec64 converters) for 32-bit guests. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12983 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            /* Nowhere to write the result. */
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied out even when the host call
             * failed -- confirm whether that is intentional. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13006 
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value
         * 64-bit time_t variant of timer_gettime for 32-bit guests. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13029 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid -- returns the host overrun count. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
13045 
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the table slot so it can be reused. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
13062 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Clock id passes through; flag bits are translated to host. */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif
13068 
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* NOTE(review): its_curr is copied out even if the host call
             * failed -- confirm whether that is intentional. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        /* 64-bit time_t variant of timerfd_gettime for 32-bit guests. */
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13096 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* new_value is optional at this level; NULL passes through. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* NOTE(review): its_old is copied out even if the host call
             * failed -- confirm whether that is intentional. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        /* 64-bit time_t variant of timerfd_settime for 32-bit guests. */
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13142 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* ioprio_get(which, who): arguments pass through unchanged. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* ioprio_set(which, who, ioprio): arguments pass through unchanged. */
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
13152 
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* setns(fd, nstype): direct passthrough to the host. */
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* unshare(flags): direct passthrough to the host. */
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        /* kcmp(pid1, pid2, type, idx1, idx2): direct passthrough. */
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* memfd_create(name, flags): copy the name in from the guest. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* A plain memory fd needs no data translation. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        /* membarrier(cmd, flags): direct passthrough to the host. */
        return get_errno(membarrier(arg1, arg2));
#endif
13185 
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        /* copy_file_range(fd_in, off_in*, fd_out, off_out*, len, flags):
         * read the optional 64-bit offsets from guest memory, call the
         * host, and write the updated offsets back on success. */
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                /* The host call advanced the offsets; reflect that back. */
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
13222 
13223     default:
13224         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13225         return -TARGET_ENOSYS;
13226     }
13227     return ret;
13228 }
13229 
/*
 * Top-level guest syscall entry point: wraps do_syscall1() with the
 * record/replay hooks and optional strace-style logging.  Returns the
 * value do_syscall1() produced (host result or -TARGET_Exxx).
 */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    /* Record/replay hook: must run before the syscall executes. */
    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    /* Record/replay hook: log the result after the syscall completes. */
    record_syscall_return(cpu, num, ret);
    return ret;
}
13270