xref: /openbmc/qemu/linux-user/syscall.c (revision ee1bf83d)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
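
/*
 * As a sketch of the classification the masks above are meant to express
 * (the authoritative checks are the ones performed in do_fork() later in
 * this file):
 *
 *   if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *       // looks like pthread_create(): only CSIGNAL, the optional
 *       // thread flags and the ignored flags may be set in addition
 *       ok = !(flags & CLONE_INVALID_THREAD_FLAGS);
 *   } else if (!(flags & CLONE_THREAD_FLAGS)) {
 *       // looks like fork(): only CSIGNAL, the optional fork flags
 *       // and the ignored flags may be set
 *       ok = !(flags & CLONE_INVALID_FORK_FLAGS);
 *   }
 */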
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
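
/*
 * For example, the invocation "_syscall1(int,exit_group,int,error_code)"
 * further down expands to:
 *
 *   static int exit_group(int error_code)
 *   {
 *       return syscall(__NR_exit_group, error_code);
 *   }
 *
 * i.e. a thin static wrapper that invokes the raw syscall directly,
 * bypassing any libc wrapper of the same name.
 */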
250 
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we implement the guest's getdents directly with the host's getdents, if the host has it.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
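
/*
 * For example, a 64-bit guest (TARGET_ABI_BITS == 64) on a 32-bit host
 * (HOST_LONG_BITS == 32) leaves EMULATE_GETDENTS_WITH_GETDENTS undefined,
 * so the guest's getdents is serviced through sys_getdents64 below; a host
 * at least as wide as the guest that provides __NR_getdents uses
 * sys_getdents directly.
 */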
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
368 static bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
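
/*
 * Each row above pairs a (mask, bits) match on the target side with the
 * bits to set on the host side, and vice versa (field names follow the
 * bitmask_transtbl declaration in the user-mode headers): a flag word
 * matches a row when (flags & mask) == bits, and the other side's bits are
 * then OR'd into the result.  For instance, a guest open() flag word of
 * (TARGET_O_WRONLY | TARGET_O_CREAT) translates to (O_WRONLY | O_CREAT) on
 * the host, while TARGET_O_RDONLY (value 0) matches neither O_ACCMODE row
 * and so maps to the host's O_RDONLY (also 0).
 */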
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
412 static int sys_utimensat(int dirfd, const char *pathname,
413                          const struct timespec times[2], int flags)
414 {
415     errno = ENOSYS;
416     return -1;
417 }
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
427 static int sys_renameat2(int oldfd, const char *old,
428                          int newfd, const char *new, int flags)
429 {
430     if (flags == 0) {
431         return renameat(oldfd, old, newfd, new);
432     }
433     errno = ENOSYS;
434     return -1;
435 }
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
444 {
445   return (inotify_init());
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450 {
451   return (inotify_add_watch(fd, pathname, mask));
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd, int32_t wd)
456 {
457   return (inotify_rm_watch(fd, wd));
458 }
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags)
463 {
464   return (inotify_init1(flags));
465 }
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be the one used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, };
495 
496 static inline int next_free_host_timer(void)
497 {
498     int k;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
508 #endif
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 /* target_to_host_errno_table[] is initialized from
513  * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 };
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERFKILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
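
/*
 * A typical usage pattern for the helpers above (sketch): the host's
 * -1/errno convention is folded into a single return value that already
 * carries a *target* errno, which callers then test with is_error():
 *
 *   ret = get_errno(fchmod(fd, mode));
 *   if (is_error(ret)) {
 *       return ret;    // already a -TARGET_Exxx value
 *   }
 */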
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
727 
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 #endif
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 #endif
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746               struct timespec *, tsp, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 #endif
749 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750               int, maxevents, int, timeout, const sigset_t *, sigmask,
751               size_t, sigsetsize)
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754               const struct timespec *,timeout,int *,uaddr2,int,val3)
755 #endif
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 #endif
760 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761 safe_syscall2(int, kill, pid_t, pid, int, sig)
762 safe_syscall2(int, tkill, int, tid, int, sig)
763 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767               unsigned long, pos_l, unsigned long, pos_h)
768 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769               unsigned long, pos_l, unsigned long, pos_h)
770 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771               socklen_t, addrlen)
772 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778 safe_syscall2(int, flock, int, fd, int, operation)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781               const struct timespec *, uts, size_t, sigsetsize)
782 #endif
783 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784               int, flags)
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep, const struct timespec *, req,
787               struct timespec *, rem)
788 #endif
789 #if defined(TARGET_NR_clock_nanosleep) || \
790     defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792               const struct timespec *, req, struct timespec *, rem)
793 #endif
794 #ifdef __NR_ipc
795 #ifdef __s390x__
796 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797               void *, ptr)
798 #else
799 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800               void *, ptr, long, fifth)
801 #endif
802 #endif
803 #ifdef __NR_msgsnd
804 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805               int, flags)
806 #endif
807 #ifdef __NR_msgrcv
808 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809               long, msgtype, int, flags)
810 #endif
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813               unsigned, nsops, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedsend) || \
816     defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818               size_t, len, unsigned, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_mq_timedreceive) || \
821     defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823               size_t, len, unsigned *, prio, const struct timespec *, timeout)
824 #endif
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827               int, outfd, loff_t *, poutoff, size_t, length,
828               unsigned int, flags)
829 #endif
830 
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832  * "third argument might be integer or pointer or not present" behaviour of
833  * the libc function.
834  */
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838  *  - use the flock64 struct rather than unsuffixed flock
839  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840  */
841 #ifdef __NR_fcntl64
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843 #else
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845 #endif
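
/*
 * Example of the calling convention described above (illustrative only):
 * a guest F_GETLK is always issued as the 64-bit variant,
 *
 *   struct flock64 fl64;
 *   ...
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * so that the offset fields stay 64 bits wide on 32-bit as well as
 * 64-bit hosts.
 */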
846 
847 static inline int host_to_target_sock_type(int host_type)
848 {
849     int target_type;
850 
851     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852     case SOCK_DGRAM:
853         target_type = TARGET_SOCK_DGRAM;
854         break;
855     case SOCK_STREAM:
856         target_type = TARGET_SOCK_STREAM;
857         break;
858     default:
859         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860         break;
861     }
862 
863 #if defined(SOCK_CLOEXEC)
864     if (host_type & SOCK_CLOEXEC) {
865         target_type |= TARGET_SOCK_CLOEXEC;
866     }
867 #endif
868 
869 #if defined(SOCK_NONBLOCK)
870     if (host_type & SOCK_NONBLOCK) {
871         target_type |= TARGET_SOCK_NONBLOCK;
872     }
873 #endif
874 
875     return target_type;
876 }
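
/*
 * For example, a host value of (SOCK_STREAM | SOCK_CLOEXEC) is returned as
 * (TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC); base types that have no
 * special-cased mapping are passed through numerically unchanged.
 */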
877 
878 static abi_ulong target_brk;
879 static abi_ulong target_original_brk;
880 static abi_ulong brk_page;
881 
882 void target_set_brk(abi_ulong new_brk)
883 {
884     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885     brk_page = HOST_PAGE_ALIGN(target_brk);
886 }
887 
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
890 
891 /* do_brk() must return target values and target errnos. */
892 abi_long do_brk(abi_ulong new_brk)
893 {
894     abi_long mapped_addr;
895     abi_ulong new_alloc_size;
896 
897     /* brk pointers are always untagged */
898 
899     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
900 
901     if (!new_brk) {
902         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
903         return target_brk;
904     }
905     if (new_brk < target_original_brk) {
906         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
907                    target_brk);
908         return target_brk;
909     }
910 
911     /* If the new brk is less than the highest page reserved to the
912      * target heap allocation, set it and we're almost done...  */
913     if (new_brk <= brk_page) {
914         /* Heap contents are initialized to zero, as for anonymous
915          * mapped pages.  */
916         if (new_brk > target_brk) {
917             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
918         }
919         target_brk = new_brk;
920         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
921         return target_brk;
922     }
923 
924     /* We need to allocate more memory after the brk... Note that
925      * we don't use MAP_FIXED because that will map over the top of
926      * any existing mapping (like the one with the host libc or qemu
927      * itself); instead we treat "mapped but at wrong address" as
928      * a failure and unmap again.
929      */
930     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
931     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
932                                         PROT_READ|PROT_WRITE,
933                                         MAP_ANON|MAP_PRIVATE, 0, 0));
934 
935     if (mapped_addr == brk_page) {
936         /* Heap contents are initialized to zero, as for anonymous
937          * mapped pages.  Technically the new pages are already
938          * initialized to zero since they *are* anonymous mapped
939          * pages, however we have to take care with the contents that
940          * come from the remaining part of the previous page: it may
941      * contain garbage data from a previous heap usage (grown
942      * then shrunk).  */
943         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
944 
945         target_brk = new_brk;
946         brk_page = HOST_PAGE_ALIGN(target_brk);
947         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
948             target_brk);
949         return target_brk;
950     } else if (mapped_addr != -1) {
951         /* Mapped but at wrong address, meaning there wasn't actually
952          * enough space for this brk.
953          */
954         target_munmap(mapped_addr, new_alloc_size);
955         mapped_addr = -1;
956         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
957     }
958     else {
959         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
960     }
961 
962 #if defined(TARGET_ALPHA)
963     /* We (partially) emulate OSF/1 on Alpha, which requires we
964        return a proper errno, not an unchanged brk value.  */
965     return -TARGET_ENOMEM;
966 #endif
967     /* For everything else, return the previous break. */
968     return target_brk;
969 }
970 
971 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
972     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
973 static inline abi_long copy_from_user_fdset(fd_set *fds,
974                                             abi_ulong target_fds_addr,
975                                             int n)
976 {
977     int i, nw, j, k;
978     abi_ulong b, *target_fds;
979 
980     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
981     if (!(target_fds = lock_user(VERIFY_READ,
982                                  target_fds_addr,
983                                  sizeof(abi_ulong) * nw,
984                                  1)))
985         return -TARGET_EFAULT;
986 
987     FD_ZERO(fds);
988     k = 0;
989     for (i = 0; i < nw; i++) {
990         /* grab the abi_ulong */
991         __get_user(b, &target_fds[i]);
992         for (j = 0; j < TARGET_ABI_BITS; j++) {
993             /* check the bit inside the abi_ulong */
994             if ((b >> j) & 1)
995                 FD_SET(k, fds);
996             k++;
997         }
998     }
999 
1000     unlock_user(target_fds, target_fds_addr, 0);
1001 
1002     return 0;
1003 }
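
/*
 * Worked example of the layout handled above: with TARGET_ABI_BITS == 32,
 * guest fd 33 lives in abi_ulong word 1 (33 / 32) at bit 1 (33 % 32), so
 * the loop reads word 1, sees bit 1 set and calls FD_SET(33, fds).
 */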
1004 
1005 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1006                                                  abi_ulong target_fds_addr,
1007                                                  int n)
1008 {
1009     if (target_fds_addr) {
1010         if (copy_from_user_fdset(fds, target_fds_addr, n))
1011             return -TARGET_EFAULT;
1012         *fds_ptr = fds;
1013     } else {
1014         *fds_ptr = NULL;
1015     }
1016     return 0;
1017 }
1018 
1019 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1020                                           const fd_set *fds,
1021                                           int n)
1022 {
1023     int i, nw, j, k;
1024     abi_long v;
1025     abi_ulong *target_fds;
1026 
1027     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1028     if (!(target_fds = lock_user(VERIFY_WRITE,
1029                                  target_fds_addr,
1030                                  sizeof(abi_ulong) * nw,
1031                                  0)))
1032         return -TARGET_EFAULT;
1033 
1034     k = 0;
1035     for (i = 0; i < nw; i++) {
1036         v = 0;
1037         for (j = 0; j < TARGET_ABI_BITS; j++) {
1038             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1039             k++;
1040         }
1041         __put_user(v, &target_fds[i]);
1042     }
1043 
1044     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1045 
1046     return 0;
1047 }
1048 #endif
1049 
1050 #if defined(__alpha__)
1051 #define HOST_HZ 1024
1052 #else
1053 #define HOST_HZ 100
1054 #endif
1055 
1056 static inline abi_long host_to_target_clock_t(long ticks)
1057 {
1058 #if HOST_HZ == TARGET_HZ
1059     return ticks;
1060 #else
1061     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1062 #endif
1063 }
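
/*
 * For example, on an Alpha host (HOST_HZ == 1024) emulating a guest whose
 * TARGET_HZ is 100 (the usual value for most targets), 2048 host ticks
 * (two seconds) are reported as (int64_t)2048 * 100 / 1024 = 200 guest
 * ticks.
 */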
1064 
1065 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1066                                              const struct rusage *rusage)
1067 {
1068     struct target_rusage *target_rusage;
1069 
1070     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1071         return -TARGET_EFAULT;
1072     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1073     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1074     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1075     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1076     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1077     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1078     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1079     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1080     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1081     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1082     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1083     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1084     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1085     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1086     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1087     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1088     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1089     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1090     unlock_user_struct(target_rusage, target_addr, 1);
1091 
1092     return 0;
1093 }
1094 
1095 #ifdef TARGET_NR_setrlimit
1096 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1097 {
1098     abi_ulong target_rlim_swap;
1099     rlim_t result;
1100 
1101     target_rlim_swap = tswapal(target_rlim);
1102     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1103         return RLIM_INFINITY;
1104 
1105     result = target_rlim_swap;
1106     if (target_rlim_swap != (rlim_t)result)
1107         return RLIM_INFINITY;
1108 
1109     return result;
1110 }
1111 #endif
1112 
1113 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1114 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1115 {
1116     abi_ulong target_rlim_swap;
1117     abi_ulong result;
1118 
1119     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1120         target_rlim_swap = TARGET_RLIM_INFINITY;
1121     else
1122         target_rlim_swap = rlim;
1123     result = tswapal(target_rlim_swap);
1124 
1125     return result;
1126 }
1127 #endif
1128 
1129 static inline int target_to_host_resource(int code)
1130 {
1131     switch (code) {
1132     case TARGET_RLIMIT_AS:
1133         return RLIMIT_AS;
1134     case TARGET_RLIMIT_CORE:
1135         return RLIMIT_CORE;
1136     case TARGET_RLIMIT_CPU:
1137         return RLIMIT_CPU;
1138     case TARGET_RLIMIT_DATA:
1139         return RLIMIT_DATA;
1140     case TARGET_RLIMIT_FSIZE:
1141         return RLIMIT_FSIZE;
1142     case TARGET_RLIMIT_LOCKS:
1143         return RLIMIT_LOCKS;
1144     case TARGET_RLIMIT_MEMLOCK:
1145         return RLIMIT_MEMLOCK;
1146     case TARGET_RLIMIT_MSGQUEUE:
1147         return RLIMIT_MSGQUEUE;
1148     case TARGET_RLIMIT_NICE:
1149         return RLIMIT_NICE;
1150     case TARGET_RLIMIT_NOFILE:
1151         return RLIMIT_NOFILE;
1152     case TARGET_RLIMIT_NPROC:
1153         return RLIMIT_NPROC;
1154     case TARGET_RLIMIT_RSS:
1155         return RLIMIT_RSS;
1156     case TARGET_RLIMIT_RTPRIO:
1157         return RLIMIT_RTPRIO;
1158     case TARGET_RLIMIT_SIGPENDING:
1159         return RLIMIT_SIGPENDING;
1160     case TARGET_RLIMIT_STACK:
1161         return RLIMIT_STACK;
1162     default:
1163         return code;
1164     }
1165 }
1166 
1167 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1168                                               abi_ulong target_tv_addr)
1169 {
1170     struct target_timeval *target_tv;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173         return -TARGET_EFAULT;
1174     }
1175 
1176     __get_user(tv->tv_sec, &target_tv->tv_sec);
1177     __get_user(tv->tv_usec, &target_tv->tv_usec);
1178 
1179     unlock_user_struct(target_tv, target_tv_addr, 0);
1180 
1181     return 0;
1182 }
1183 
1184 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1185                                             const struct timeval *tv)
1186 {
1187     struct target_timeval *target_tv;
1188 
1189     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1190         return -TARGET_EFAULT;
1191     }
1192 
1193     __put_user(tv->tv_sec, &target_tv->tv_sec);
1194     __put_user(tv->tv_usec, &target_tv->tv_usec);
1195 
1196     unlock_user_struct(target_tv, target_tv_addr, 1);
1197 
1198     return 0;
1199 }
1200 
1201 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1202 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1203                                                 abi_ulong target_tv_addr)
1204 {
1205     struct target__kernel_sock_timeval *target_tv;
1206 
1207     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1208         return -TARGET_EFAULT;
1209     }
1210 
1211     __get_user(tv->tv_sec, &target_tv->tv_sec);
1212     __get_user(tv->tv_usec, &target_tv->tv_usec);
1213 
1214     unlock_user_struct(target_tv, target_tv_addr, 0);
1215 
1216     return 0;
1217 }
1218 #endif
1219 
1220 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1221                                               const struct timeval *tv)
1222 {
1223     struct target__kernel_sock_timeval *target_tv;
1224 
1225     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1226         return -TARGET_EFAULT;
1227     }
1228 
1229     __put_user(tv->tv_sec, &target_tv->tv_sec);
1230     __put_user(tv->tv_usec, &target_tv->tv_usec);
1231 
1232     unlock_user_struct(target_tv, target_tv_addr, 1);
1233 
1234     return 0;
1235 }
1236 
1237 #if defined(TARGET_NR_futex) || \
1238     defined(TARGET_NR_pselect6) || \
1239     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1240     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1241     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1242     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1243     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1244     defined(TARGET_NR_timer_settime) || \
1245     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1246 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1247                                                abi_ulong target_addr)
1248 {
1249     struct target_timespec *target_ts;
1250 
1251     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1252         return -TARGET_EFAULT;
1253     }
1254     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1255     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1256     unlock_user_struct(target_ts, target_addr, 0);
1257     return 0;
1258 }
1259 #endif
1260 
1261 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1262     defined(TARGET_NR_timer_settime64) || \
1263     defined(TARGET_NR_mq_timedsend_time64) || \
1264     defined(TARGET_NR_mq_timedreceive_time64) || \
1265     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1266     defined(TARGET_NR_clock_nanosleep_time64) || \
1267     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1268     defined(TARGET_NR_utimensat) || \
1269     defined(TARGET_NR_utimensat_time64) || \
1270     defined(TARGET_NR_semtimedop_time64) || \
1271     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1272 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1273                                                  abi_ulong target_addr)
1274 {
1275     struct target__kernel_timespec *target_ts;
1276 
1277     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1278         return -TARGET_EFAULT;
1279     }
1280     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1281     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1282     /* in 32bit mode, this drops the padding */
1283     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1284     unlock_user_struct(target_ts, target_addr, 0);
1285     return 0;
1286 }
1287 #endif
1288 
1289 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1290                                                struct timespec *host_ts)
1291 {
1292     struct target_timespec *target_ts;
1293 
1294     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1295         return -TARGET_EFAULT;
1296     }
1297     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1298     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1299     unlock_user_struct(target_ts, target_addr, 1);
1300     return 0;
1301 }
1302 
1303 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1304                                                  struct timespec *host_ts)
1305 {
1306     struct target__kernel_timespec *target_ts;
1307 
1308     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1309         return -TARGET_EFAULT;
1310     }
1311     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1312     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1313     unlock_user_struct(target_ts, target_addr, 1);
1314     return 0;
1315 }
1316 
1317 #if defined(TARGET_NR_gettimeofday)
1318 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1319                                              struct timezone *tz)
1320 {
1321     struct target_timezone *target_tz;
1322 
1323     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1324         return -TARGET_EFAULT;
1325     }
1326 
1327     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1328     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1329 
1330     unlock_user_struct(target_tz, target_tz_addr, 1);
1331 
1332     return 0;
1333 }
1334 #endif
1335 
1336 #if defined(TARGET_NR_settimeofday)
1337 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1338                                                abi_ulong target_tz_addr)
1339 {
1340     struct target_timezone *target_tz;
1341 
1342     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1343         return -TARGET_EFAULT;
1344     }
1345 
1346     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1347     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1348 
1349     unlock_user_struct(target_tz, target_tz_addr, 0);
1350 
1351     return 0;
1352 }
1353 #endif
1354 
1355 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1356 #include <mqueue.h>
1357 
1358 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1359                                               abi_ulong target_mq_attr_addr)
1360 {
1361     struct target_mq_attr *target_mq_attr;
1362 
1363     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1364                           target_mq_attr_addr, 1))
1365         return -TARGET_EFAULT;
1366 
1367     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1368     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1369     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1370     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1371 
1372     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1373 
1374     return 0;
1375 }
1376 
1377 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1378                                             const struct mq_attr *attr)
1379 {
1380     struct target_mq_attr *target_mq_attr;
1381 
1382     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1383                           target_mq_attr_addr, 0))
1384         return -TARGET_EFAULT;
1385 
1386     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1387     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1388     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1389     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1390 
1391     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1392 
1393     return 0;
1394 }
1395 #endif
1396 
1397 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1398 /* do_select() must return target values and target errnos. */
1399 static abi_long do_select(int n,
1400                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1401                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1402 {
1403     fd_set rfds, wfds, efds;
1404     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1405     struct timeval tv;
1406     struct timespec ts, *ts_ptr;
1407     abi_long ret;
1408 
1409     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1410     if (ret) {
1411         return ret;
1412     }
1413     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1414     if (ret) {
1415         return ret;
1416     }
1417     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1418     if (ret) {
1419         return ret;
1420     }
1421 
1422     if (target_tv_addr) {
1423         if (copy_from_user_timeval(&tv, target_tv_addr))
1424             return -TARGET_EFAULT;
1425         ts.tv_sec = tv.tv_sec;
1426         ts.tv_nsec = tv.tv_usec * 1000;
1427         ts_ptr = &ts;
1428     } else {
1429         ts_ptr = NULL;
1430     }
1431 
1432     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1433                                   ts_ptr, NULL));
1434 
1435     if (!is_error(ret)) {
1436         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1437             return -TARGET_EFAULT;
1438         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1439             return -TARGET_EFAULT;
1440         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1441             return -TARGET_EFAULT;
1442 
1443         if (target_tv_addr) {
1444             tv.tv_sec = ts.tv_sec;
1445             tv.tv_usec = ts.tv_nsec / 1000;
1446             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1447                 return -TARGET_EFAULT;
1448             }
1449         }
1450     }
1451 
1452     return ret;
1453 }
1454 
1455 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1456 static abi_long do_old_select(abi_ulong arg1)
1457 {
1458     struct target_sel_arg_struct *sel;
1459     abi_ulong inp, outp, exp, tvp;
1460     long nsel;
1461 
1462     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1463         return -TARGET_EFAULT;
1464     }
1465 
1466     nsel = tswapal(sel->n);
1467     inp = tswapal(sel->inp);
1468     outp = tswapal(sel->outp);
1469     exp = tswapal(sel->exp);
1470     tvp = tswapal(sel->tvp);
1471 
1472     unlock_user_struct(sel, arg1, 0);
1473 
1474     return do_select(nsel, inp, outp, exp, tvp);
1475 }
1476 #endif
1477 #endif
1478 
1479 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1480 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1481                             abi_long arg4, abi_long arg5, abi_long arg6,
1482                             bool time64)
1483 {
1484     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1485     fd_set rfds, wfds, efds;
1486     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1487     struct timespec ts, *ts_ptr;
1488     abi_long ret;
1489 
1490     /*
1491      * The 6th arg is actually two args smashed together,
1492      * so we cannot use the C library.
1493      */
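    /*
     * From the guest's point of view arg6 points at two abi_ulongs laid
     * out as { sigset address, sigset size }; they are unpacked below and
     * repacked into the host-side struct expected by the raw syscall.
     */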
1494     sigset_t set;
1495     struct {
1496         sigset_t *set;
1497         size_t size;
1498     } sig, *sig_ptr;
1499 
1500     abi_ulong arg_sigset, arg_sigsize, *arg7;
1501     target_sigset_t *target_sigset;
1502 
1503     n = arg1;
1504     rfd_addr = arg2;
1505     wfd_addr = arg3;
1506     efd_addr = arg4;
1507     ts_addr = arg5;
1508 
1509     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1510     if (ret) {
1511         return ret;
1512     }
1513     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1514     if (ret) {
1515         return ret;
1516     }
1517     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1518     if (ret) {
1519         return ret;
1520     }
1521 
1522     /*
1523      * This takes a timespec, and not a timeval, so we cannot
1524      * use the do_select() helper ...
1525      */
1526     if (ts_addr) {
1527         if (time64) {
1528             if (target_to_host_timespec64(&ts, ts_addr)) {
1529                 return -TARGET_EFAULT;
1530             }
1531         } else {
1532             if (target_to_host_timespec(&ts, ts_addr)) {
1533                 return -TARGET_EFAULT;
1534             }
1535         }
1536         ts_ptr = &ts;
1537     } else {
1538         ts_ptr = NULL;
1539     }
1540 
1541     /* Extract the two packed args for the sigset */
1542     if (arg6) {
1543         sig_ptr = &sig;
1544         sig.size = SIGSET_T_SIZE;
1545 
1546         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1547         if (!arg7) {
1548             return -TARGET_EFAULT;
1549         }
1550         arg_sigset = tswapal(arg7[0]);
1551         arg_sigsize = tswapal(arg7[1]);
1552         unlock_user(arg7, arg6, 0);
1553 
1554         if (arg_sigset) {
1555             sig.set = &set;
1556             if (arg_sigsize != sizeof(*target_sigset)) {
1557                 /* Like the kernel, we enforce correct size sigsets */
1558                 return -TARGET_EINVAL;
1559             }
1560             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1561                                       sizeof(*target_sigset), 1);
1562             if (!target_sigset) {
1563                 return -TARGET_EFAULT;
1564             }
1565             target_to_host_sigset(&set, target_sigset);
1566             unlock_user(target_sigset, arg_sigset, 0);
1567         } else {
1568             sig.set = NULL;
1569         }
1570     } else {
1571         sig_ptr = NULL;
1572     }
1573 
1574     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1575                                   ts_ptr, sig_ptr));
1576 
1577     if (!is_error(ret)) {
1578         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1579             return -TARGET_EFAULT;
1580         }
1581         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1582             return -TARGET_EFAULT;
1583         }
1584         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1585             return -TARGET_EFAULT;
1586         }
1587         if (time64) {
1588             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1589                 return -TARGET_EFAULT;
1590             }
1591         } else {
1592             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1593                 return -TARGET_EFAULT;
1594             }
1595         }
1596     }
1597     return ret;
1598 }
1599 #endif
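/*
 * A minimal sketch (illustrative only, not compiled into QEMU) of the
 * packed sixth argument that do_pselect6() unpacks above: the guest
 * passes a pointer to a two-word block holding the guest address of the
 * sigset (or 0) and the sigset size, which must equal the guest's
 * sizeof(sigset_t).  The struct and field names are assumptions for
 * illustration, not QEMU or kernel definitions.
 */
#if 0
struct guest_pselect6_sig {
    unsigned long sigset_addr;   /* guest pointer to the signal set, may be 0 */
    unsigned long sigset_size;   /* must match the guest's sizeof(sigset_t) */
};
#endif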
1600 
1601 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1602     defined(TARGET_NR_ppoll_time64)
1603 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1604                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1605 {
1606     struct target_pollfd *target_pfd;
1607     unsigned int nfds = arg2;
1608     struct pollfd *pfd;
1609     unsigned int i;
1610     abi_long ret;
1611 
1612     pfd = NULL;
1613     target_pfd = NULL;
1614     if (nfds) {
1615         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1616             return -TARGET_EINVAL;
1617         }
1618         target_pfd = lock_user(VERIFY_WRITE, arg1,
1619                                sizeof(struct target_pollfd) * nfds, 1);
1620         if (!target_pfd) {
1621             return -TARGET_EFAULT;
1622         }
1623 
1624         pfd = alloca(sizeof(struct pollfd) * nfds);
1625         for (i = 0; i < nfds; i++) {
1626             pfd[i].fd = tswap32(target_pfd[i].fd);
1627             pfd[i].events = tswap16(target_pfd[i].events);
1628         }
1629     }
1630     if (ppoll) {
1631         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1632         target_sigset_t *target_set;
1633         sigset_t _set, *set = &_set;
1634 
1635         if (arg3) {
1636             if (time64) {
1637                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1638                     unlock_user(target_pfd, arg1, 0);
1639                     return -TARGET_EFAULT;
1640                 }
1641             } else {
1642                 if (target_to_host_timespec(timeout_ts, arg3)) {
1643                     unlock_user(target_pfd, arg1, 0);
1644                     return -TARGET_EFAULT;
1645                 }
1646             }
1647         } else {
1648             timeout_ts = NULL;
1649         }
1650 
1651         if (arg4) {
1652             if (arg5 != sizeof(target_sigset_t)) {
1653                 unlock_user(target_pfd, arg1, 0);
1654                 return -TARGET_EINVAL;
1655             }
1656 
1657             target_set = lock_user(VERIFY_READ, arg4,
1658                                    sizeof(target_sigset_t), 1);
1659             if (!target_set) {
1660                 unlock_user(target_pfd, arg1, 0);
1661                 return -TARGET_EFAULT;
1662             }
1663             target_to_host_sigset(set, target_set);
1664         } else {
1665             set = NULL;
1666         }
1667 
1668         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1669                                    set, SIGSET_T_SIZE));
1670 
1671         if (!is_error(ret) && arg3) {
1672             if (time64) {
1673                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1674                     return -TARGET_EFAULT;
1675                 }
1676             } else {
1677                 if (host_to_target_timespec(arg3, timeout_ts)) {
1678                     return -TARGET_EFAULT;
1679                 }
1680             }
1681         }
1682         if (arg4) {
1683             unlock_user(target_set, arg4, 0);
1684         }
1685     } else {
1686         struct timespec ts, *pts;
1687 
1688         if (arg3 >= 0) {
1689             /* Convert milliseconds to seconds and nanoseconds */
1690             ts.tv_sec = arg3 / 1000;
1691             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1692             pts = &ts;
1693         } else {
1694             /* A negative poll() timeout means "wait forever" */
1695             pts = NULL;
1696         }
1697         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1698     }
1699 
1700     if (!is_error(ret)) {
1701         for (i = 0; i < nfds; i++) {
1702             target_pfd[i].revents = tswap16(pfd[i].revents);
1703         }
1704     }
1705     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1706     return ret;
1707 }
1708 #endif
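/*
 * Worked example for the plain poll() path above: a guest timeout of
 * arg3 = 2500 (milliseconds) becomes tv_sec = 2500 / 1000 = 2 and
 * tv_nsec = (2500 % 1000) * 1000000 = 500000000, i.e. 2.5 seconds.  A
 * negative arg3 skips the conversion and passes a NULL timespec to
 * safe_ppoll(), which the kernel treats as "block indefinitely".
 */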
1709 
1710 static abi_long do_pipe2(int host_pipe[], int flags)
1711 {
1712 #ifdef CONFIG_PIPE2
1713     return pipe2(host_pipe, flags);
1714 #else
1715     return -ENOSYS;
1716 #endif
1717 }
1718 
1719 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1720                         int flags, int is_pipe2)
1721 {
1722     int host_pipe[2];
1723     abi_long ret;
1724     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1725 
1726     if (is_error(ret))
1727         return get_errno(ret);
1728 
1729     /* Several targets have special calling conventions for the original
1730        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1731     if (!is_pipe2) {
1732 #if defined(TARGET_ALPHA)
1733         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1734         return host_pipe[0];
1735 #elif defined(TARGET_MIPS)
1736         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1737         return host_pipe[0];
1738 #elif defined(TARGET_SH4)
1739         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1740         return host_pipe[0];
1741 #elif defined(TARGET_SPARC)
1742         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1743         return host_pipe[0];
1744 #endif
1745     }
1746 
1747     if (put_user_s32(host_pipe[0], pipedes)
1748         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1749         return -TARGET_EFAULT;
1750     return get_errno(ret);
1751 }
1752 
1753 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1754                                               abi_ulong target_addr,
1755                                               socklen_t len)
1756 {
1757     struct target_ip_mreqn *target_smreqn;
1758 
1759     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1760     if (!target_smreqn)
1761         return -TARGET_EFAULT;
1762     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1763     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1764     if (len == sizeof(struct target_ip_mreqn))
1765         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1766     unlock_user(target_smreqn, target_addr, 0);
1767 
1768     return 0;
1769 }
1770 
1771 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1772                                                abi_ulong target_addr,
1773                                                socklen_t len)
1774 {
1775     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1776     sa_family_t sa_family;
1777     struct target_sockaddr *target_saddr;
1778 
1779     if (fd_trans_target_to_host_addr(fd)) {
1780         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1781     }
1782 
1783     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1784     if (!target_saddr)
1785         return -TARGET_EFAULT;
1786 
1787     sa_family = tswap16(target_saddr->sa_family);
1788 
1789     /* Oops. The caller might send an incomplete sun_path; sun_path
1790      * must be terminated by \0 (see the manual page), but
1791      * unfortunately it is quite common to specify sockaddr_un
1792      * length as "strlen(x->sun_path)" when it should be
1793      * "strlen(...) + 1". We'll fix that here if needed.
1794      * The Linux kernel has a similar workaround.
1795      */
1796 
1797     if (sa_family == AF_UNIX) {
1798         if (len < unix_maxlen && len > 0) {
1799             char *cp = (char *)target_saddr;
1800 
1801             if (cp[len - 1] && !cp[len])
1802                 len++;
1803         }
1804         if (len > unix_maxlen)
1805             len = unix_maxlen;
1806     }
1807 
1808     memcpy(addr, target_saddr, len);
1809     addr->sa_family = sa_family;
1810     if (sa_family == AF_NETLINK) {
1811         struct sockaddr_nl *nladdr;
1812 
1813         nladdr = (struct sockaddr_nl *)addr;
1814         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1815         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1816     } else if (sa_family == AF_PACKET) {
1817         struct target_sockaddr_ll *lladdr;
1818 
1819         lladdr = (struct target_sockaddr_ll *)addr;
1820         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1821         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1822     }
1823     unlock_user(target_saddr, target_addr, 0);
1824 
1825     return 0;
1826 }
1827 
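/*
 * A minimal guest-side sketch (illustrative only, not compiled into QEMU)
 * of the AF_UNIX length fix-up performed above: a guest that reports the
 * sockaddr_un length without the trailing NUL still works, because
 * target_to_host_sockaddr() widens the length by one byte when a NUL sits
 * just past the reported end.  The helper name below is an assumption for
 * illustration.
 */
#if 0
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static socklen_t fill_unix_addr(struct sockaddr_un *sun, const char *path)
{
    memset(sun, 0, sizeof(*sun));
    sun->sun_family = AF_UNIX;
    strncpy(sun->sun_path, path, sizeof(sun->sun_path) - 1);
    /* Common (if sloppy) practice: report the length without the NUL. */
    return offsetof(struct sockaddr_un, sun_path) + strlen(sun->sun_path);
}
#endif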
1828 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1829                                                struct sockaddr *addr,
1830                                                socklen_t len)
1831 {
1832     struct target_sockaddr *target_saddr;
1833 
1834     if (len == 0) {
1835         return 0;
1836     }
1837     assert(addr);
1838 
1839     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1840     if (!target_saddr)
1841         return -TARGET_EFAULT;
1842     memcpy(target_saddr, addr, len);
1843     if (len >= offsetof(struct target_sockaddr, sa_family) +
1844         sizeof(target_saddr->sa_family)) {
1845         target_saddr->sa_family = tswap16(addr->sa_family);
1846     }
1847     if (addr->sa_family == AF_NETLINK &&
1848         len >= sizeof(struct target_sockaddr_nl)) {
1849         struct target_sockaddr_nl *target_nl =
1850                (struct target_sockaddr_nl *)target_saddr;
1851         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1852         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1853     } else if (addr->sa_family == AF_PACKET) {
1854         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1855         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1856         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1857     } else if (addr->sa_family == AF_INET6 &&
1858                len >= sizeof(struct target_sockaddr_in6)) {
1859         struct target_sockaddr_in6 *target_in6 =
1860                (struct target_sockaddr_in6 *)target_saddr;
1861         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1862     }
1863     unlock_user(target_saddr, target_addr, len);
1864 
1865     return 0;
1866 }
1867 
1868 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1869                                            struct target_msghdr *target_msgh)
1870 {
1871     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872     abi_long msg_controllen;
1873     abi_ulong target_cmsg_addr;
1874     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875     socklen_t space = 0;
1876 
1877     msg_controllen = tswapal(target_msgh->msg_controllen);
1878     if (msg_controllen < sizeof (struct target_cmsghdr))
1879         goto the_end;
1880     target_cmsg_addr = tswapal(target_msgh->msg_control);
1881     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1882     target_cmsg_start = target_cmsg;
1883     if (!target_cmsg)
1884         return -TARGET_EFAULT;
1885 
1886     while (cmsg && target_cmsg) {
1887         void *data = CMSG_DATA(cmsg);
1888         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1889 
1890         int len = tswapal(target_cmsg->cmsg_len)
1891             - sizeof(struct target_cmsghdr);
1892 
1893         space += CMSG_SPACE(len);
1894         if (space > msgh->msg_controllen) {
1895             space -= CMSG_SPACE(len);
1896             /* This is a QEMU bug, since we allocated the payload
1897              * area ourselves (unlike overflow in host-to-target
1898              * conversion, which is just the guest giving us a buffer
1899              * that's too small). It can't happen for the payload types
1900              * we currently support; if it becomes an issue in future
1901              * we would need to improve our allocation strategy to
1902              * something more intelligent than "twice the size of the
1903              * target buffer we're reading from".
1904              */
1905             qemu_log_mask(LOG_UNIMP,
1906                           ("Unsupported ancillary data %d/%d: "
1907                            "unhandled msg size\n"),
1908                           tswap32(target_cmsg->cmsg_level),
1909                           tswap32(target_cmsg->cmsg_type));
1910             break;
1911         }
1912 
1913         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1914             cmsg->cmsg_level = SOL_SOCKET;
1915         } else {
1916             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1917         }
1918         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1919         cmsg->cmsg_len = CMSG_LEN(len);
1920 
1921         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1922             int *fd = (int *)data;
1923             int *target_fd = (int *)target_data;
1924             int i, numfds = len / sizeof(int);
1925 
1926             for (i = 0; i < numfds; i++) {
1927                 __get_user(fd[i], target_fd + i);
1928             }
1929         } else if (cmsg->cmsg_level == SOL_SOCKET
1930                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1931             struct ucred *cred = (struct ucred *)data;
1932             struct target_ucred *target_cred =
1933                 (struct target_ucred *)target_data;
1934 
1935             __get_user(cred->pid, &target_cred->pid);
1936             __get_user(cred->uid, &target_cred->uid);
1937             __get_user(cred->gid, &target_cred->gid);
1938         } else {
1939             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1940                           cmsg->cmsg_level, cmsg->cmsg_type);
1941             memcpy(data, target_data, len);
1942         }
1943 
1944         cmsg = CMSG_NXTHDR(msgh, cmsg);
1945         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1946                                          target_cmsg_start);
1947     }
1948     unlock_user(target_cmsg, target_cmsg_addr, 0);
1949  the_end:
1950     msgh->msg_controllen = space;
1951     return 0;
1952 }
1953 
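/*
 * A minimal guest-side sketch (illustrative only, not compiled into QEMU)
 * of the SCM_RIGHTS layout that target_to_host_cmsg() converts above: the
 * cmsg header fields and each 32-bit descriptor in the payload are
 * byte-swapped before the message reaches the host kernel.  The helper
 * name below is an assumption for illustration.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_fd(int sock, int fd_to_pass)
{
    char dummy = 'x';
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        char buf[CMSG_SPACE(sizeof(int))];
        struct cmsghdr align;               /* force cmsg alignment */
    } u;
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
    return sendmsg(sock, &msg, 0);
}
#endif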
1954 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1955                                            struct msghdr *msgh)
1956 {
1957     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1958     abi_long msg_controllen;
1959     abi_ulong target_cmsg_addr;
1960     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1961     socklen_t space = 0;
1962 
1963     msg_controllen = tswapal(target_msgh->msg_controllen);
1964     if (msg_controllen < sizeof (struct target_cmsghdr))
1965         goto the_end;
1966     target_cmsg_addr = tswapal(target_msgh->msg_control);
1967     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1968     target_cmsg_start = target_cmsg;
1969     if (!target_cmsg)
1970         return -TARGET_EFAULT;
1971 
1972     while (cmsg && target_cmsg) {
1973         void *data = CMSG_DATA(cmsg);
1974         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1975 
1976         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1977         int tgt_len, tgt_space;
1978 
1979         /* We never copy a half-header but may copy half-data;
1980          * this is Linux's behaviour in put_cmsg(). Note that
1981          * truncation here is a guest problem (which we report
1982          * to the guest via the CTRUNC bit), unlike truncation
1983          * in target_to_host_cmsg, which is a QEMU bug.
1984          */
1985         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1986             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1987             break;
1988         }
1989 
1990         if (cmsg->cmsg_level == SOL_SOCKET) {
1991             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1992         } else {
1993             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1994         }
1995         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1996 
1997         /* Payload types which need a different size of payload on
1998          * the target must adjust tgt_len here.
1999          */
2000         tgt_len = len;
2001         switch (cmsg->cmsg_level) {
2002         case SOL_SOCKET:
2003             switch (cmsg->cmsg_type) {
2004             case SO_TIMESTAMP:
2005                 tgt_len = sizeof(struct target_timeval);
2006                 break;
2007             default:
2008                 break;
2009             }
2010             break;
2011         default:
2012             break;
2013         }
2014 
2015         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2016             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2017             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2018         }
2019 
2020         /* We must now copy-and-convert len bytes of payload
2021          * into tgt_len bytes of destination space. Bear in mind
2022          * that in both source and destination we may be dealing
2023          * with a truncated value!
2024          */
2025         switch (cmsg->cmsg_level) {
2026         case SOL_SOCKET:
2027             switch (cmsg->cmsg_type) {
2028             case SCM_RIGHTS:
2029             {
2030                 int *fd = (int *)data;
2031                 int *target_fd = (int *)target_data;
2032                 int i, numfds = tgt_len / sizeof(int);
2033 
2034                 for (i = 0; i < numfds; i++) {
2035                     __put_user(fd[i], target_fd + i);
2036                 }
2037                 break;
2038             }
2039             case SO_TIMESTAMP:
2040             {
2041                 struct timeval *tv = (struct timeval *)data;
2042                 struct target_timeval *target_tv =
2043                     (struct target_timeval *)target_data;
2044 
2045                 if (len != sizeof(struct timeval) ||
2046                     tgt_len != sizeof(struct target_timeval)) {
2047                     goto unimplemented;
2048                 }
2049 
2050                 /* copy struct timeval to target */
2051                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2052                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2053                 break;
2054             }
2055             case SCM_CREDENTIALS:
2056             {
2057                 struct ucred *cred = (struct ucred *)data;
2058                 struct target_ucred *target_cred =
2059                     (struct target_ucred *)target_data;
2060 
2061                 __put_user(cred->pid, &target_cred->pid);
2062                 __put_user(cred->uid, &target_cred->uid);
2063                 __put_user(cred->gid, &target_cred->gid);
2064                 break;
2065             }
2066             default:
2067                 goto unimplemented;
2068             }
2069             break;
2070 
2071         case SOL_IP:
2072             switch (cmsg->cmsg_type) {
2073             case IP_TTL:
2074             {
2075                 uint32_t *v = (uint32_t *)data;
2076                 uint32_t *t_int = (uint32_t *)target_data;
2077 
2078                 if (len != sizeof(uint32_t) ||
2079                     tgt_len != sizeof(uint32_t)) {
2080                     goto unimplemented;
2081                 }
2082                 __put_user(*v, t_int);
2083                 break;
2084             }
2085             case IP_RECVERR:
2086             {
2087                 struct errhdr_t {
2088                    struct sock_extended_err ee;
2089                    struct sockaddr_in offender;
2090                 };
2091                 struct errhdr_t *errh = (struct errhdr_t *)data;
2092                 struct errhdr_t *target_errh =
2093                     (struct errhdr_t *)target_data;
2094 
2095                 if (len != sizeof(struct errhdr_t) ||
2096                     tgt_len != sizeof(struct errhdr_t)) {
2097                     goto unimplemented;
2098                 }
2099                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2100                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2101                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2102                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2103                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2104                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2105                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2106                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2107                     (void *) &errh->offender, sizeof(errh->offender));
2108                 break;
2109             }
2110             default:
2111                 goto unimplemented;
2112             }
2113             break;
2114 
2115         case SOL_IPV6:
2116             switch (cmsg->cmsg_type) {
2117             case IPV6_HOPLIMIT:
2118             {
2119                 uint32_t *v = (uint32_t *)data;
2120                 uint32_t *t_int = (uint32_t *)target_data;
2121 
2122                 if (len != sizeof(uint32_t) ||
2123                     tgt_len != sizeof(uint32_t)) {
2124                     goto unimplemented;
2125                 }
2126                 __put_user(*v, t_int);
2127                 break;
2128             }
2129             case IPV6_RECVERR:
2130             {
2131                 struct errhdr6_t {
2132                    struct sock_extended_err ee;
2133                    struct sockaddr_in6 offender;
2134                 };
2135                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2136                 struct errhdr6_t *target_errh =
2137                     (struct errhdr6_t *)target_data;
2138 
2139                 if (len != sizeof(struct errhdr6_t) ||
2140                     tgt_len != sizeof(struct errhdr6_t)) {
2141                     goto unimplemented;
2142                 }
2143                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2144                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2145                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2146                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2147                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2148                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2149                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2150                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2151                     (void *) &errh->offender, sizeof(errh->offender));
2152                 break;
2153             }
2154             default:
2155                 goto unimplemented;
2156             }
2157             break;
2158 
2159         default:
2160         unimplemented:
2161             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2162                           cmsg->cmsg_level, cmsg->cmsg_type);
2163             memcpy(target_data, data, MIN(len, tgt_len));
2164             if (tgt_len > len) {
2165                 memset(target_data + len, 0, tgt_len - len);
2166             }
2167         }
2168 
2169         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2170         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2171         if (msg_controllen < tgt_space) {
2172             tgt_space = msg_controllen;
2173         }
2174         msg_controllen -= tgt_space;
2175         space += tgt_space;
2176         cmsg = CMSG_NXTHDR(msgh, cmsg);
2177         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2178                                          target_cmsg_start);
2179     }
2180     unlock_user(target_cmsg, target_cmsg_addr, space);
2181  the_end:
2182     target_msgh->msg_controllen = tswapal(space);
2183     return 0;
2184 }
2185 
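/*
 * A minimal guest-side sketch (illustrative only, not compiled into QEMU)
 * of how the truncation handled above is observed: when the guest's
 * control buffer is too small for the converted ancillary data,
 * host_to_target_cmsg() copies what fits and sets MSG_CTRUNC in
 * msg_flags, just as the kernel's put_cmsg() would.  The helper name
 * below is an assumption for illustration.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_with_cmsg(int sock, void *data, size_t len,
                              void *cbuf, size_t clen)
{
    struct iovec iov = { .iov_base = data, .iov_len = len };
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = cbuf, .msg_controllen = clen,
    };
    ssize_t n = recvmsg(sock, &msg, 0);

    if (n >= 0 && (msg.msg_flags & MSG_CTRUNC)) {
        /* The ancillary payload did not fit and was truncated. */
    }
    return n;
}
#endif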
2186 /* do_setsockopt() must return target values and target errnos. */
2187 static abi_long do_setsockopt(int sockfd, int level, int optname,
2188                               abi_ulong optval_addr, socklen_t optlen)
2189 {
2190     abi_long ret;
2191     int val;
2192     struct ip_mreqn *ip_mreq;
2193     struct ip_mreq_source *ip_mreq_source;
2194 
2195     switch(level) {
2196     case SOL_TCP:
2197     case SOL_UDP:
2198         /* TCP and UDP options all take an 'int' value.  */
2199         if (optlen < sizeof(uint32_t))
2200             return -TARGET_EINVAL;
2201 
2202         if (get_user_u32(val, optval_addr))
2203             return -TARGET_EFAULT;
2204         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2205         break;
2206     case SOL_IP:
2207         switch(optname) {
2208         case IP_TOS:
2209         case IP_TTL:
2210         case IP_HDRINCL:
2211         case IP_ROUTER_ALERT:
2212         case IP_RECVOPTS:
2213         case IP_RETOPTS:
2214         case IP_PKTINFO:
2215         case IP_MTU_DISCOVER:
2216         case IP_RECVERR:
2217         case IP_RECVTTL:
2218         case IP_RECVTOS:
2219 #ifdef IP_FREEBIND
2220         case IP_FREEBIND:
2221 #endif
2222         case IP_MULTICAST_TTL:
2223         case IP_MULTICAST_LOOP:
2224             val = 0;
2225             if (optlen >= sizeof(uint32_t)) {
2226                 if (get_user_u32(val, optval_addr))
2227                     return -TARGET_EFAULT;
2228             } else if (optlen >= 1) {
2229                 if (get_user_u8(val, optval_addr))
2230                     return -TARGET_EFAULT;
2231             }
2232             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2233             break;
2234         case IP_ADD_MEMBERSHIP:
2235         case IP_DROP_MEMBERSHIP:
2236             if (optlen < sizeof (struct target_ip_mreq) ||
2237                 optlen > sizeof (struct target_ip_mreqn))
2238                 return -TARGET_EINVAL;
2239 
2240             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2241             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2242             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2243             break;
2244 
2245         case IP_BLOCK_SOURCE:
2246         case IP_UNBLOCK_SOURCE:
2247         case IP_ADD_SOURCE_MEMBERSHIP:
2248         case IP_DROP_SOURCE_MEMBERSHIP:
2249             if (optlen != sizeof (struct target_ip_mreq_source))
2250                 return -TARGET_EINVAL;
2251 
2252             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2253             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2254             unlock_user (ip_mreq_source, optval_addr, 0);
2255             break;
2256 
2257         default:
2258             goto unimplemented;
2259         }
2260         break;
2261     case SOL_IPV6:
2262         switch (optname) {
2263         case IPV6_MTU_DISCOVER:
2264         case IPV6_MTU:
2265         case IPV6_V6ONLY:
2266         case IPV6_RECVPKTINFO:
2267         case IPV6_UNICAST_HOPS:
2268         case IPV6_MULTICAST_HOPS:
2269         case IPV6_MULTICAST_LOOP:
2270         case IPV6_RECVERR:
2271         case IPV6_RECVHOPLIMIT:
2272         case IPV6_2292HOPLIMIT:
2273         case IPV6_CHECKSUM:
2274         case IPV6_ADDRFORM:
2275         case IPV6_2292PKTINFO:
2276         case IPV6_RECVTCLASS:
2277         case IPV6_RECVRTHDR:
2278         case IPV6_2292RTHDR:
2279         case IPV6_RECVHOPOPTS:
2280         case IPV6_2292HOPOPTS:
2281         case IPV6_RECVDSTOPTS:
2282         case IPV6_2292DSTOPTS:
2283         case IPV6_TCLASS:
2284         case IPV6_ADDR_PREFERENCES:
2285 #ifdef IPV6_RECVPATHMTU
2286         case IPV6_RECVPATHMTU:
2287 #endif
2288 #ifdef IPV6_TRANSPARENT
2289         case IPV6_TRANSPARENT:
2290 #endif
2291 #ifdef IPV6_FREEBIND
2292         case IPV6_FREEBIND:
2293 #endif
2294 #ifdef IPV6_RECVORIGDSTADDR
2295         case IPV6_RECVORIGDSTADDR:
2296 #endif
2297             val = 0;
2298             if (optlen < sizeof(uint32_t)) {
2299                 return -TARGET_EINVAL;
2300             }
2301             if (get_user_u32(val, optval_addr)) {
2302                 return -TARGET_EFAULT;
2303             }
2304             ret = get_errno(setsockopt(sockfd, level, optname,
2305                                        &val, sizeof(val)));
2306             break;
2307         case IPV6_PKTINFO:
2308         {
2309             struct in6_pktinfo pki;
2310 
2311             if (optlen < sizeof(pki)) {
2312                 return -TARGET_EINVAL;
2313             }
2314 
2315             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2316                 return -TARGET_EFAULT;
2317             }
2318 
2319             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2320 
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        &pki, sizeof(pki)));
2323             break;
2324         }
2325         case IPV6_ADD_MEMBERSHIP:
2326         case IPV6_DROP_MEMBERSHIP:
2327         {
2328             struct ipv6_mreq ipv6mreq;
2329 
2330             if (optlen < sizeof(ipv6mreq)) {
2331                 return -TARGET_EINVAL;
2332             }
2333 
2334             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2335                 return -TARGET_EFAULT;
2336             }
2337 
2338             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2339 
2340             ret = get_errno(setsockopt(sockfd, level, optname,
2341                                        &ipv6mreq, sizeof(ipv6mreq)));
2342             break;
2343         }
2344         default:
2345             goto unimplemented;
2346         }
2347         break;
2348     case SOL_ICMPV6:
2349         switch (optname) {
2350         case ICMPV6_FILTER:
2351         {
2352             struct icmp6_filter icmp6f;
2353 
2354             if (optlen > sizeof(icmp6f)) {
2355                 optlen = sizeof(icmp6f);
2356             }
2357 
2358             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2359                 return -TARGET_EFAULT;
2360             }
2361 
2362             for (val = 0; val < 8; val++) {
2363                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2364             }
2365 
2366             ret = get_errno(setsockopt(sockfd, level, optname,
2367                                        &icmp6f, optlen));
2368             break;
2369         }
2370         default:
2371             goto unimplemented;
2372         }
2373         break;
2374     case SOL_RAW:
2375         switch (optname) {
2376         case ICMP_FILTER:
2377         case IPV6_CHECKSUM:
2378             /* These take a u32 value. */
2379             if (optlen < sizeof(uint32_t)) {
2380                 return -TARGET_EINVAL;
2381             }
2382 
2383             if (get_user_u32(val, optval_addr)) {
2384                 return -TARGET_EFAULT;
2385             }
2386             ret = get_errno(setsockopt(sockfd, level, optname,
2387                                        &val, sizeof(val)));
2388             break;
2389 
2390         default:
2391             goto unimplemented;
2392         }
2393         break;
2394 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2395     case SOL_ALG:
2396         switch (optname) {
2397         case ALG_SET_KEY:
2398         {
2399             char *alg_key = g_malloc(optlen);
2400 
2401             if (!alg_key) {
2402                 return -TARGET_ENOMEM;
2403             }
2404             if (copy_from_user(alg_key, optval_addr, optlen)) {
2405                 g_free(alg_key);
2406                 return -TARGET_EFAULT;
2407             }
2408             ret = get_errno(setsockopt(sockfd, level, optname,
2409                                        alg_key, optlen));
2410             g_free(alg_key);
2411             break;
2412         }
2413         case ALG_SET_AEAD_AUTHSIZE:
2414         {
2415             ret = get_errno(setsockopt(sockfd, level, optname,
2416                                        NULL, optlen));
2417             break;
2418         }
2419         default:
2420             goto unimplemented;
2421         }
2422         break;
2423 #endif
2424     case TARGET_SOL_SOCKET:
2425         switch (optname) {
2426         case TARGET_SO_RCVTIMEO:
2427         {
2428                 struct timeval tv;
2429 
2430                 optname = SO_RCVTIMEO;
2431 
2432 set_timeout:
2433                 if (optlen != sizeof(struct target_timeval)) {
2434                     return -TARGET_EINVAL;
2435                 }
2436 
2437                 if (copy_from_user_timeval(&tv, optval_addr)) {
2438                     return -TARGET_EFAULT;
2439                 }
2440 
2441                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2442                                 &tv, sizeof(tv)));
2443                 return ret;
2444         }
2445         case TARGET_SO_SNDTIMEO:
2446                 optname = SO_SNDTIMEO;
2447                 goto set_timeout;
2448         case TARGET_SO_ATTACH_FILTER:
2449         {
2450                 struct target_sock_fprog *tfprog;
2451                 struct target_sock_filter *tfilter;
2452                 struct sock_fprog fprog;
2453                 struct sock_filter *filter;
2454                 int i;
2455 
2456                 if (optlen != sizeof(*tfprog)) {
2457                     return -TARGET_EINVAL;
2458                 }
2459                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2460                     return -TARGET_EFAULT;
2461                 }
2462                 if (!lock_user_struct(VERIFY_READ, tfilter,
2463                                       tswapal(tfprog->filter), 0)) {
2464                     unlock_user_struct(tfprog, optval_addr, 1);
2465                     return -TARGET_EFAULT;
2466                 }
2467 
2468                 fprog.len = tswap16(tfprog->len);
2469                 filter = g_try_new(struct sock_filter, fprog.len);
2470                 if (filter == NULL) {
2471                     unlock_user_struct(tfilter, tfprog->filter, 1);
2472                     unlock_user_struct(tfprog, optval_addr, 1);
2473                     return -TARGET_ENOMEM;
2474                 }
2475                 for (i = 0; i < fprog.len; i++) {
2476                     filter[i].code = tswap16(tfilter[i].code);
2477                     filter[i].jt = tfilter[i].jt;
2478                     filter[i].jf = tfilter[i].jf;
2479                     filter[i].k = tswap32(tfilter[i].k);
2480                 }
2481                 fprog.filter = filter;
2482 
2483                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2484                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2485                 g_free(filter);
2486 
2487                 unlock_user_struct(tfilter, tfprog->filter, 1);
2488                 unlock_user_struct(tfprog, optval_addr, 1);
2489                 return ret;
2490         }
2491         case TARGET_SO_BINDTODEVICE:
2492         {
2493                 char *dev_ifname, *addr_ifname;
2494 
2495                 if (optlen > IFNAMSIZ - 1) {
2496                     optlen = IFNAMSIZ - 1;
2497                 }
2498                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2499                 if (!dev_ifname) {
2500                     return -TARGET_EFAULT;
2501                 }
2502                 optname = SO_BINDTODEVICE;
2503                 addr_ifname = alloca(IFNAMSIZ);
2504                 memcpy(addr_ifname, dev_ifname, optlen);
2505                 addr_ifname[optlen] = 0;
2506                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2507                                            addr_ifname, optlen));
2508                 unlock_user(dev_ifname, optval_addr, 0);
2509                 return ret;
2510         }
2511         case TARGET_SO_LINGER:
2512         {
2513                 struct linger lg;
2514                 struct target_linger *tlg;
2515 
2516                 if (optlen != sizeof(struct target_linger)) {
2517                     return -TARGET_EINVAL;
2518                 }
2519                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2520                     return -TARGET_EFAULT;
2521                 }
2522                 __get_user(lg.l_onoff, &tlg->l_onoff);
2523                 __get_user(lg.l_linger, &tlg->l_linger);
2524                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2525                                 &lg, sizeof(lg)));
2526                 unlock_user_struct(tlg, optval_addr, 0);
2527                 return ret;
2528         }
2529             /* Options with 'int' argument.  */
2530         case TARGET_SO_DEBUG:
2531                 optname = SO_DEBUG;
2532                 break;
2533         case TARGET_SO_REUSEADDR:
2534                 optname = SO_REUSEADDR;
2535                 break;
2536 #ifdef SO_REUSEPORT
2537         case TARGET_SO_REUSEPORT:
2538                 optname = SO_REUSEPORT;
2539                 break;
2540 #endif
2541         case TARGET_SO_TYPE:
2542                 optname = SO_TYPE;
2543                 break;
2544         case TARGET_SO_ERROR:
2545                 optname = SO_ERROR;
2546                 break;
2547         case TARGET_SO_DONTROUTE:
2548                 optname = SO_DONTROUTE;
2549                 break;
2550         case TARGET_SO_BROADCAST:
2551                 optname = SO_BROADCAST;
2552                 break;
2553         case TARGET_SO_SNDBUF:
2554                 optname = SO_SNDBUF;
2555                 break;
2556         case TARGET_SO_SNDBUFFORCE:
2557                 optname = SO_SNDBUFFORCE;
2558                 break;
2559         case TARGET_SO_RCVBUF:
2560                 optname = SO_RCVBUF;
2561                 break;
2562         case TARGET_SO_RCVBUFFORCE:
2563                 optname = SO_RCVBUFFORCE;
2564                 break;
2565         case TARGET_SO_KEEPALIVE:
2566                 optname = SO_KEEPALIVE;
2567                 break;
2568         case TARGET_SO_OOBINLINE:
2569                 optname = SO_OOBINLINE;
2570                 break;
2571         case TARGET_SO_NO_CHECK:
2572                 optname = SO_NO_CHECK;
2573                 break;
2574         case TARGET_SO_PRIORITY:
2575                 optname = SO_PRIORITY;
2576                 break;
2577 #ifdef SO_BSDCOMPAT
2578         case TARGET_SO_BSDCOMPAT:
2579                 optname = SO_BSDCOMPAT;
2580                 break;
2581 #endif
2582         case TARGET_SO_PASSCRED:
2583                 optname = SO_PASSCRED;
2584                 break;
2585         case TARGET_SO_PASSSEC:
2586                 optname = SO_PASSSEC;
2587                 break;
2588         case TARGET_SO_TIMESTAMP:
2589                 optname = SO_TIMESTAMP;
2590                 break;
2591         case TARGET_SO_RCVLOWAT:
2592                 optname = SO_RCVLOWAT;
2593                 break;
2594         default:
2595             goto unimplemented;
2596         }
2597         if (optlen < sizeof(uint32_t))
2598             return -TARGET_EINVAL;
2599 
2600         if (get_user_u32(val, optval_addr))
2601             return -TARGET_EFAULT;
2602         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2603         break;
2604 #ifdef SOL_NETLINK
2605     case SOL_NETLINK:
2606         switch (optname) {
2607         case NETLINK_PKTINFO:
2608         case NETLINK_ADD_MEMBERSHIP:
2609         case NETLINK_DROP_MEMBERSHIP:
2610         case NETLINK_BROADCAST_ERROR:
2611         case NETLINK_NO_ENOBUFS:
2612 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613         case NETLINK_LISTEN_ALL_NSID:
2614         case NETLINK_CAP_ACK:
2615 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617         case NETLINK_EXT_ACK:
2618 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620         case NETLINK_GET_STRICT_CHK:
2621 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2622             break;
2623         default:
2624             goto unimplemented;
2625         }
2626         val = 0;
2627         if (optlen < sizeof(uint32_t)) {
2628             return -TARGET_EINVAL;
2629         }
2630         if (get_user_u32(val, optval_addr)) {
2631             return -TARGET_EFAULT;
2632         }
2633         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2634                                    sizeof(val)));
2635         break;
2636 #endif /* SOL_NETLINK */
2637     default:
2638     unimplemented:
2639         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2640                       level, optname);
2641         ret = -TARGET_ENOPROTOOPT;
2642     }
2643     return ret;
2644 }
2645 
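/*
 * A minimal guest-side sketch (illustrative only, not compiled into QEMU)
 * that exercises the TARGET_SO_RCVTIMEO path above: the guest's struct
 * timeval is read with copy_from_user_timeval() and re-issued to the host
 * setsockopt() using the host-sized struct timeval.  The helper name below
 * is an assumption for illustration.
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int set_rcv_timeout(int sock, long seconds)
{
    struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };

    return setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
#endif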
2646 /* do_getsockopt() Must return target values and target errnos. */
2647 static abi_long do_getsockopt(int sockfd, int level, int optname,
2648                               abi_ulong optval_addr, abi_ulong optlen)
2649 {
2650     abi_long ret;
2651     int len, val;
2652     socklen_t lv;
2653 
2654     switch(level) {
2655     case TARGET_SOL_SOCKET:
2656         level = SOL_SOCKET;
2657         switch (optname) {
2658         /* These don't just return a single integer */
2659         case TARGET_SO_PEERNAME:
2660             goto unimplemented;
2661         case TARGET_SO_RCVTIMEO: {
2662             struct timeval tv;
2663             socklen_t tvlen;
2664 
2665             optname = SO_RCVTIMEO;
2666 
2667 get_timeout:
2668             if (get_user_u32(len, optlen)) {
2669                 return -TARGET_EFAULT;
2670             }
2671             if (len < 0) {
2672                 return -TARGET_EINVAL;
2673             }
2674 
2675             tvlen = sizeof(tv);
2676             ret = get_errno(getsockopt(sockfd, level, optname,
2677                                        &tv, &tvlen));
2678             if (ret < 0) {
2679                 return ret;
2680             }
2681             if (len > sizeof(struct target_timeval)) {
2682                 len = sizeof(struct target_timeval);
2683             }
2684             if (copy_to_user_timeval(optval_addr, &tv)) {
2685                 return -TARGET_EFAULT;
2686             }
2687             if (put_user_u32(len, optlen)) {
2688                 return -TARGET_EFAULT;
2689             }
2690             break;
2691         }
2692         case TARGET_SO_SNDTIMEO:
2693             optname = SO_SNDTIMEO;
2694             goto get_timeout;
2695         case TARGET_SO_PEERCRED: {
2696             struct ucred cr;
2697             socklen_t crlen;
2698             struct target_ucred *tcr;
2699 
2700             if (get_user_u32(len, optlen)) {
2701                 return -TARGET_EFAULT;
2702             }
2703             if (len < 0) {
2704                 return -TARGET_EINVAL;
2705             }
2706 
2707             crlen = sizeof(cr);
2708             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2709                                        &cr, &crlen));
2710             if (ret < 0) {
2711                 return ret;
2712             }
2713             if (len > crlen) {
2714                 len = crlen;
2715             }
2716             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2717                 return -TARGET_EFAULT;
2718             }
2719             __put_user(cr.pid, &tcr->pid);
2720             __put_user(cr.uid, &tcr->uid);
2721             __put_user(cr.gid, &tcr->gid);
2722             unlock_user_struct(tcr, optval_addr, 1);
2723             if (put_user_u32(len, optlen)) {
2724                 return -TARGET_EFAULT;
2725             }
2726             break;
2727         }
2728         case TARGET_SO_PEERSEC: {
2729             char *name;
2730 
2731             if (get_user_u32(len, optlen)) {
2732                 return -TARGET_EFAULT;
2733             }
2734             if (len < 0) {
2735                 return -TARGET_EINVAL;
2736             }
2737             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2738             if (!name) {
2739                 return -TARGET_EFAULT;
2740             }
2741             lv = len;
2742             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2743                                        name, &lv));
2744             if (put_user_u32(lv, optlen)) {
2745                 ret = -TARGET_EFAULT;
2746             }
2747             unlock_user(name, optval_addr, lv);
2748             break;
2749         }
2750         case TARGET_SO_LINGER:
2751         {
2752             struct linger lg;
2753             socklen_t lglen;
2754             struct target_linger *tlg;
2755 
2756             if (get_user_u32(len, optlen)) {
2757                 return -TARGET_EFAULT;
2758             }
2759             if (len < 0) {
2760                 return -TARGET_EINVAL;
2761             }
2762 
2763             lglen = sizeof(lg);
2764             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2765                                        &lg, &lglen));
2766             if (ret < 0) {
2767                 return ret;
2768             }
2769             if (len > lglen) {
2770                 len = lglen;
2771             }
2772             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2773                 return -TARGET_EFAULT;
2774             }
2775             __put_user(lg.l_onoff, &tlg->l_onoff);
2776             __put_user(lg.l_linger, &tlg->l_linger);
2777             unlock_user_struct(tlg, optval_addr, 1);
2778             if (put_user_u32(len, optlen)) {
2779                 return -TARGET_EFAULT;
2780             }
2781             break;
2782         }
2783         /* Options with 'int' argument.  */
2784         case TARGET_SO_DEBUG:
2785             optname = SO_DEBUG;
2786             goto int_case;
2787         case TARGET_SO_REUSEADDR:
2788             optname = SO_REUSEADDR;
2789             goto int_case;
2790 #ifdef SO_REUSEPORT
2791         case TARGET_SO_REUSEPORT:
2792             optname = SO_REUSEPORT;
2793             goto int_case;
2794 #endif
2795         case TARGET_SO_TYPE:
2796             optname = SO_TYPE;
2797             goto int_case;
2798         case TARGET_SO_ERROR:
2799             optname = SO_ERROR;
2800             goto int_case;
2801         case TARGET_SO_DONTROUTE:
2802             optname = SO_DONTROUTE;
2803             goto int_case;
2804         case TARGET_SO_BROADCAST:
2805             optname = SO_BROADCAST;
2806             goto int_case;
2807         case TARGET_SO_SNDBUF:
2808             optname = SO_SNDBUF;
2809             goto int_case;
2810         case TARGET_SO_RCVBUF:
2811             optname = SO_RCVBUF;
2812             goto int_case;
2813         case TARGET_SO_KEEPALIVE:
2814             optname = SO_KEEPALIVE;
2815             goto int_case;
2816         case TARGET_SO_OOBINLINE:
2817             optname = SO_OOBINLINE;
2818             goto int_case;
2819         case TARGET_SO_NO_CHECK:
2820             optname = SO_NO_CHECK;
2821             goto int_case;
2822         case TARGET_SO_PRIORITY:
2823             optname = SO_PRIORITY;
2824             goto int_case;
2825 #ifdef SO_BSDCOMPAT
2826         case TARGET_SO_BSDCOMPAT:
2827             optname = SO_BSDCOMPAT;
2828             goto int_case;
2829 #endif
2830         case TARGET_SO_PASSCRED:
2831             optname = SO_PASSCRED;
2832             goto int_case;
2833         case TARGET_SO_TIMESTAMP:
2834             optname = SO_TIMESTAMP;
2835             goto int_case;
2836         case TARGET_SO_RCVLOWAT:
2837             optname = SO_RCVLOWAT;
2838             goto int_case;
2839         case TARGET_SO_ACCEPTCONN:
2840             optname = SO_ACCEPTCONN;
2841             goto int_case;
2842         case TARGET_SO_PROTOCOL:
2843             optname = SO_PROTOCOL;
2844             goto int_case;
2845         case TARGET_SO_DOMAIN:
2846             optname = SO_DOMAIN;
2847             goto int_case;
2848         default:
2849             goto int_case;
2850         }
2851         break;
2852     case SOL_TCP:
2853     case SOL_UDP:
2854         /* TCP and UDP options all take an 'int' value.  */
2855     int_case:
2856         if (get_user_u32(len, optlen))
2857             return -TARGET_EFAULT;
2858         if (len < 0)
2859             return -TARGET_EINVAL;
2860         lv = sizeof(lv);
2861         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2862         if (ret < 0)
2863             return ret;
2864         if (optname == SO_TYPE) {
2865             val = host_to_target_sock_type(val);
2866         }
2867         if (len > lv)
2868             len = lv;
2869         if (len == 4) {
2870             if (put_user_u32(val, optval_addr))
2871                 return -TARGET_EFAULT;
2872         } else {
2873             if (put_user_u8(val, optval_addr))
2874                 return -TARGET_EFAULT;
2875         }
2876         if (put_user_u32(len, optlen))
2877             return -TARGET_EFAULT;
2878         break;
2879     case SOL_IP:
2880         switch(optname) {
2881         case IP_TOS:
2882         case IP_TTL:
2883         case IP_HDRINCL:
2884         case IP_ROUTER_ALERT:
2885         case IP_RECVOPTS:
2886         case IP_RETOPTS:
2887         case IP_PKTINFO:
2888         case IP_MTU_DISCOVER:
2889         case IP_RECVERR:
2890         case IP_RECVTOS:
2891 #ifdef IP_FREEBIND
2892         case IP_FREEBIND:
2893 #endif
2894         case IP_MULTICAST_TTL:
2895         case IP_MULTICAST_LOOP:
2896             if (get_user_u32(len, optlen))
2897                 return -TARGET_EFAULT;
2898             if (len < 0)
2899                 return -TARGET_EINVAL;
2900             lv = sizeof(lv);
2901             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2902             if (ret < 0)
2903                 return ret;
2904             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2905                 len = 1;
2906                 if (put_user_u32(len, optlen)
2907                     || put_user_u8(val, optval_addr))
2908                     return -TARGET_EFAULT;
2909             } else {
2910                 if (len > sizeof(int))
2911                     len = sizeof(int);
2912                 if (put_user_u32(len, optlen)
2913                     || put_user_u32(val, optval_addr))
2914                     return -TARGET_EFAULT;
2915             }
2916             break;
2917         default:
2918             ret = -TARGET_ENOPROTOOPT;
2919             break;
2920         }
2921         break;
2922     case SOL_IPV6:
2923         switch (optname) {
2924         case IPV6_MTU_DISCOVER:
2925         case IPV6_MTU:
2926         case IPV6_V6ONLY:
2927         case IPV6_RECVPKTINFO:
2928         case IPV6_UNICAST_HOPS:
2929         case IPV6_MULTICAST_HOPS:
2930         case IPV6_MULTICAST_LOOP:
2931         case IPV6_RECVERR:
2932         case IPV6_RECVHOPLIMIT:
2933         case IPV6_2292HOPLIMIT:
2934         case IPV6_CHECKSUM:
2935         case IPV6_ADDRFORM:
2936         case IPV6_2292PKTINFO:
2937         case IPV6_RECVTCLASS:
2938         case IPV6_RECVRTHDR:
2939         case IPV6_2292RTHDR:
2940         case IPV6_RECVHOPOPTS:
2941         case IPV6_2292HOPOPTS:
2942         case IPV6_RECVDSTOPTS:
2943         case IPV6_2292DSTOPTS:
2944         case IPV6_TCLASS:
2945         case IPV6_ADDR_PREFERENCES:
2946 #ifdef IPV6_RECVPATHMTU
2947         case IPV6_RECVPATHMTU:
2948 #endif
2949 #ifdef IPV6_TRANSPARENT
2950         case IPV6_TRANSPARENT:
2951 #endif
2952 #ifdef IPV6_FREEBIND
2953         case IPV6_FREEBIND:
2954 #endif
2955 #ifdef IPV6_RECVORIGDSTADDR
2956         case IPV6_RECVORIGDSTADDR:
2957 #endif
2958             if (get_user_u32(len, optlen))
2959                 return -TARGET_EFAULT;
2960             if (len < 0)
2961                 return -TARGET_EINVAL;
2962             lv = sizeof(lv);
2963             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2964             if (ret < 0)
2965                 return ret;
2966             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2967                 len = 1;
2968                 if (put_user_u32(len, optlen)
2969                     || put_user_u8(val, optval_addr))
2970                     return -TARGET_EFAULT;
2971             } else {
2972                 if (len > sizeof(int))
2973                     len = sizeof(int);
2974                 if (put_user_u32(len, optlen)
2975                     || put_user_u32(val, optval_addr))
2976                     return -TARGET_EFAULT;
2977             }
2978             break;
2979         default:
2980             ret = -TARGET_ENOPROTOOPT;
2981             break;
2982         }
2983         break;
2984 #ifdef SOL_NETLINK
2985     case SOL_NETLINK:
2986         switch (optname) {
2987         case NETLINK_PKTINFO:
2988         case NETLINK_BROADCAST_ERROR:
2989         case NETLINK_NO_ENOBUFS:
2990 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2991         case NETLINK_LISTEN_ALL_NSID:
2992         case NETLINK_CAP_ACK:
2993 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2994 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2995         case NETLINK_EXT_ACK:
2996 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2997 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2998         case NETLINK_GET_STRICT_CHK:
2999 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
3000             if (get_user_u32(len, optlen)) {
3001                 return -TARGET_EFAULT;
3002             }
3003             if (len != sizeof(val)) {
3004                 return -TARGET_EINVAL;
3005             }
3006             lv = len;
3007             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3008             if (ret < 0) {
3009                 return ret;
3010             }
3011             if (put_user_u32(lv, optlen)
3012                 || put_user_u32(val, optval_addr)) {
3013                 return -TARGET_EFAULT;
3014             }
3015             break;
3016 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3017         case NETLINK_LIST_MEMBERSHIPS:
3018         {
3019             uint32_t *results;
3020             int i;
3021             if (get_user_u32(len, optlen)) {
3022                 return -TARGET_EFAULT;
3023             }
3024             if (len < 0) {
3025                 return -TARGET_EINVAL;
3026             }
3027             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3028             if (!results) {
3029                 return -TARGET_EFAULT;
3030             }
3031             lv = len;
3032             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3033             if (ret < 0) {
3034                 unlock_user(results, optval_addr, 0);
3035                 return ret;
3036             }
3037             /* Swap host endianness to target endianness. */
3038             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3039                 results[i] = tswap32(results[i]);
3040             }
3041             if (put_user_u32(lv, optlen)) {
3042                 return -TARGET_EFAULT;
3043             }
3044             unlock_user(results, optval_addr, 0);
3045             break;
3046         }
3047 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3048         default:
3049             goto unimplemented;
3050         }
3051         break;
3052 #endif /* SOL_NETLINK */
3053     default:
3054     unimplemented:
3055         qemu_log_mask(LOG_UNIMP,
3056                       "getsockopt level=%d optname=%d not yet supported\n",
3057                       level, optname);
3058         ret = -TARGET_EOPNOTSUPP;
3059         break;
3060     }
3061     return ret;
3062 }
3063 
3064 /* Convert target low/high pair representing file offset into the host
3065  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3066  * as the kernel doesn't handle them either.
3067  */
3068 static void target_to_host_low_high(abi_ulong tlow,
3069                                     abi_ulong thigh,
3070                                     unsigned long *hlow,
3071                                     unsigned long *hhigh)
3072 {
3073     uint64_t off = tlow |
3074         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3075         TARGET_LONG_BITS / 2;
3076 
3077     *hlow = off;
3078     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3079 }
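/*
 * Illustration of target_to_host_low_high() above (a sketch, not used by the
 * code): with a 32-bit target and a 64-bit host,
 *     target_to_host_low_high(0x89abcdef, 0x01234567, &lo, &hi)
 * reassembles off = 0x0123456789abcdef, so lo receives the whole 64-bit
 * offset and hi becomes 0; on a 32-bit host the same offset is split back
 * into lo = 0x89abcdef and hi = 0x01234567.  The double shift by
 * TARGET_LONG_BITS / 2 (and HOST_LONG_BITS / 2) avoids shifting a value by
 * its full width, which would be undefined behaviour.
 */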
3080 
3081 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3082                                 abi_ulong count, int copy)
3083 {
3084     struct target_iovec *target_vec;
3085     struct iovec *vec;
3086     abi_ulong total_len, max_len;
3087     int i;
3088     int err = 0;
3089     bool bad_address = false;
3090 
3091     if (count == 0) {
3092         errno = 0;
3093         return NULL;
3094     }
3095     if (count > IOV_MAX) {
3096         errno = EINVAL;
3097         return NULL;
3098     }
3099 
3100     vec = g_try_new0(struct iovec, count);
3101     if (vec == NULL) {
3102         errno = ENOMEM;
3103         return NULL;
3104     }
3105 
3106     target_vec = lock_user(VERIFY_READ, target_addr,
3107                            count * sizeof(struct target_iovec), 1);
3108     if (target_vec == NULL) {
3109         err = EFAULT;
3110         goto fail2;
3111     }
3112 
3113     /* ??? If host page size > target page size, this will result in a
3114        value larger than what we can actually support.  */
3115     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3116     total_len = 0;
3117 
3118     for (i = 0; i < count; i++) {
3119         abi_ulong base = tswapal(target_vec[i].iov_base);
3120         abi_long len = tswapal(target_vec[i].iov_len);
3121 
3122         if (len < 0) {
3123             err = EINVAL;
3124             goto fail;
3125         } else if (len == 0) {
3126             /* Zero length pointer is ignored.  */
3127             vec[i].iov_base = 0;
3128         } else {
3129             vec[i].iov_base = lock_user(type, base, len, copy);
3130             /* If the first buffer pointer is bad, this is a fault.  But
3131              * subsequent bad buffers will result in a partial write; this
3132              * is realized by filling the vector with null pointers and
3133              * zero lengths. */
3134             if (!vec[i].iov_base) {
3135                 if (i == 0) {
3136                     err = EFAULT;
3137                     goto fail;
3138                 } else {
3139                     bad_address = true;
3140                 }
3141             }
3142             if (bad_address) {
3143                 len = 0;
3144             }
3145             if (len > max_len - total_len) {
3146                 len = max_len - total_len;
3147             }
3148         }
3149         vec[i].iov_len = len;
3150         total_len += len;
3151     }
3152 
3153     unlock_user(target_vec, target_addr, 0);
3154     return vec;
3155 
3156  fail:
3157     while (--i >= 0) {
3158         if (tswapal(target_vec[i].iov_len) > 0) {
3159             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3160         }
3161     }
3162     unlock_user(target_vec, target_addr, 0);
3163  fail2:
3164     g_free(vec);
3165     errno = err;
3166     return NULL;
3167 }
3168 
3169 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3170                          abi_ulong count, int copy)
3171 {
3172     struct target_iovec *target_vec;
3173     int i;
3174 
3175     target_vec = lock_user(VERIFY_READ, target_addr,
3176                            count * sizeof(struct target_iovec), 1);
3177     if (target_vec) {
3178         for (i = 0; i < count; i++) {
3179             abi_ulong base = tswapal(target_vec[i].iov_base);
3180             abi_long len = tswapal(target_vec[i].iov_len);
3181             if (len < 0) {
3182                 break;
3183             }
3184             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3185         }
3186         unlock_user(target_vec, target_addr, 0);
3187     }
3188 
3189     g_free(vec);
3190 }
3191 
3192 static inline int target_to_host_sock_type(int *type)
3193 {
3194     int host_type = 0;
3195     int target_type = *type;
3196 
3197     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3198     case TARGET_SOCK_DGRAM:
3199         host_type = SOCK_DGRAM;
3200         break;
3201     case TARGET_SOCK_STREAM:
3202         host_type = SOCK_STREAM;
3203         break;
3204     default:
3205         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3206         break;
3207     }
3208     if (target_type & TARGET_SOCK_CLOEXEC) {
3209 #if defined(SOCK_CLOEXEC)
3210         host_type |= SOCK_CLOEXEC;
3211 #else
3212         return -TARGET_EINVAL;
3213 #endif
3214     }
3215     if (target_type & TARGET_SOCK_NONBLOCK) {
3216 #if defined(SOCK_NONBLOCK)
3217         host_type |= SOCK_NONBLOCK;
3218 #elif !defined(O_NONBLOCK)
3219         return -TARGET_EINVAL;
3220 #endif
3221     }
3222     *type = host_type;
3223     return 0;
3224 }
3225 
3226 /* Try to emulate socket type flags after socket creation.  */
3227 static int sock_flags_fixup(int fd, int target_type)
3228 {
3229 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3230     if (target_type & TARGET_SOCK_NONBLOCK) {
3231         int flags = fcntl(fd, F_GETFL);
3232         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3233             close(fd);
3234             return -TARGET_EINVAL;
3235         }
3236     }
3237 #endif
3238     return fd;
3239 }
3240 
3241 /* do_socket() Must return target values and target errnos. */
3242 static abi_long do_socket(int domain, int type, int protocol)
3243 {
3244     int target_type = type;
3245     int ret;
3246 
3247     ret = target_to_host_sock_type(&type);
3248     if (ret) {
3249         return ret;
3250     }
3251 
3252     if (domain == PF_NETLINK && !(
3253 #ifdef CONFIG_RTNETLINK
3254          protocol == NETLINK_ROUTE ||
3255 #endif
3256          protocol == NETLINK_KOBJECT_UEVENT ||
3257          protocol == NETLINK_AUDIT)) {
3258         return -TARGET_EPROTONOSUPPORT;
3259     }
3260 
3261     if (domain == AF_PACKET ||
3262         (domain == AF_INET && type == SOCK_PACKET)) {
3263         protocol = tswap16(protocol);
3264     }
3265 
3266     ret = get_errno(socket(domain, type, protocol));
3267     if (ret >= 0) {
3268         ret = sock_flags_fixup(ret, target_type);
3269         if (type == SOCK_PACKET) {
3270             /* Handle an obsolete case:
3271              * if the socket type is SOCK_PACKET, bind by name
3272              */
3273             fd_trans_register(ret, &target_packet_trans);
3274         } else if (domain == PF_NETLINK) {
3275             switch (protocol) {
3276 #ifdef CONFIG_RTNETLINK
3277             case NETLINK_ROUTE:
3278                 fd_trans_register(ret, &target_netlink_route_trans);
3279                 break;
3280 #endif
3281             case NETLINK_KOBJECT_UEVENT:
3282                 /* nothing to do: messages are strings */
3283                 break;
3284             case NETLINK_AUDIT:
3285                 fd_trans_register(ret, &target_netlink_audit_trans);
3286                 break;
3287             default:
3288                 g_assert_not_reached();
3289             }
3290         }
3291     }
3292     return ret;
3293 }
3294 
3295 /* do_bind() Must return target values and target errnos. */
3296 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3297                         socklen_t addrlen)
3298 {
3299     void *addr;
3300     abi_long ret;
3301 
3302     if ((int)addrlen < 0) {
3303         return -TARGET_EINVAL;
3304     }
3305 
3306     addr = alloca(addrlen+1);
3307 
3308     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309     if (ret)
3310         return ret;
3311 
3312     return get_errno(bind(sockfd, addr, addrlen));
3313 }
3314 
3315 /* do_connect() Must return target values and target errnos. */
3316 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3317                            socklen_t addrlen)
3318 {
3319     void *addr;
3320     abi_long ret;
3321 
3322     if ((int)addrlen < 0) {
3323         return -TARGET_EINVAL;
3324     }
3325 
3326     addr = alloca(addrlen+1);
3327 
3328     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3329     if (ret)
3330         return ret;
3331 
3332     return get_errno(safe_connect(sockfd, addr, addrlen));
3333 }
3334 
3335 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3336 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3337                                       int flags, int send)
3338 {
3339     abi_long ret, len;
3340     struct msghdr msg;
3341     abi_ulong count;
3342     struct iovec *vec;
3343     abi_ulong target_vec;
3344 
3345     if (msgp->msg_name) {
3346         msg.msg_namelen = tswap32(msgp->msg_namelen);
3347         msg.msg_name = alloca(msg.msg_namelen+1);
3348         ret = target_to_host_sockaddr(fd, msg.msg_name,
3349                                       tswapal(msgp->msg_name),
3350                                       msg.msg_namelen);
3351         if (ret == -TARGET_EFAULT) {
3352             /* For connected sockets msg_name and msg_namelen must
3353              * be ignored, so returning EFAULT immediately is wrong.
3354              * Instead, pass a bad msg_name to the host kernel, and
3355              * let it decide whether to return EFAULT or not.
3356              */
3357             msg.msg_name = (void *)-1;
3358         } else if (ret) {
3359             goto out2;
3360         }
3361     } else {
3362         msg.msg_name = NULL;
3363         msg.msg_namelen = 0;
3364     }
3365     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3366     msg.msg_control = alloca(msg.msg_controllen);
3367     memset(msg.msg_control, 0, msg.msg_controllen);
3368 
3369     msg.msg_flags = tswap32(msgp->msg_flags);
3370 
3371     count = tswapal(msgp->msg_iovlen);
3372     target_vec = tswapal(msgp->msg_iov);
3373 
3374     if (count > IOV_MAX) {
3375         /* sendmsg/recvmsg return a different errno for this condition than
3376          * readv/writev, so we must catch it here before lock_iovec() does.
3377          */
3378         ret = -TARGET_EMSGSIZE;
3379         goto out2;
3380     }
3381 
3382     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3383                      target_vec, count, send);
3384     if (vec == NULL) {
3385         ret = -host_to_target_errno(errno);
3386         goto out2;
3387     }
3388     msg.msg_iovlen = count;
3389     msg.msg_iov = vec;
3390 
3391     if (send) {
3392         if (fd_trans_target_to_host_data(fd)) {
3393             void *host_msg;
3394 
3395             host_msg = g_malloc(msg.msg_iov->iov_len);
3396             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3397             ret = fd_trans_target_to_host_data(fd)(host_msg,
3398                                                    msg.msg_iov->iov_len);
3399             if (ret >= 0) {
3400                 msg.msg_iov->iov_base = host_msg;
3401                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3402             }
3403             g_free(host_msg);
3404         } else {
3405             ret = target_to_host_cmsg(&msg, msgp);
3406             if (ret == 0) {
3407                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3408             }
3409         }
3410     } else {
3411         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3412         if (!is_error(ret)) {
3413             len = ret;
3414             if (fd_trans_host_to_target_data(fd)) {
3415                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3416                                                MIN(msg.msg_iov->iov_len, len));
3417             } else {
3418                 ret = host_to_target_cmsg(msgp, &msg);
3419             }
3420             if (!is_error(ret)) {
3421                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3422                 msgp->msg_flags = tswap32(msg.msg_flags);
3423                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3424                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3425                                     msg.msg_name, msg.msg_namelen);
3426                     if (ret) {
3427                         goto out;
3428                     }
3429                 }
3430 
3431                 ret = len;
3432             }
3433         }
3434     }
3435 
3436 out:
3437     unlock_iovec(vec, target_vec, count, !send);
3438 out2:
3439     return ret;
3440 }
3441 
3442 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3443                                int flags, int send)
3444 {
3445     abi_long ret;
3446     struct target_msghdr *msgp;
3447 
3448     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3449                           msgp,
3450                           target_msg,
3451                           send ? 1 : 0)) {
3452         return -TARGET_EFAULT;
3453     }
3454     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3455     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3456     return ret;
3457 }
3458 
3459 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460  * so it might not have this *mmsg-specific flag either.
3461  */
3462 #ifndef MSG_WAITFORONE
3463 #define MSG_WAITFORONE 0x10000
3464 #endif
3465 
3466 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3467                                 unsigned int vlen, unsigned int flags,
3468                                 int send)
3469 {
3470     struct target_mmsghdr *mmsgp;
3471     abi_long ret = 0;
3472     int i;
3473 
3474     if (vlen > UIO_MAXIOV) {
3475         vlen = UIO_MAXIOV;
3476     }
3477 
3478     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3479     if (!mmsgp) {
3480         return -TARGET_EFAULT;
3481     }
3482 
3483     for (i = 0; i < vlen; i++) {
3484         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3485         if (is_error(ret)) {
3486             break;
3487         }
3488         mmsgp[i].msg_len = tswap32(ret);
3489         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3490         if (flags & MSG_WAITFORONE) {
3491             flags |= MSG_DONTWAIT;
3492         }
3493     }
3494 
3495     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3496 
3497     /* Return number of datagrams sent if we sent any at all;
3498      * otherwise return the error.
3499      */
3500     if (i) {
3501         return i;
3502     }
3503     return ret;
3504 }
3505 
3506 /* do_accept4() Must return target values and target errnos. */
3507 static abi_long do_accept4(int fd, abi_ulong target_addr,
3508                            abi_ulong target_addrlen_addr, int flags)
3509 {
3510     socklen_t addrlen, ret_addrlen;
3511     void *addr;
3512     abi_long ret;
3513     int host_flags;
3514 
3515     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3516 
3517     if (target_addr == 0) {
3518         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3519     }
3520 
3521     /* Linux returns EFAULT if the addrlen pointer is invalid */
3522     if (get_user_u32(addrlen, target_addrlen_addr))
3523         return -TARGET_EFAULT;
3524 
3525     if ((int)addrlen < 0) {
3526         return -TARGET_EINVAL;
3527     }
3528 
3529     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3530         return -TARGET_EFAULT;
3531 
3532     addr = alloca(addrlen);
3533 
3534     ret_addrlen = addrlen;
3535     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3536     if (!is_error(ret)) {
3537         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3538         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3539             ret = -TARGET_EFAULT;
3540         }
3541     }
3542     return ret;
3543 }
3544 
3545 /* do_getpeername() Must return target values and target errnos. */
3546 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3547                                abi_ulong target_addrlen_addr)
3548 {
3549     socklen_t addrlen, ret_addrlen;
3550     void *addr;
3551     abi_long ret;
3552 
3553     if (get_user_u32(addrlen, target_addrlen_addr))
3554         return -TARGET_EFAULT;
3555 
3556     if ((int)addrlen < 0) {
3557         return -TARGET_EINVAL;
3558     }
3559 
3560     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3561         return -TARGET_EFAULT;
3562 
3563     addr = alloca(addrlen);
3564 
3565     ret_addrlen = addrlen;
3566     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3567     if (!is_error(ret)) {
3568         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3569         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3570             ret = -TARGET_EFAULT;
3571         }
3572     }
3573     return ret;
3574 }
3575 
3576 /* do_getsockname() Must return target values and target errnos. */
3577 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3578                                abi_ulong target_addrlen_addr)
3579 {
3580     socklen_t addrlen, ret_addrlen;
3581     void *addr;
3582     abi_long ret;
3583 
3584     if (get_user_u32(addrlen, target_addrlen_addr))
3585         return -TARGET_EFAULT;
3586 
3587     if ((int)addrlen < 0) {
3588         return -TARGET_EINVAL;
3589     }
3590 
3591     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3592         return -TARGET_EFAULT;
3593 
3594     addr = alloca(addrlen);
3595 
3596     ret_addrlen = addrlen;
3597     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3598     if (!is_error(ret)) {
3599         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3600         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3601             ret = -TARGET_EFAULT;
3602         }
3603     }
3604     return ret;
3605 }
3606 
3607 /* do_socketpair() Must return target values and target errnos. */
3608 static abi_long do_socketpair(int domain, int type, int protocol,
3609                               abi_ulong target_tab_addr)
3610 {
3611     int tab[2];
3612     abi_long ret;
3613 
3614     target_to_host_sock_type(&type);
3615 
3616     ret = get_errno(socketpair(domain, type, protocol, tab));
3617     if (!is_error(ret)) {
3618         if (put_user_s32(tab[0], target_tab_addr)
3619             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3620             ret = -TARGET_EFAULT;
3621     }
3622     return ret;
3623 }
3624 
3625 /* do_sendto() Must return target values and target errnos. */
3626 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3627                           abi_ulong target_addr, socklen_t addrlen)
3628 {
3629     void *addr;
3630     void *host_msg;
3631     void *copy_msg = NULL;
3632     abi_long ret;
3633 
3634     if ((int)addrlen < 0) {
3635         return -TARGET_EINVAL;
3636     }
3637 
3638     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3639     if (!host_msg)
3640         return -TARGET_EFAULT;
3641     if (fd_trans_target_to_host_data(fd)) {
3642         copy_msg = host_msg;
3643         host_msg = g_malloc(len);
3644         memcpy(host_msg, copy_msg, len);
3645         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3646         if (ret < 0) {
3647             goto fail;
3648         }
3649     }
3650     if (target_addr) {
3651         addr = alloca(addrlen+1);
3652         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3653         if (ret) {
3654             goto fail;
3655         }
3656         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3657     } else {
3658         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3659     }
3660 fail:
3661     if (copy_msg) {
3662         g_free(host_msg);
3663         host_msg = copy_msg;
3664     }
3665     unlock_user(host_msg, msg, 0);
3666     return ret;
3667 }
3668 
3669 /* do_recvfrom() Must return target values and target errnos. */
3670 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3671                             abi_ulong target_addr,
3672                             abi_ulong target_addrlen)
3673 {
3674     socklen_t addrlen, ret_addrlen;
3675     void *addr;
3676     void *host_msg;
3677     abi_long ret;
3678 
3679     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3680     if (!host_msg)
3681         return -TARGET_EFAULT;
3682     if (target_addr) {
3683         if (get_user_u32(addrlen, target_addrlen)) {
3684             ret = -TARGET_EFAULT;
3685             goto fail;
3686         }
3687         if ((int)addrlen < 0) {
3688             ret = -TARGET_EINVAL;
3689             goto fail;
3690         }
3691         addr = alloca(addrlen);
3692         ret_addrlen = addrlen;
3693         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3694                                       addr, &ret_addrlen));
3695     } else {
3696         addr = NULL; /* To keep compiler quiet.  */
3697         addrlen = 0; /* To keep compiler quiet.  */
3698         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3699     }
3700     if (!is_error(ret)) {
3701         if (fd_trans_host_to_target_data(fd)) {
3702             abi_long trans;
3703             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3704             if (is_error(trans)) {
3705                 ret = trans;
3706                 goto fail;
3707             }
3708         }
3709         if (target_addr) {
3710             host_to_target_sockaddr(target_addr, addr,
3711                                     MIN(addrlen, ret_addrlen));
3712             if (put_user_u32(ret_addrlen, target_addrlen)) {
3713                 ret = -TARGET_EFAULT;
3714                 goto fail;
3715             }
3716         }
3717         unlock_user(host_msg, msg, len);
3718     } else {
3719 fail:
3720         unlock_user(host_msg, msg, 0);
3721     }
3722     return ret;
3723 }
3724 
3725 #ifdef TARGET_NR_socketcall
3726 /* do_socketcall() must return target values and target errnos. */
3727 static abi_long do_socketcall(int num, abi_ulong vptr)
3728 {
3729     static const unsigned nargs[] = { /* number of arguments per operation */
3730         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3731         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3732         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3733         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3734         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3735         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3736         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3737         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3738         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3739         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3740         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3741         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3742         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3743         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3744         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3745         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3746         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3747         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3748         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3749         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3750     };
3751     abi_long a[6]; /* max 6 args */
3752     unsigned i;
3753 
3754     /* check the range of the first argument num */
3755     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3756     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3757         return -TARGET_EINVAL;
3758     }
3759     /* ensure we have space for args */
3760     if (nargs[num] > ARRAY_SIZE(a)) {
3761         return -TARGET_EINVAL;
3762     }
3763     /* collect the arguments in a[] according to nargs[] */
3764     for (i = 0; i < nargs[num]; ++i) {
3765         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3766             return -TARGET_EFAULT;
3767         }
3768     }
3769     /* now that we have the args, invoke the appropriate underlying function */
3770     switch (num) {
3771     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3772         return do_socket(a[0], a[1], a[2]);
3773     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3774         return do_bind(a[0], a[1], a[2]);
3775     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3776         return do_connect(a[0], a[1], a[2]);
3777     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3778         return get_errno(listen(a[0], a[1]));
3779     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3780         return do_accept4(a[0], a[1], a[2], 0);
3781     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3782         return do_getsockname(a[0], a[1], a[2]);
3783     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3784         return do_getpeername(a[0], a[1], a[2]);
3785     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3786         return do_socketpair(a[0], a[1], a[2], a[3]);
3787     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3788         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3789     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3790         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3791     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3792         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3793     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3794         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3795     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3796         return get_errno(shutdown(a[0], a[1]));
3797     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3798         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3799     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3800         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3801     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3802         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3803     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3804         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3805     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3806         return do_accept4(a[0], a[1], a[2], a[3]);
3807     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3808         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3809     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3810         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3811     default:
3812         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3813         return -TARGET_EINVAL;
3814     }
3815 }
3816 #endif
3817 
3818 #define N_SHM_REGIONS	32
3819 
3820 static struct shm_region {
3821     abi_ulong start;
3822     abi_ulong size;
3823     bool in_use;
3824 } shm_regions[N_SHM_REGIONS];
3825 
3826 #ifndef TARGET_SEMID64_DS
3827 /* asm-generic version of this struct */
3828 struct target_semid64_ds
3829 {
3830   struct target_ipc_perm sem_perm;
3831   abi_ulong sem_otime;
3832 #if TARGET_ABI_BITS == 32
3833   abi_ulong __unused1;
3834 #endif
3835   abi_ulong sem_ctime;
3836 #if TARGET_ABI_BITS == 32
3837   abi_ulong __unused2;
3838 #endif
3839   abi_ulong sem_nsems;
3840   abi_ulong __unused3;
3841   abi_ulong __unused4;
3842 };
3843 #endif
3844 
3845 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3846                                                abi_ulong target_addr)
3847 {
3848     struct target_ipc_perm *target_ip;
3849     struct target_semid64_ds *target_sd;
3850 
3851     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3852         return -TARGET_EFAULT;
3853     target_ip = &(target_sd->sem_perm);
3854     host_ip->__key = tswap32(target_ip->__key);
3855     host_ip->uid = tswap32(target_ip->uid);
3856     host_ip->gid = tswap32(target_ip->gid);
3857     host_ip->cuid = tswap32(target_ip->cuid);
3858     host_ip->cgid = tswap32(target_ip->cgid);
3859 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3860     host_ip->mode = tswap32(target_ip->mode);
3861 #else
3862     host_ip->mode = tswap16(target_ip->mode);
3863 #endif
3864 #if defined(TARGET_PPC)
3865     host_ip->__seq = tswap32(target_ip->__seq);
3866 #else
3867     host_ip->__seq = tswap16(target_ip->__seq);
3868 #endif
3869     unlock_user_struct(target_sd, target_addr, 0);
3870     return 0;
3871 }
3872 
3873 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3874                                                struct ipc_perm *host_ip)
3875 {
3876     struct target_ipc_perm *target_ip;
3877     struct target_semid64_ds *target_sd;
3878 
3879     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3880         return -TARGET_EFAULT;
3881     target_ip = &(target_sd->sem_perm);
3882     target_ip->__key = tswap32(host_ip->__key);
3883     target_ip->uid = tswap32(host_ip->uid);
3884     target_ip->gid = tswap32(host_ip->gid);
3885     target_ip->cuid = tswap32(host_ip->cuid);
3886     target_ip->cgid = tswap32(host_ip->cgid);
3887 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3888     target_ip->mode = tswap32(host_ip->mode);
3889 #else
3890     target_ip->mode = tswap16(host_ip->mode);
3891 #endif
3892 #if defined(TARGET_PPC)
3893     target_ip->__seq = tswap32(host_ip->__seq);
3894 #else
3895     target_ip->__seq = tswap16(host_ip->__seq);
3896 #endif
3897     unlock_user_struct(target_sd, target_addr, 1);
3898     return 0;
3899 }
3900 
3901 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3902                                                abi_ulong target_addr)
3903 {
3904     struct target_semid64_ds *target_sd;
3905 
3906     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3907         return -TARGET_EFAULT;
3908     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3909         return -TARGET_EFAULT;
3910     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3911     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3912     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3913     unlock_user_struct(target_sd, target_addr, 0);
3914     return 0;
3915 }
3916 
3917 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3918                                                struct semid_ds *host_sd)
3919 {
3920     struct target_semid64_ds *target_sd;
3921 
3922     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3923         return -TARGET_EFAULT;
3924     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3925         return -TARGET_EFAULT;
3926     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3927     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3928     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3929     unlock_user_struct(target_sd, target_addr, 1);
3930     return 0;
3931 }
3932 
3933 struct target_seminfo {
3934     int semmap;
3935     int semmni;
3936     int semmns;
3937     int semmnu;
3938     int semmsl;
3939     int semopm;
3940     int semume;
3941     int semusz;
3942     int semvmx;
3943     int semaem;
3944 };
3945 
3946 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3947                                               struct seminfo *host_seminfo)
3948 {
3949     struct target_seminfo *target_seminfo;
3950     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3951         return -TARGET_EFAULT;
3952     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3953     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3954     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3955     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3956     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3957     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3958     __put_user(host_seminfo->semume, &target_seminfo->semume);
3959     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3960     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3961     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3962     unlock_user_struct(target_seminfo, target_addr, 1);
3963     return 0;
3964 }
3965 
3966 union semun {
3967 	int val;
3968 	struct semid_ds *buf;
3969 	unsigned short *array;
3970 	struct seminfo *__buf;
3971 };
3972 
3973 union target_semun {
3974 	int val;
3975 	abi_ulong buf;
3976 	abi_ulong array;
3977 	abi_ulong __buf;
3978 };
3979 
3980 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3981                                                abi_ulong target_addr)
3982 {
3983     int nsems;
3984     unsigned short *array;
3985     union semun semun;
3986     struct semid_ds semid_ds;
3987     int i, ret;
3988 
3989     semun.buf = &semid_ds;
3990 
3991     ret = semctl(semid, 0, IPC_STAT, semun);
3992     if (ret == -1)
3993         return get_errno(ret);
3994 
3995     nsems = semid_ds.sem_nsems;
3996 
3997     *host_array = g_try_new(unsigned short, nsems);
3998     if (!*host_array) {
3999         return -TARGET_ENOMEM;
4000     }
4001     array = lock_user(VERIFY_READ, target_addr,
4002                       nsems*sizeof(unsigned short), 1);
4003     if (!array) {
4004         g_free(*host_array);
4005         return -TARGET_EFAULT;
4006     }
4007 
4008     for(i=0; i<nsems; i++) {
4009         __get_user((*host_array)[i], &array[i]);
4010     }
4011     unlock_user(array, target_addr, 0);
4012 
4013     return 0;
4014 }
4015 
4016 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4017                                                unsigned short **host_array)
4018 {
4019     int nsems;
4020     unsigned short *array;
4021     union semun semun;
4022     struct semid_ds semid_ds;
4023     int i, ret;
4024 
4025     semun.buf = &semid_ds;
4026 
4027     ret = semctl(semid, 0, IPC_STAT, semun);
4028     if (ret == -1)
4029         return get_errno(ret);
4030 
4031     nsems = semid_ds.sem_nsems;
4032 
4033     array = lock_user(VERIFY_WRITE, target_addr,
4034                       nsems*sizeof(unsigned short), 0);
4035     if (!array)
4036         return -TARGET_EFAULT;
4037 
4038     for(i=0; i<nsems; i++) {
4039         __put_user((*host_array)[i], &array[i]);
4040     }
4041     g_free(*host_array);
4042     unlock_user(array, target_addr, 1);
4043 
4044     return 0;
4045 }
4046 
4047 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4048                                  abi_ulong target_arg)
4049 {
4050     union target_semun target_su = { .buf = target_arg };
4051     union semun arg;
4052     struct semid_ds dsarg;
4053     unsigned short *array = NULL;
4054     struct seminfo seminfo;
4055     abi_long ret = -TARGET_EINVAL;
4056     abi_long err;
4057     cmd &= 0xff;
4058 
4059     switch( cmd ) {
4060 	case GETVAL:
4061 	case SETVAL:
4062             /* In 64 bit cross-endian situations, we will erroneously pick up
4063              * the wrong half of the union for the "val" element.  To rectify
4064              * this, the entire 8-byte structure is byteswapped, followed by
4065              * a swap of the 4 byte val field. In other cases, the data is
4066              * already in proper host byte order. */
4067 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4068 		target_su.buf = tswapal(target_su.buf);
4069 		arg.val = tswap32(target_su.val);
4070 	    } else {
4071 		arg.val = target_su.val;
4072 	    }
4073             ret = get_errno(semctl(semid, semnum, cmd, arg));
4074             break;
4075 	case GETALL:
4076 	case SETALL:
4077             err = target_to_host_semarray(semid, &array, target_su.array);
4078             if (err)
4079                 return err;
4080             arg.array = array;
4081             ret = get_errno(semctl(semid, semnum, cmd, arg));
4082             err = host_to_target_semarray(semid, target_su.array, &array);
4083             if (err)
4084                 return err;
4085             break;
4086 	case IPC_STAT:
4087 	case IPC_SET:
4088 	case SEM_STAT:
4089             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4090             if (err)
4091                 return err;
4092             arg.buf = &dsarg;
4093             ret = get_errno(semctl(semid, semnum, cmd, arg));
4094             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4095             if (err)
4096                 return err;
4097             break;
4098 	case IPC_INFO:
4099 	case SEM_INFO:
4100             arg.__buf = &seminfo;
4101             ret = get_errno(semctl(semid, semnum, cmd, arg));
4102             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4103             if (err)
4104                 return err;
4105             break;
4106 	case IPC_RMID:
4107 	case GETPID:
4108 	case GETNCNT:
4109 	case GETZCNT:
4110             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4111             break;
4112     }
4113 
4114     return ret;
4115 }
4116 
4117 struct target_sembuf {
4118     unsigned short sem_num;
4119     short sem_op;
4120     short sem_flg;
4121 };
4122 
4123 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4124                                              abi_ulong target_addr,
4125                                              unsigned nsops)
4126 {
4127     struct target_sembuf *target_sembuf;
4128     int i;
4129 
4130     target_sembuf = lock_user(VERIFY_READ, target_addr,
4131                               nsops*sizeof(struct target_sembuf), 1);
4132     if (!target_sembuf)
4133         return -TARGET_EFAULT;
4134 
4135     for(i=0; i<nsops; i++) {
4136         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4137         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4138         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4139     }
4140 
4141     unlock_user(target_sembuf, target_addr, 0);
4142 
4143     return 0;
4144 }
4145 
4146 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4147     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4148 
4149 /*
4150  * This macro is required to handle the s390 variant, which passes the
4151  * arguments in a different order than the default.
4152  */
4153 #ifdef __s390x__
4154 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4155   (__nsops), (__timeout), (__sops)
4156 #else
4157 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4158   (__nsops), 0, (__sops), (__timeout)
4159 #endif
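/*
 * Illustrative expansion (for reference only): at the call site in
 * do_semtimedop() below,
 *     safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))
 * becomes on most hosts
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 * and on s390x
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 * matching the five-parameter s390 sys_ipc calling convention.
 */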
4160 
4161 static inline abi_long do_semtimedop(int semid,
4162                                      abi_long ptr,
4163                                      unsigned nsops,
4164                                      abi_long timeout, bool time64)
4165 {
4166     struct sembuf *sops;
4167     struct timespec ts, *pts = NULL;
4168     abi_long ret;
4169 
4170     if (timeout) {
4171         pts = &ts;
4172         if (time64) {
4173             if (target_to_host_timespec64(pts, timeout)) {
4174                 return -TARGET_EFAULT;
4175             }
4176         } else {
4177             if (target_to_host_timespec(pts, timeout)) {
4178                 return -TARGET_EFAULT;
4179             }
4180         }
4181     }
4182 
4183     if (nsops > TARGET_SEMOPM) {
4184         return -TARGET_E2BIG;
4185     }
4186 
4187     sops = g_new(struct sembuf, nsops);
4188 
4189     if (target_to_host_sembuf(sops, ptr, nsops)) {
4190         g_free(sops);
4191         return -TARGET_EFAULT;
4192     }
4193 
4194     ret = -TARGET_ENOSYS;
4195 #ifdef __NR_semtimedop
4196     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4197 #endif
4198 #ifdef __NR_ipc
4199     if (ret == -TARGET_ENOSYS) {
4200         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4201                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4202     }
4203 #endif
4204     g_free(sops);
4205     return ret;
4206 }
4207 #endif
4208 
4209 struct target_msqid_ds
4210 {
4211     struct target_ipc_perm msg_perm;
4212     abi_ulong msg_stime;
4213 #if TARGET_ABI_BITS == 32
4214     abi_ulong __unused1;
4215 #endif
4216     abi_ulong msg_rtime;
4217 #if TARGET_ABI_BITS == 32
4218     abi_ulong __unused2;
4219 #endif
4220     abi_ulong msg_ctime;
4221 #if TARGET_ABI_BITS == 32
4222     abi_ulong __unused3;
4223 #endif
4224     abi_ulong __msg_cbytes;
4225     abi_ulong msg_qnum;
4226     abi_ulong msg_qbytes;
4227     abi_ulong msg_lspid;
4228     abi_ulong msg_lrpid;
4229     abi_ulong __unused4;
4230     abi_ulong __unused5;
4231 };
4232 
4233 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4234                                                abi_ulong target_addr)
4235 {
4236     struct target_msqid_ds *target_md;
4237 
4238     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4239         return -TARGET_EFAULT;
4240     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4241         return -TARGET_EFAULT;
4242     host_md->msg_stime = tswapal(target_md->msg_stime);
4243     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4244     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4245     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4246     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4247     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4248     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4249     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4250     unlock_user_struct(target_md, target_addr, 0);
4251     return 0;
4252 }
4253 
4254 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4255                                                struct msqid_ds *host_md)
4256 {
4257     struct target_msqid_ds *target_md;
4258 
4259     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4260         return -TARGET_EFAULT;
4261     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4262         return -TARGET_EFAULT;
4263     target_md->msg_stime = tswapal(host_md->msg_stime);
4264     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4265     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4266     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4267     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4268     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4269     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4270     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4271     unlock_user_struct(target_md, target_addr, 1);
4272     return 0;
4273 }
4274 
4275 struct target_msginfo {
4276     int msgpool;
4277     int msgmap;
4278     int msgmax;
4279     int msgmnb;
4280     int msgmni;
4281     int msgssz;
4282     int msgtql;
4283     unsigned short int msgseg;
4284 };
4285 
4286 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4287                                               struct msginfo *host_msginfo)
4288 {
4289     struct target_msginfo *target_msginfo;
4290     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4291         return -TARGET_EFAULT;
4292     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4293     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4294     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4295     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4296     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4297     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4298     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4299     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4300     unlock_user_struct(target_msginfo, target_addr, 1);
4301     return 0;
4302 }
4303 
4304 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4305 {
4306     struct msqid_ds dsarg;
4307     struct msginfo msginfo;
4308     abi_long ret = -TARGET_EINVAL;
4309 
4310     cmd &= 0xff;
4311 
4312     switch (cmd) {
4313     case IPC_STAT:
4314     case IPC_SET:
4315     case MSG_STAT:
4316         if (target_to_host_msqid_ds(&dsarg,ptr))
4317             return -TARGET_EFAULT;
4318         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4319         if (host_to_target_msqid_ds(ptr,&dsarg))
4320             return -TARGET_EFAULT;
4321         break;
4322     case IPC_RMID:
4323         ret = get_errno(msgctl(msgid, cmd, NULL));
4324         break;
4325     case IPC_INFO:
4326     case MSG_INFO:
4327         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4328         if (host_to_target_msginfo(ptr, &msginfo))
4329             return -TARGET_EFAULT;
4330         break;
4331     }
4332 
4333     return ret;
4334 }
4335 
4336 struct target_msgbuf {
4337     abi_long mtype;
4338     char	mtext[1];
4339 };
4340 
4341 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4342                                  ssize_t msgsz, int msgflg)
4343 {
4344     struct target_msgbuf *target_mb;
4345     struct msgbuf *host_mb;
4346     abi_long ret = 0;
4347 
4348     if (msgsz < 0) {
4349         return -TARGET_EINVAL;
4350     }
4351 
4352     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4353         return -TARGET_EFAULT;
4354     host_mb = g_try_malloc(msgsz + sizeof(long));
4355     if (!host_mb) {
4356         unlock_user_struct(target_mb, msgp, 0);
4357         return -TARGET_ENOMEM;
4358     }
4359     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4360     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4361     ret = -TARGET_ENOSYS;
4362 #ifdef __NR_msgsnd
4363     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4364 #endif
4365 #ifdef __NR_ipc
4366     if (ret == -TARGET_ENOSYS) {
4367 #ifdef __s390x__
4368         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4369                                  host_mb));
4370 #else
4371         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4372                                  host_mb, 0));
4373 #endif
4374     }
4375 #endif
4376     g_free(host_mb);
4377     unlock_user_struct(target_mb, msgp, 0);
4378 
4379     return ret;
4380 }
4381 
4382 #ifdef __NR_ipc
4383 #if defined(__sparc__)
4384 /* SPARC's msgrcv does not use the kludge on the final 2 arguments.  */
4385 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4386 #elif defined(__s390x__)
4387 /* The s390 sys_ipc variant has only five parameters.  */
4388 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4389     ((long int[]){(long int)__msgp, __msgtyp})
4390 #else
4391 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4392     ((long int[]){(long int)__msgp, __msgtyp}), 0
4393 #endif
4394 #endif
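/*
 * Illustrative expansion (for reference only): in the msgrcv fallback call
 * below, MSGRCV_ARGS(host_mb, msgtyp) becomes
 *     host_mb, msgtyp                                  on SPARC,
 *     ((long int[]){(long int)host_mb, msgtyp})        on s390x, and
 *     ((long int[]){(long int)host_mb, msgtyp}), 0     on all other hosts,
 * i.e. most hosts pass the message pointer and type packed into a temporary
 * two-element array (the "kludge" mentioned above) plus a trailing 0.
 */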
4395 
4396 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4397                                  ssize_t msgsz, abi_long msgtyp,
4398                                  int msgflg)
4399 {
4400     struct target_msgbuf *target_mb;
4401     char *target_mtext;
4402     struct msgbuf *host_mb;
4403     abi_long ret = 0;
4404 
4405     if (msgsz < 0) {
4406         return -TARGET_EINVAL;
4407     }
4408 
4409     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4410         return -TARGET_EFAULT;
4411 
4412     host_mb = g_try_malloc(msgsz + sizeof(long));
4413     if (!host_mb) {
4414         ret = -TARGET_ENOMEM;
4415         goto end;
4416     }
4417     ret = -TARGET_ENOSYS;
4418 #ifdef __NR_msgrcv
4419     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4420 #endif
4421 #ifdef __NR_ipc
4422     if (ret == -TARGET_ENOSYS) {
4423         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4424                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4425     }
4426 #endif
4427 
4428     if (ret > 0) {
4429         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4430         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4431         if (!target_mtext) {
4432             ret = -TARGET_EFAULT;
4433             goto end;
4434         }
4435         memcpy(target_mb->mtext, host_mb->mtext, ret);
4436         unlock_user(target_mtext, target_mtext_addr, ret);
4437     }
4438 
4439     target_mb->mtype = tswapal(host_mb->mtype);
4440 
4441 end:
4442     if (target_mb)
4443         unlock_user_struct(target_mb, msgp, 1);
4444     g_free(host_mb);
4445     return ret;
4446 }
4447 
4448 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4449                                                abi_ulong target_addr)
4450 {
4451     struct target_shmid_ds *target_sd;
4452 
4453     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4454         return -TARGET_EFAULT;
4455     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4456         return -TARGET_EFAULT;
4457     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4458     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4459     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4460     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4461     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4462     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4463     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4464     unlock_user_struct(target_sd, target_addr, 0);
4465     return 0;
4466 }
4467 
4468 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4469                                                struct shmid_ds *host_sd)
4470 {
4471     struct target_shmid_ds *target_sd;
4472 
4473     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4474         return -TARGET_EFAULT;
4475     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4476         return -TARGET_EFAULT;
4477     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4478     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4479     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4480     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4481     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4482     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4483     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4484     unlock_user_struct(target_sd, target_addr, 1);
4485     return 0;
4486 }
4487 
4488 struct  target_shminfo {
4489     abi_ulong shmmax;
4490     abi_ulong shmmin;
4491     abi_ulong shmmni;
4492     abi_ulong shmseg;
4493     abi_ulong shmall;
4494 };
4495 
4496 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4497                                               struct shminfo *host_shminfo)
4498 {
4499     struct target_shminfo *target_shminfo;
4500     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4501         return -TARGET_EFAULT;
4502     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4503     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4504     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4505     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4506     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4507     unlock_user_struct(target_shminfo, target_addr, 1);
4508     return 0;
4509 }
4510 
4511 struct target_shm_info {
4512     int used_ids;
4513     abi_ulong shm_tot;
4514     abi_ulong shm_rss;
4515     abi_ulong shm_swp;
4516     abi_ulong swap_attempts;
4517     abi_ulong swap_successes;
4518 };
4519 
4520 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4521                                                struct shm_info *host_shm_info)
4522 {
4523     struct target_shm_info *target_shm_info;
4524     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4525         return -TARGET_EFAULT;
4526     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4527     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4528     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4529     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4530     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4531     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4532     unlock_user_struct(target_shm_info, target_addr, 1);
4533     return 0;
4534 }
4535 
4536 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4537 {
4538     struct shmid_ds dsarg;
4539     struct shminfo shminfo;
4540     struct shm_info shm_info;
4541     abi_long ret = -TARGET_EINVAL;
4542 
4543     cmd &= 0xff;
4544 
4545     switch(cmd) {
4546     case IPC_STAT:
4547     case IPC_SET:
4548     case SHM_STAT:
4549         if (target_to_host_shmid_ds(&dsarg, buf))
4550             return -TARGET_EFAULT;
4551         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4552         if (host_to_target_shmid_ds(buf, &dsarg))
4553             return -TARGET_EFAULT;
4554         break;
4555     case IPC_INFO:
4556         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4557         if (host_to_target_shminfo(buf, &shminfo))
4558             return -TARGET_EFAULT;
4559         break;
4560     case SHM_INFO:
4561         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4562         if (host_to_target_shm_info(buf, &shm_info))
4563             return -TARGET_EFAULT;
4564         break;
4565     case IPC_RMID:
4566     case SHM_LOCK:
4567     case SHM_UNLOCK:
4568         ret = get_errno(shmctl(shmid, cmd, NULL));
4569         break;
4570     }
4571 
4572     return ret;
4573 }
4574 
4575 #ifndef TARGET_FORCE_SHMLBA
4576 /* For most architectures, SHMLBA is the same as the page size;
4577  * some architectures have larger values, in which case they should
4578  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4579  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4580  * and defining its own value for SHMLBA.
4581  *
4582  * The kernel also permits SHMLBA to be set by the architecture to a
4583  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4584  * this means that addresses are rounded to the large size if
4585  * SHM_RND is set, but addresses not aligned to that size are not rejected
4586  * as long as they are at least page-aligned. Since the only architecture
4587  * which uses this is ia64, this code doesn't provide for that oddity.
4588  */
4589 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4590 {
4591     return TARGET_PAGE_SIZE;
4592 }
4593 #endif
4594 
4595 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4596                                  int shmid, abi_ulong shmaddr, int shmflg)
4597 {
4598     abi_long raddr;
4599     void *host_raddr;
4600     struct shmid_ds shm_info;
4601     int i,ret;
4602     abi_ulong shmlba;
4603 
4604     /* shmat pointers are always untagged */
4605 
4606     /* find out the length of the shared memory segment */
4607     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4608     if (is_error(ret)) {
4609         /* can't get length, bail out */
4610         return ret;
4611     }
4612 
4613     shmlba = target_shmlba(cpu_env);
4614 
4615     if (shmaddr & (shmlba - 1)) {
4616         if (shmflg & SHM_RND) {
4617             shmaddr &= ~(shmlba - 1);
4618         } else {
4619             return -TARGET_EINVAL;
4620         }
4621     }
4622     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4623         return -TARGET_EINVAL;
4624     }
4625 
4626     mmap_lock();
4627 
4628     if (shmaddr) {
4629         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4630     } else {
4631         abi_ulong mmap_start;
4632 
4633         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4634         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4635 
4636         if (mmap_start == -1) {
4637             errno = ENOMEM;
4638             host_raddr = (void *)-1;
4639         } else
4640             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4641                                shmflg | SHM_REMAP);
4642     }
4643 
4644     if (host_raddr == (void *)-1) {
4645         mmap_unlock();
4646         return get_errno((long)host_raddr);
4647     }
4648     raddr = h2g((unsigned long)host_raddr);
4649 
4650     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4651                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4652                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4653 
4654     for (i = 0; i < N_SHM_REGIONS; i++) {
4655         if (!shm_regions[i].in_use) {
4656             shm_regions[i].in_use = true;
4657             shm_regions[i].start = raddr;
4658             shm_regions[i].size = shm_info.shm_segsz;
4659             break;
4660         }
4661     }
4662 
4663     mmap_unlock();
4664     return raddr;
4665 
4666 }
4667 
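/* do_shmdt() must return target values and target errnos. */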
4668 static inline abi_long do_shmdt(abi_ulong shmaddr)
4669 {
4670     int i;
4671     abi_long rv;
4672 
4673     /* shmdt pointers are always untagged */
4674 
4675     mmap_lock();
4676 
4677     for (i = 0; i < N_SHM_REGIONS; ++i) {
4678         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4679             shm_regions[i].in_use = false;
4680             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4681             break;
4682         }
4683     }
4684     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4685 
4686     mmap_unlock();
4687 
4688     return rv;
4689 }
4690 
4691 #ifdef TARGET_NR_ipc
4692 /* ??? This only works with linear mappings.  */
4693 /* do_ipc() must return target values and target errnos. */
4694 static abi_long do_ipc(CPUArchState *cpu_env,
4695                        unsigned int call, abi_long first,
4696                        abi_long second, abi_long third,
4697                        abi_long ptr, abi_long fifth)
4698 {
4699     int version;
4700     abi_long ret = 0;
4701 
4702     version = call >> 16;
4703     call &= 0xffff;
4704 
4705     switch (call) {
4706     case IPCOP_semop:
4707         ret = do_semtimedop(first, ptr, second, 0, false);
4708         break;
4709     case IPCOP_semtimedop:
4710     /*
4711      * The s390 sys_ipc variant has only five parameters instead of six
4712      * (as in the default variant); the only difference is SEMTIMEDOP,
4713      * where s390 passes the struct timespec pointer in the third
4714      * parameter while the generic variant uses the fifth parameter.
4715      */
4716 #if defined(TARGET_S390X)
4717         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4718 #else
4719         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4720 #endif
4721         break;
4722 
4723     case IPCOP_semget:
4724         ret = get_errno(semget(first, second, third));
4725         break;
4726 
4727     case IPCOP_semctl: {
4728         /* The semun argument to semctl is passed by value, so dereference the
4729          * ptr argument. */
4730         abi_ulong atptr;
4731         get_user_ual(atptr, ptr);
4732         ret = do_semctl(first, second, third, atptr);
4733         break;
4734     }
4735 
4736     case IPCOP_msgget:
4737         ret = get_errno(msgget(first, second));
4738         break;
4739 
4740     case IPCOP_msgsnd:
4741         ret = do_msgsnd(first, ptr, second, third);
4742         break;
4743 
4744     case IPCOP_msgctl:
4745         ret = do_msgctl(first, second, ptr);
4746         break;
4747 
4748     case IPCOP_msgrcv:
4749         switch (version) {
4750         case 0:
4751             {
4752                 struct target_ipc_kludge {
4753                     abi_long msgp;
4754                     abi_long msgtyp;
4755                 } *tmp;
4756 
4757                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4758                     ret = -TARGET_EFAULT;
4759                     break;
4760                 }
4761 
4762                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4763 
4764                 unlock_user_struct(tmp, ptr, 0);
4765                 break;
4766             }
4767         default:
4768             ret = do_msgrcv(first, ptr, second, fifth, third);
4769         }
4770         break;
4771 
4772     case IPCOP_shmat:
4773         switch (version) {
4774         default:
4775         {
4776             abi_ulong raddr;
4777             raddr = do_shmat(cpu_env, first, ptr, second);
4778             if (is_error(raddr))
4779                 return get_errno(raddr);
4780             if (put_user_ual(raddr, third))
4781                 return -TARGET_EFAULT;
4782             break;
4783         }
4784         case 1:
4785             ret = -TARGET_EINVAL;
4786             break;
4787         }
4788         break;
4789     case IPCOP_shmdt:
4790         ret = do_shmdt(ptr);
4791         break;
4792 
4793     case IPCOP_shmget:
4794         /* IPC_* flag values are the same on all linux platforms */
4795         ret = get_errno(shmget(first, second, third));
4796         break;
4797 
4798     /* IPC_* and SHM_* command values are the same on all linux platforms */
4799     case IPCOP_shmctl:
4800         ret = do_shmctl(first, second, ptr);
4801         break;
4802     default:
4803         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4804                       call, version);
4805         ret = -TARGET_ENOSYS;
4806         break;
4807     }
4808     return ret;
4809 }
4810 #endif
4811 
4812 /* kernel structure types definitions */
4813 
4814 #define STRUCT(name, ...) STRUCT_ ## name,
4815 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4816 enum {
4817 #include "syscall_types.h"
4818 STRUCT_MAX
4819 };
4820 #undef STRUCT
4821 #undef STRUCT_SPECIAL
4822 
4823 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4824 #define STRUCT_SPECIAL(name)
4825 #include "syscall_types.h"
4826 #undef STRUCT
4827 #undef STRUCT_SPECIAL
4828 
4829 #define MAX_STRUCT_SIZE 4096
4830 
4831 #ifdef CONFIG_FIEMAP
4832 /* So fiemap access checks don't overflow on 32 bit systems.
4833  * This is very slightly smaller than the limit imposed by
4834  * the underlying kernel.
4835  */
4836 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4837                             / sizeof(struct fiemap_extent))
4838 
4839 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4840                                        int fd, int cmd, abi_long arg)
4841 {
4842     /* The parameter for this ioctl is a struct fiemap followed
4843      * by an array of struct fiemap_extent whose size is set
4844      * in fiemap->fm_extent_count. The array is filled in by the
4845      * ioctl.
4846      */
4847     int target_size_in, target_size_out;
4848     struct fiemap *fm;
4849     const argtype *arg_type = ie->arg_type;
4850     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4851     void *argptr, *p;
4852     abi_long ret;
4853     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4854     uint32_t outbufsz;
4855     int free_fm = 0;
4856 
4857     assert(arg_type[0] == TYPE_PTR);
4858     assert(ie->access == IOC_RW);
4859     arg_type++;
4860     target_size_in = thunk_type_size(arg_type, 0);
4861     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4862     if (!argptr) {
4863         return -TARGET_EFAULT;
4864     }
4865     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4866     unlock_user(argptr, arg, 0);
4867     fm = (struct fiemap *)buf_temp;
4868     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4869         return -TARGET_EINVAL;
4870     }
4871 
4872     outbufsz = sizeof (*fm) +
4873         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4874 
4875     if (outbufsz > MAX_STRUCT_SIZE) {
4876         /* We can't fit all the extents into the fixed size buffer.
4877          * Allocate one that is large enough and use it instead.
4878          */
4879         fm = g_try_malloc(outbufsz);
4880         if (!fm) {
4881             return -TARGET_ENOMEM;
4882         }
4883         memcpy(fm, buf_temp, sizeof(struct fiemap));
4884         free_fm = 1;
4885     }
4886     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4887     if (!is_error(ret)) {
4888         target_size_out = target_size_in;
4889         /* An extent_count of 0 means we were only counting the extents
4890          * so there are no structs to copy
4891          */
4892         if (fm->fm_extent_count != 0) {
4893             target_size_out += fm->fm_mapped_extents * extent_size;
4894         }
4895         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4896         if (!argptr) {
4897             ret = -TARGET_EFAULT;
4898         } else {
4899             /* Convert the struct fiemap */
4900             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4901             if (fm->fm_extent_count != 0) {
4902                 p = argptr + target_size_in;
4903                 /* ...and then all the struct fiemap_extents */
4904                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4905                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4906                                   THUNK_TARGET);
4907                     p += extent_size;
4908                 }
4909             }
4910             unlock_user(argptr, arg, target_size_out);
4911         }
4912     }
4913     if (free_fm) {
4914         g_free(fm);
4915     }
4916     return ret;
4917 }
4918 #endif
4919 
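/*
 * SIOCGIFCONF: the argument is a struct ifconf whose ifc_buf points to an
 * array of struct ifreq.  Both the outer struct and every array element must
 * be converted between target and host layouts, and ifc_len has to be
 * rescaled because the target and host ifreq sizes may differ.
 */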
4920 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4921                                 int fd, int cmd, abi_long arg)
4922 {
4923     const argtype *arg_type = ie->arg_type;
4924     int target_size;
4925     void *argptr;
4926     int ret;
4927     struct ifconf *host_ifconf;
4928     uint32_t outbufsz;
4929     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4930     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4931     int target_ifreq_size;
4932     int nb_ifreq;
4933     int free_buf = 0;
4934     int i;
4935     int target_ifc_len;
4936     abi_long target_ifc_buf;
4937     int host_ifc_len;
4938     char *host_ifc_buf;
4939 
4940     assert(arg_type[0] == TYPE_PTR);
4941     assert(ie->access == IOC_RW);
4942 
4943     arg_type++;
4944     target_size = thunk_type_size(arg_type, 0);
4945 
4946     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4947     if (!argptr)
4948         return -TARGET_EFAULT;
4949     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4950     unlock_user(argptr, arg, 0);
4951 
4952     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4953     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4954     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4955 
4956     if (target_ifc_buf != 0) {
4957         target_ifc_len = host_ifconf->ifc_len;
4958         nb_ifreq = target_ifc_len / target_ifreq_size;
4959         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4960 
4961         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4962         if (outbufsz > MAX_STRUCT_SIZE) {
4963             /*
4964              * We can't fit all the ifreq entries into the fixed size buffer.
4965              * Allocate one that is large enough and use it instead.
4966              */
4967             host_ifconf = g_try_malloc(outbufsz);
4968             if (!host_ifconf) {
4969                 return -TARGET_ENOMEM;
4970             }
4971             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4972             free_buf = 1;
4973         }
4974         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4975 
4976         host_ifconf->ifc_len = host_ifc_len;
4977     } else {
4978         host_ifc_buf = NULL;
4979     }
4980     host_ifconf->ifc_buf = host_ifc_buf;
4981 
4982     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4983     if (!is_error(ret)) {
4984         /* convert host ifc_len to target ifc_len */
4985 
4986         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4987         target_ifc_len = nb_ifreq * target_ifreq_size;
4988         host_ifconf->ifc_len = target_ifc_len;
4989 
4990         /* restore target ifc_buf */
4991 
4992         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4993 
4994         /* copy struct ifconf to target user */
4995 
4996         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4997         if (!argptr)
4998             return -TARGET_EFAULT;
4999         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5000         unlock_user(argptr, arg, target_size);
5001 
5002         if (target_ifc_buf != 0) {
5003             /* copy ifreq[] to target user */
5004             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5005             for (i = 0; i < nb_ifreq ; i++) {
5006                 thunk_convert(argptr + i * target_ifreq_size,
5007                               host_ifc_buf + i * sizeof(struct ifreq),
5008                               ifreq_arg_type, THUNK_TARGET);
5009             }
5010             unlock_user(argptr, target_ifc_buf, target_ifc_len);
5011         }
5012     }
5013 
5014     if (free_buf) {
5015         g_free(host_ifconf);
5016     }
5017 
5018     return ret;
5019 }
5020 
5021 #if defined(CONFIG_USBFS)
5022 #if HOST_LONG_BITS > 64
5023 #error USBDEVFS thunks do not support >64 bit hosts yet.
5024 #endif
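/*
 * USBDEVFS_SUBMITURB hands the kernel a host pointer that USBDEVFS_REAPURB
 * later returns verbatim, so every guest URB is shadowed by a struct live_urb
 * holding the guest urb/buffer addresses next to the host usbdevfs_urb that
 * is actually submitted.  A hash table keyed by the guest URB address lets
 * USBDEVFS_DISCARDURB find the corresponding live_urb again.
 */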
5025 struct live_urb {
5026     uint64_t target_urb_adr;
5027     uint64_t target_buf_adr;
5028     char *target_buf_ptr;
5029     struct usbdevfs_urb host_urb;
5030 };
5031 
5032 static GHashTable *usbdevfs_urb_hashtable(void)
5033 {
5034     static GHashTable *urb_hashtable;
5035 
5036     if (!urb_hashtable) {
5037         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5038     }
5039     return urb_hashtable;
5040 }
5041 
5042 static void urb_hashtable_insert(struct live_urb *urb)
5043 {
5044     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5045     g_hash_table_insert(urb_hashtable, urb, urb);
5046 }
5047 
5048 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5049 {
5050     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5051     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5052 }
5053 
5054 static void urb_hashtable_remove(struct live_urb *urb)
5055 {
5056     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5057     g_hash_table_remove(urb_hashtable, urb);
5058 }
5059 
5060 static abi_long
5061 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5062                           int fd, int cmd, abi_long arg)
5063 {
5064     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5065     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5066     struct live_urb *lurb;
5067     void *argptr;
5068     uint64_t hurb;
5069     int target_size;
5070     uintptr_t target_urb_adr;
5071     abi_long ret;
5072 
5073     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5074 
5075     memset(buf_temp, 0, sizeof(uint64_t));
5076     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5077     if (is_error(ret)) {
5078         return ret;
5079     }
5080 
5081     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5082     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5083     if (!lurb->target_urb_adr) {
5084         return -TARGET_EFAULT;
5085     }
5086     urb_hashtable_remove(lurb);
5087     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5088         lurb->host_urb.buffer_length);
5089     lurb->target_buf_ptr = NULL;
5090 
5091     /* restore the guest buffer pointer */
5092     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5093 
5094     /* update the guest urb struct */
5095     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5096     if (!argptr) {
5097         g_free(lurb);
5098         return -TARGET_EFAULT;
5099     }
5100     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5101     unlock_user(argptr, lurb->target_urb_adr, target_size);
5102 
5103     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5104     /* write back the urb handle */
5105     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5106     if (!argptr) {
5107         g_free(lurb);
5108         return -TARGET_EFAULT;
5109     }
5110 
5111     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5112     target_urb_adr = lurb->target_urb_adr;
5113     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5114     unlock_user(argptr, arg, target_size);
5115 
5116     g_free(lurb);
5117     return ret;
5118 }
5119 
5120 static abi_long
5121 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5122                              uint8_t *buf_temp __attribute__((unused)),
5123                              int fd, int cmd, abi_long arg)
5124 {
5125     struct live_urb *lurb;
5126 
5127     /* map target address back to host URB with metadata. */
5128     lurb = urb_hashtable_lookup(arg);
5129     if (!lurb) {
5130         return -TARGET_EFAULT;
5131     }
5132     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5133 }
5134 
5135 static abi_long
5136 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5137                             int fd, int cmd, abi_long arg)
5138 {
5139     const argtype *arg_type = ie->arg_type;
5140     int target_size;
5141     abi_long ret;
5142     void *argptr;
5143     int rw_dir;
5144     struct live_urb *lurb;
5145 
5146     /*
5147      * Each submitted URB needs to map to a unique ID for the
5148      * kernel, and that unique ID needs to be a pointer to
5149      * host memory.  Hence, we need to malloc for each URB.
5150      * Isochronous transfers have a variable-length struct.
5151      */
5152     arg_type++;
5153     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5154 
5155     /* construct host copy of urb and metadata */
5156     lurb = g_try_malloc0(sizeof(struct live_urb));
5157     if (!lurb) {
5158         return -TARGET_ENOMEM;
5159     }
5160 
5161     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5162     if (!argptr) {
5163         g_free(lurb);
5164         return -TARGET_EFAULT;
5165     }
5166     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5167     unlock_user(argptr, arg, 0);
5168 
5169     lurb->target_urb_adr = arg;
5170     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5171 
5172     /* buffer space used depends on endpoint type so lock the entire buffer */
5173     /* control type urbs should check the buffer contents for true direction */
5174     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5175     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5176         lurb->host_urb.buffer_length, 1);
5177     if (lurb->target_buf_ptr == NULL) {
5178         g_free(lurb);
5179         return -TARGET_EFAULT;
5180     }
5181 
5182     /* update buffer pointer in host copy */
5183     lurb->host_urb.buffer = lurb->target_buf_ptr;
5184 
5185     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5186     if (is_error(ret)) {
5187         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5188         g_free(lurb);
5189     } else {
5190         urb_hashtable_insert(lurb);
5191     }
5192 
5193     return ret;
5194 }
5195 #endif /* CONFIG_USBFS */
5196 
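/*
 * Device-mapper ioctls take a struct dm_ioctl followed by a variable-sized,
 * command-specific payload starting at data_start.  The payload layout
 * depends on the DM_* command, so it is converted by hand in both directions
 * around the host ioctl.
 */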
5197 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5198                             int cmd, abi_long arg)
5199 {
5200     void *argptr;
5201     struct dm_ioctl *host_dm;
5202     abi_long guest_data;
5203     uint32_t guest_data_size;
5204     int target_size;
5205     const argtype *arg_type = ie->arg_type;
5206     abi_long ret;
5207     void *big_buf = NULL;
5208     char *host_data;
5209 
5210     arg_type++;
5211     target_size = thunk_type_size(arg_type, 0);
5212     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5213     if (!argptr) {
5214         ret = -TARGET_EFAULT;
5215         goto out;
5216     }
5217     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5218     unlock_user(argptr, arg, 0);
5219 
5220     /* buf_temp is too small, so fetch things into a bigger buffer */
5221     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5222     memcpy(big_buf, buf_temp, target_size);
5223     buf_temp = big_buf;
5224     host_dm = big_buf;
5225 
5226     guest_data = arg + host_dm->data_start;
5227     if ((guest_data - arg) < 0) {
5228         ret = -TARGET_EINVAL;
5229         goto out;
5230     }
5231     guest_data_size = host_dm->data_size - host_dm->data_start;
5232     host_data = (char*)host_dm + host_dm->data_start;
5233 
5234     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5235     if (!argptr) {
5236         ret = -TARGET_EFAULT;
5237         goto out;
5238     }
5239 
5240     switch (ie->host_cmd) {
5241     case DM_REMOVE_ALL:
5242     case DM_LIST_DEVICES:
5243     case DM_DEV_CREATE:
5244     case DM_DEV_REMOVE:
5245     case DM_DEV_SUSPEND:
5246     case DM_DEV_STATUS:
5247     case DM_DEV_WAIT:
5248     case DM_TABLE_STATUS:
5249     case DM_TABLE_CLEAR:
5250     case DM_TABLE_DEPS:
5251     case DM_LIST_VERSIONS:
5252         /* no input data */
5253         break;
5254     case DM_DEV_RENAME:
5255     case DM_DEV_SET_GEOMETRY:
5256         /* data contains only strings */
5257         memcpy(host_data, argptr, guest_data_size);
5258         break;
5259     case DM_TARGET_MSG:
5260         memcpy(host_data, argptr, guest_data_size);
5261         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5262         break;
5263     case DM_TABLE_LOAD:
5264     {
5265         void *gspec = argptr;
5266         void *cur_data = host_data;
5267         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5268         int spec_size = thunk_type_size(arg_type, 0);
5269         int i;
5270 
5271         for (i = 0; i < host_dm->target_count; i++) {
5272             struct dm_target_spec *spec = cur_data;
5273             uint32_t next;
5274             int slen;
5275 
5276             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5277             slen = strlen((char*)gspec + spec_size) + 1;
5278             next = spec->next;
5279             spec->next = sizeof(*spec) + slen;
5280             strcpy((char*)&spec[1], gspec + spec_size);
5281             gspec += next;
5282             cur_data += spec->next;
5283         }
5284         break;
5285     }
5286     default:
5287         ret = -TARGET_EINVAL;
5288         unlock_user(argptr, guest_data, 0);
5289         goto out;
5290     }
5291     unlock_user(argptr, guest_data, 0);
5292 
5293     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5294     if (!is_error(ret)) {
5295         guest_data = arg + host_dm->data_start;
5296         guest_data_size = host_dm->data_size - host_dm->data_start;
5297         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5298         switch (ie->host_cmd) {
5299         case DM_REMOVE_ALL:
5300         case DM_DEV_CREATE:
5301         case DM_DEV_REMOVE:
5302         case DM_DEV_RENAME:
5303         case DM_DEV_SUSPEND:
5304         case DM_DEV_STATUS:
5305         case DM_TABLE_LOAD:
5306         case DM_TABLE_CLEAR:
5307         case DM_TARGET_MSG:
5308         case DM_DEV_SET_GEOMETRY:
5309             /* no return data */
5310             break;
5311         case DM_LIST_DEVICES:
5312         {
5313             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5314             uint32_t remaining_data = guest_data_size;
5315             void *cur_data = argptr;
5316             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5317             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5318 
5319             while (1) {
5320                 uint32_t next = nl->next;
5321                 if (next) {
5322                     nl->next = nl_size + (strlen(nl->name) + 1);
5323                 }
5324                 if (remaining_data < nl->next) {
5325                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5326                     break;
5327                 }
5328                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5329                 strcpy(cur_data + nl_size, nl->name);
5330                 cur_data += nl->next;
5331                 remaining_data -= nl->next;
5332                 if (!next) {
5333                     break;
5334                 }
5335                 nl = (void*)nl + next;
5336             }
5337             break;
5338         }
5339         case DM_DEV_WAIT:
5340         case DM_TABLE_STATUS:
5341         {
5342             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5343             void *cur_data = argptr;
5344             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5345             int spec_size = thunk_type_size(arg_type, 0);
5346             int i;
5347 
5348             for (i = 0; i < host_dm->target_count; i++) {
5349                 uint32_t next = spec->next;
5350                 int slen = strlen((char*)&spec[1]) + 1;
5351                 spec->next = (cur_data - argptr) + spec_size + slen;
5352                 if (guest_data_size < spec->next) {
5353                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5354                     break;
5355                 }
5356                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5357                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5358                 cur_data = argptr + spec->next;
5359                 spec = (void*)host_dm + host_dm->data_start + next;
5360             }
5361             break;
5362         }
5363         case DM_TABLE_DEPS:
5364         {
5365             void *hdata = (void*)host_dm + host_dm->data_start;
5366             int count = *(uint32_t*)hdata;
5367             uint64_t *hdev = hdata + 8;
5368             uint64_t *gdev = argptr + 8;
5369             int i;
5370 
5371             *(uint32_t*)argptr = tswap32(count);
5372             for (i = 0; i < count; i++) {
5373                 *gdev = tswap64(*hdev);
5374                 gdev++;
5375                 hdev++;
5376             }
5377             break;
5378         }
5379         case DM_LIST_VERSIONS:
5380         {
5381             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5382             uint32_t remaining_data = guest_data_size;
5383             void *cur_data = argptr;
5384             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5385             int vers_size = thunk_type_size(arg_type, 0);
5386 
5387             while (1) {
5388                 uint32_t next = vers->next;
5389                 if (next) {
5390                     vers->next = vers_size + (strlen(vers->name) + 1);
5391                 }
5392                 if (remaining_data < vers->next) {
5393                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5394                     break;
5395                 }
5396                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5397                 strcpy(cur_data + vers_size, vers->name);
5398                 cur_data += vers->next;
5399                 remaining_data -= vers->next;
5400                 if (!next) {
5401                     break;
5402                 }
5403                 vers = (void*)vers + next;
5404             }
5405             break;
5406         }
5407         default:
5408             unlock_user(argptr, guest_data, 0);
5409             ret = -TARGET_EINVAL;
5410             goto out;
5411         }
5412         unlock_user(argptr, guest_data, guest_data_size);
5413 
5414         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5415         if (!argptr) {
5416             ret = -TARGET_EFAULT;
5417             goto out;
5418         }
5419         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5420         unlock_user(argptr, arg, target_size);
5421     }
5422 out:
5423     g_free(big_buf);
5424     return ret;
5425 }
5426 
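/*
 * BLKPG: the argument is a struct blkpg_ioctl_arg whose data member points
 * at a struct blkpg_partition, so both levels are converted before calling
 * the host ioctl.
 */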
5427 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5428                                int cmd, abi_long arg)
5429 {
5430     void *argptr;
5431     int target_size;
5432     const argtype *arg_type = ie->arg_type;
5433     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5434     abi_long ret;
5435 
5436     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5437     struct blkpg_partition host_part;
5438 
5439     /* Read and convert blkpg */
5440     arg_type++;
5441     target_size = thunk_type_size(arg_type, 0);
5442     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5443     if (!argptr) {
5444         ret = -TARGET_EFAULT;
5445         goto out;
5446     }
5447     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5448     unlock_user(argptr, arg, 0);
5449 
5450     switch (host_blkpg->op) {
5451     case BLKPG_ADD_PARTITION:
5452     case BLKPG_DEL_PARTITION:
5453         /* payload is struct blkpg_partition */
5454         break;
5455     default:
5456         /* Unknown opcode */
5457         ret = -TARGET_EINVAL;
5458         goto out;
5459     }
5460 
5461     /* Read and convert blkpg->data */
5462     arg = (abi_long)(uintptr_t)host_blkpg->data;
5463     target_size = thunk_type_size(part_arg_type, 0);
5464     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5465     if (!argptr) {
5466         ret = -TARGET_EFAULT;
5467         goto out;
5468     }
5469     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5470     unlock_user(argptr, arg, 0);
5471 
5472     /* Swizzle the data pointer to our local copy and call! */
5473     host_blkpg->data = &host_part;
5474     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5475 
5476 out:
5477     return ret;
5478 }
5479 
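/*
 * Route ioctls on a struct rtentry cannot use the generic thunk conversion
 * because of the embedded rt_dev string pointer: the structure is converted
 * field by field and rt_dev is locked as a host string for the call.
 */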
5480 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5481                                 int fd, int cmd, abi_long arg)
5482 {
5483     const argtype *arg_type = ie->arg_type;
5484     const StructEntry *se;
5485     const argtype *field_types;
5486     const int *dst_offsets, *src_offsets;
5487     int target_size;
5488     void *argptr;
5489     abi_ulong *target_rt_dev_ptr = NULL;
5490     unsigned long *host_rt_dev_ptr = NULL;
5491     abi_long ret;
5492     int i;
5493 
5494     assert(ie->access == IOC_W);
5495     assert(*arg_type == TYPE_PTR);
5496     arg_type++;
5497     assert(*arg_type == TYPE_STRUCT);
5498     target_size = thunk_type_size(arg_type, 0);
5499     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5500     if (!argptr) {
5501         return -TARGET_EFAULT;
5502     }
5503     arg_type++;
5504     assert(*arg_type == (int)STRUCT_rtentry);
5505     se = struct_entries + *arg_type++;
5506     assert(se->convert[0] == NULL);
5507     /* convert struct here to be able to catch rt_dev string */
5508     field_types = se->field_types;
5509     dst_offsets = se->field_offsets[THUNK_HOST];
5510     src_offsets = se->field_offsets[THUNK_TARGET];
5511     for (i = 0; i < se->nb_fields; i++) {
5512         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5513             assert(*field_types == TYPE_PTRVOID);
5514             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5515             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5516             if (*target_rt_dev_ptr != 0) {
5517                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5518                                                   tswapal(*target_rt_dev_ptr));
5519                 if (!*host_rt_dev_ptr) {
5520                     unlock_user(argptr, arg, 0);
5521                     return -TARGET_EFAULT;
5522                 }
5523             } else {
5524                 *host_rt_dev_ptr = 0;
5525             }
5526             field_types++;
5527             continue;
5528         }
5529         field_types = thunk_convert(buf_temp + dst_offsets[i],
5530                                     argptr + src_offsets[i],
5531                                     field_types, THUNK_HOST);
5532     }
5533     unlock_user(argptr, arg, 0);
5534 
5535     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5536 
5537     assert(host_rt_dev_ptr != NULL);
5538     assert(target_rt_dev_ptr != NULL);
5539     if (*host_rt_dev_ptr != 0) {
5540         unlock_user((void *)*host_rt_dev_ptr,
5541                     *target_rt_dev_ptr, 0);
5542     }
5543     return ret;
5544 }
5545 
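/*
 * KDSIGACCEPT takes a signal number, which must be translated from the
 * target's numbering to the host's before the ioctl is issued.
 */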
5546 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5547                                      int fd, int cmd, abi_long arg)
5548 {
5549     int sig = target_to_host_signal(arg);
5550     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5551 }
5552 
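/*
 * SIOCGSTAMP/SIOCGSTAMPNS: the _OLD command variants use the target's
 * traditional timeval/timespec layout while the newer variants always use
 * the 64-bit layouts, so the result is copied out with the matching helper.
 */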
5553 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5554                                     int fd, int cmd, abi_long arg)
5555 {
5556     struct timeval tv;
5557     abi_long ret;
5558 
5559     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5560     if (is_error(ret)) {
5561         return ret;
5562     }
5563 
5564     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5565         if (copy_to_user_timeval(arg, &tv)) {
5566             return -TARGET_EFAULT;
5567         }
5568     } else {
5569         if (copy_to_user_timeval64(arg, &tv)) {
5570             return -TARGET_EFAULT;
5571         }
5572     }
5573 
5574     return ret;
5575 }
5576 
5577 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5578                                       int fd, int cmd, abi_long arg)
5579 {
5580     struct timespec ts;
5581     abi_long ret;
5582 
5583     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5584     if (is_error(ret)) {
5585         return ret;
5586     }
5587 
5588     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5589         if (host_to_target_timespec(arg, &ts)) {
5590             return -TARGET_EFAULT;
5591         }
5592     } else {
5593         if (host_to_target_timespec64(arg, &ts)) {
5594             return -TARGET_EFAULT;
5595         }
5596     }
5597 
5598     return ret;
5599 }
5600 
5601 #ifdef TIOCGPTPEER
5602 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5603                                      int fd, int cmd, abi_long arg)
5604 {
5605     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5606     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5607 }
5608 #endif
5609 
5610 #ifdef HAVE_DRM_H
5611 
5612 static void unlock_drm_version(struct drm_version *host_ver,
5613                                struct target_drm_version *target_ver,
5614                                bool copy)
5615 {
5616     unlock_user(host_ver->name, target_ver->name,
5617                                 copy ? host_ver->name_len : 0);
5618     unlock_user(host_ver->date, target_ver->date,
5619                                 copy ? host_ver->date_len : 0);
5620     unlock_user(host_ver->desc, target_ver->desc,
5621                                 copy ? host_ver->desc_len : 0);
5622 }
5623 
5624 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5625                                           struct target_drm_version *target_ver)
5626 {
5627     memset(host_ver, 0, sizeof(*host_ver));
5628 
5629     __get_user(host_ver->name_len, &target_ver->name_len);
5630     if (host_ver->name_len) {
5631         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5632                                    target_ver->name_len, 0);
5633         if (!host_ver->name) {
5634             return -EFAULT;
5635         }
5636     }
5637 
5638     __get_user(host_ver->date_len, &target_ver->date_len);
5639     if (host_ver->date_len) {
5640         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5641                                    target_ver->date_len, 0);
5642         if (!host_ver->date) {
5643             goto err;
5644         }
5645     }
5646 
5647     __get_user(host_ver->desc_len, &target_ver->desc_len);
5648     if (host_ver->desc_len) {
5649         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5650                                    target_ver->desc_len, 0);
5651         if (!host_ver->desc) {
5652             goto err;
5653         }
5654     }
5655 
5656     return 0;
5657 err:
5658     unlock_drm_version(host_ver, target_ver, false);
5659     return -EFAULT;
5660 }
5661 
5662 static inline void host_to_target_drmversion(
5663                                           struct target_drm_version *target_ver,
5664                                           struct drm_version *host_ver)
5665 {
5666     __put_user(host_ver->version_major, &target_ver->version_major);
5667     __put_user(host_ver->version_minor, &target_ver->version_minor);
5668     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5669     __put_user(host_ver->name_len, &target_ver->name_len);
5670     __put_user(host_ver->date_len, &target_ver->date_len);
5671     __put_user(host_ver->desc_len, &target_ver->desc_len);
5672     unlock_drm_version(host_ver, target_ver, true);
5673 }
5674 
5675 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5676                              int fd, int cmd, abi_long arg)
5677 {
5678     struct drm_version *ver;
5679     struct target_drm_version *target_ver;
5680     abi_long ret;
5681 
5682     switch (ie->host_cmd) {
5683     case DRM_IOCTL_VERSION:
5684         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5685             return -TARGET_EFAULT;
5686         }
5687         ver = (struct drm_version *)buf_temp;
5688         ret = target_to_host_drmversion(ver, target_ver);
5689         if (!is_error(ret)) {
5690             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5691             if (is_error(ret)) {
5692                 unlock_drm_version(ver, target_ver, false);
5693             } else {
5694                 host_to_target_drmversion(target_ver, ver);
5695             }
5696         }
5697         unlock_user_struct(target_ver, arg, 0);
5698         return ret;
5699     }
5700     return -TARGET_ENOSYS;
5701 }
5702 
5703 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5704                                            struct drm_i915_getparam *gparam,
5705                                            int fd, abi_long arg)
5706 {
5707     abi_long ret;
5708     int value;
5709     struct target_drm_i915_getparam *target_gparam;
5710 
5711     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5712         return -TARGET_EFAULT;
5713     }
5714 
5715     __get_user(gparam->param, &target_gparam->param);
5716     gparam->value = &value;
5717     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5718     put_user_s32(value, target_gparam->value);
5719 
5720     unlock_user_struct(target_gparam, arg, 0);
5721     return ret;
5722 }
5723 
5724 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5725                                   int fd, int cmd, abi_long arg)
5726 {
5727     switch (ie->host_cmd) {
5728     case DRM_IOCTL_I915_GETPARAM:
5729         return do_ioctl_drm_i915_getparam(ie,
5730                                           (struct drm_i915_getparam *)buf_temp,
5731                                           fd, arg);
5732     default:
5733         return -TARGET_ENOSYS;
5734     }
5735 }
5736 
5737 #endif
5738 
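/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable-length array
 * of filter->count Ethernet addresses, which the fixed-size thunk
 * descriptions cannot express, so the structure is copied in by hand.
 */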
5739 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5740                                         int fd, int cmd, abi_long arg)
5741 {
5742     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5743     struct tun_filter *target_filter;
5744     char *target_addr;
5745 
5746     assert(ie->access == IOC_W);
5747 
5748     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5749     if (!target_filter) {
5750         return -TARGET_EFAULT;
5751     }
5752     filter->flags = tswap16(target_filter->flags);
5753     filter->count = tswap16(target_filter->count);
5754     unlock_user(target_filter, arg, 0);
5755 
5756     if (filter->count) {
5757         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5758             MAX_STRUCT_SIZE) {
5759             return -TARGET_EFAULT;
5760         }
5761 
5762         target_addr = lock_user(VERIFY_READ,
5763                                 arg + offsetof(struct tun_filter, addr),
5764                                 filter->count * ETH_ALEN, 1);
5765         if (!target_addr) {
5766             return -TARGET_EFAULT;
5767         }
5768         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5769         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5770     }
5771 
5772     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5773 }
5774 
5775 IOCTLEntry ioctl_entries[] = {
5776 #define IOCTL(cmd, access, ...) \
5777     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5778 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5779     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5780 #define IOCTL_IGNORE(cmd) \
5781     { TARGET_ ## cmd, 0, #cmd },
5782 #include "ioctls.h"
5783     { 0, 0, },
5784 };
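/*
 * ioctls.h expands to one table entry per supported ioctl; for example
 * (illustrative only) a read-only ioctl returning an unsigned long long
 * would be declared as
 *
 *     IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))
 *
 * IOCTL_SPECIAL entries additionally name one of the do_ioctl_* helpers
 * above, and IOCTL_IGNORE entries have no host counterpart and simply fail
 * with -TARGET_ENOSYS in do_ioctl() below.
 */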
5785 
5786 /* ??? Implement proper locking for ioctls.  */
5787 /* do_ioctl() must return target values and target errnos. */
5788 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5789 {
5790     const IOCTLEntry *ie;
5791     const argtype *arg_type;
5792     abi_long ret;
5793     uint8_t buf_temp[MAX_STRUCT_SIZE];
5794     int target_size;
5795     void *argptr;
5796 
5797     ie = ioctl_entries;
5798     for(;;) {
5799         if (ie->target_cmd == 0) {
5800             qemu_log_mask(
5801                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5802             return -TARGET_ENOSYS;
5803         }
5804         if (ie->target_cmd == cmd)
5805             break;
5806         ie++;
5807     }
5808     arg_type = ie->arg_type;
5809     if (ie->do_ioctl) {
5810         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5811     } else if (!ie->host_cmd) {
5812         /* Some architectures define BSD ioctls in their headers
5813            that are not implemented in Linux.  */
5814         return -TARGET_ENOSYS;
5815     }
5816 
5817     switch(arg_type[0]) {
5818     case TYPE_NULL:
5819         /* no argument */
5820         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5821         break;
5822     case TYPE_PTRVOID:
5823     case TYPE_INT:
5824     case TYPE_LONG:
5825     case TYPE_ULONG:
5826         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5827         break;
5828     case TYPE_PTR:
5829         arg_type++;
5830         target_size = thunk_type_size(arg_type, 0);
5831         switch(ie->access) {
5832         case IOC_R:
5833             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5834             if (!is_error(ret)) {
5835                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5836                 if (!argptr)
5837                     return -TARGET_EFAULT;
5838                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5839                 unlock_user(argptr, arg, target_size);
5840             }
5841             break;
5842         case IOC_W:
5843             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5844             if (!argptr)
5845                 return -TARGET_EFAULT;
5846             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5847             unlock_user(argptr, arg, 0);
5848             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5849             break;
5850         default:
5851         case IOC_RW:
5852             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5853             if (!argptr)
5854                 return -TARGET_EFAULT;
5855             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5856             unlock_user(argptr, arg, 0);
5857             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5858             if (!is_error(ret)) {
5859                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5860                 if (!argptr)
5861                     return -TARGET_EFAULT;
5862                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5863                 unlock_user(argptr, arg, target_size);
5864             }
5865             break;
5866         }
5867         break;
5868     default:
5869         qemu_log_mask(LOG_UNIMP,
5870                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5871                       (long)cmd, arg_type[0]);
5872         ret = -TARGET_ENOSYS;
5873         break;
5874     }
5875     return ret;
5876 }
5877 
5878 static const bitmask_transtbl iflag_tbl[] = {
5879         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5880         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5881         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5882         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5883         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5884         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5885         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5886         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5887         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5888         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5889         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5890         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5891         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5892         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5893         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5894         { 0, 0, 0, 0 }
5895 };
5896 
5897 static const bitmask_transtbl oflag_tbl[] = {
5898 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5899 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5900 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5901 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5902 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5903 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5904 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5905 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5906 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5907 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5908 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5909 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5910 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5911 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5912 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5913 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5914 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5915 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5916 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5917 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5918 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5919 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5920 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5921 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5922 	{ 0, 0, 0, 0 }
5923 };
5924 
5925 static const bitmask_transtbl cflag_tbl[] = {
5926 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5927 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5928 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5929 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5930 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5931 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5932 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5933 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5934 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5935 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5936 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5937 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5938 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5939 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5940 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5941 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5942 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5943 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5944 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5945 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5946 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5947 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5948 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5949 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5950 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5951 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5952 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5953 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5954 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5955 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5956 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5957 	{ 0, 0, 0, 0 }
5958 };
5959 
5960 static const bitmask_transtbl lflag_tbl[] = {
5961   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5962   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5963   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5964   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5965   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5966   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5967   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5968   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5969   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5970   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5971   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5972   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5973   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5974   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5975   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5976   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5977   { 0, 0, 0, 0 }
5978 };
5979 
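/*
 * The termios flag words are translated bit by bit through the tables above;
 * the c_cc control-character array is copied slot by slot because the
 * TARGET_V* indices need not match the host V* indices.
 */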
5980 static void target_to_host_termios (void *dst, const void *src)
5981 {
5982     struct host_termios *host = dst;
5983     const struct target_termios *target = src;
5984 
5985     host->c_iflag =
5986         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5987     host->c_oflag =
5988         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5989     host->c_cflag =
5990         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5991     host->c_lflag =
5992         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5993     host->c_line = target->c_line;
5994 
5995     memset(host->c_cc, 0, sizeof(host->c_cc));
5996     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5997     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5998     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5999     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6000     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6001     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6002     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6003     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6004     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6005     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6006     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6007     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6008     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6009     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6010     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6011     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6012     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6013 }
6014 
6015 static void host_to_target_termios (void *dst, const void *src)
6016 {
6017     struct target_termios *target = dst;
6018     const struct host_termios *host = src;
6019 
6020     target->c_iflag =
6021         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6022     target->c_oflag =
6023         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6024     target->c_cflag =
6025         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6026     target->c_lflag =
6027         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6028     target->c_line = host->c_line;
6029 
6030     memset(target->c_cc, 0, sizeof(target->c_cc));
6031     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6032     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6033     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6034     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6035     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6036     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6037     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6038     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6039     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6040     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6041     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6042     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6043     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6044     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6045     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6046     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6047     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6048 }
6049 
6050 static const StructEntry struct_termios_def = {
6051     .convert = { host_to_target_termios, target_to_host_termios },
6052     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6053     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6054     .print = print_termios,
6055 };
6056 
6057 static bitmask_transtbl mmap_flags_tbl[] = {
6058     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6059     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6060     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6061     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6062       MAP_ANONYMOUS, MAP_ANONYMOUS },
6063     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6064       MAP_GROWSDOWN, MAP_GROWSDOWN },
6065     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6066       MAP_DENYWRITE, MAP_DENYWRITE },
6067     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6068       MAP_EXECUTABLE, MAP_EXECUTABLE },
6069     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6070     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6071       MAP_NORESERVE, MAP_NORESERVE },
6072     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6073     /* MAP_STACK had been ignored by the kernel for quite some time.
6074        Recognize it for the target insofar as we do not want to pass
6075        it through to the host.  */
6076     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6077     { 0, 0, 0, 0 }
6078 };
6079 
6080 /*
6081  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6082  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6083  */
6084 #if defined(TARGET_I386)
6085 
6086 /* NOTE: there is really one LDT for all the threads */
6087 static uint8_t *ldt_table;
6088 
6089 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6090 {
6091     int size;
6092     void *p;
6093 
6094     if (!ldt_table)
6095         return 0;
6096     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6097     if (size > bytecount)
6098         size = bytecount;
6099     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6100     if (!p)
6101         return -TARGET_EFAULT;
6102     /* ??? Should this be byteswapped?  */
6103     memcpy(p, ldt_table, size);
6104     unlock_user(p, ptr, size);
6105     return size;
6106 }
6107 
6108 /* XXX: add locking support */
6109 static abi_long write_ldt(CPUX86State *env,
6110                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6111 {
6112     struct target_modify_ldt_ldt_s ldt_info;
6113     struct target_modify_ldt_ldt_s *target_ldt_info;
6114     int seg_32bit, contents, read_exec_only, limit_in_pages;
6115     int seg_not_present, useable, lm;
6116     uint32_t *lp, entry_1, entry_2;
6117 
6118     if (bytecount != sizeof(ldt_info))
6119         return -TARGET_EINVAL;
6120     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6121         return -TARGET_EFAULT;
6122     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6123     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6124     ldt_info.limit = tswap32(target_ldt_info->limit);
6125     ldt_info.flags = tswap32(target_ldt_info->flags);
6126     unlock_user_struct(target_ldt_info, ptr, 0);
6127 
6128     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6129         return -TARGET_EINVAL;
6130     seg_32bit = ldt_info.flags & 1;
6131     contents = (ldt_info.flags >> 1) & 3;
6132     read_exec_only = (ldt_info.flags >> 3) & 1;
6133     limit_in_pages = (ldt_info.flags >> 4) & 1;
6134     seg_not_present = (ldt_info.flags >> 5) & 1;
6135     useable = (ldt_info.flags >> 6) & 1;
6136 #ifdef TARGET_ABI32
6137     lm = 0;
6138 #else
6139     lm = (ldt_info.flags >> 7) & 1;
6140 #endif
6141     if (contents == 3) {
6142         if (oldmode)
6143             return -TARGET_EINVAL;
6144         if (seg_not_present == 0)
6145             return -TARGET_EINVAL;
6146     }
6147     /* allocate the LDT */
6148     if (!ldt_table) {
6149         env->ldt.base = target_mmap(0,
6150                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6151                                     PROT_READ|PROT_WRITE,
6152                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6153         if (env->ldt.base == -1)
6154             return -TARGET_ENOMEM;
6155         memset(g2h_untagged(env->ldt.base), 0,
6156                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6157         env->ldt.limit = 0xffff;
6158         ldt_table = g2h_untagged(env->ldt.base);
6159     }
6160 
6161     /* NOTE: same code as Linux kernel */
6162     /* Allow LDTs to be cleared by the user. */
6163     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6164         if (oldmode ||
6165             (contents == 0		&&
6166              read_exec_only == 1	&&
6167              seg_32bit == 0		&&
6168              limit_in_pages == 0	&&
6169              seg_not_present == 1	&&
6170              useable == 0 )) {
6171             entry_1 = 0;
6172             entry_2 = 0;
6173             goto install;
6174         }
6175     }
6176 
6177     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6178         (ldt_info.limit & 0x0ffff);
6179     entry_2 = (ldt_info.base_addr & 0xff000000) |
6180         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6181         (ldt_info.limit & 0xf0000) |
6182         ((read_exec_only ^ 1) << 9) |
6183         (contents << 10) |
6184         ((seg_not_present ^ 1) << 15) |
6185         (seg_32bit << 22) |
6186         (limit_in_pages << 23) |
6187         (lm << 21) |
6188         0x7000;
6189     if (!oldmode)
6190         entry_2 |= (useable << 20);
6191 
6192     /* Install the new entry ...  */
6193 install:
6194     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6195     lp[0] = tswap32(entry_1);
6196     lp[1] = tswap32(entry_2);
6197     return 0;
6198 }
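
/*
 * Rough sketch of the entry_1/entry_2 packing above, assuming the usual
 * x86 segment-descriptor layout (given here only as a reading aid):
 *
 *   entry_1: bits 31..16 = base[15:0],   bits 15..0  = limit[15:0]
 *   entry_2: bits 31..24 = base[31:24],  bit 23 = G   (limit_in_pages),
 *            bit 22 = D/B (seg_32bit),   bit 21 = L   (lm),
 *            bit 20 = AVL (useable),     bits 19..16 = limit[19:16],
 *            bit 15 = P (!seg_not_present), the 0x7000 constant sets
 *            S = 1 and DPL = 3 (bits 12..14), bits 11..10 = contents,
 *            bit 9 = !read_exec_only,    bits 7..0   = base[23:16]
 */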
6199 
6200 /* specific and weird i386 syscalls */
6201 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6202                               unsigned long bytecount)
6203 {
6204     abi_long ret;
6205 
6206     switch (func) {
6207     case 0:
6208         ret = read_ldt(ptr, bytecount);
6209         break;
6210     case 1:
6211         ret = write_ldt(env, ptr, bytecount, 1);
6212         break;
6213     case 0x11:
6214         ret = write_ldt(env, ptr, bytecount, 0);
6215         break;
6216     default:
6217         ret = -TARGET_ENOSYS;
6218         break;
6219     }
6220     return ret;
6221 }
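
/*
 * NOTE: the func values mirror the kernel's modify_ldt(2) interface --
 * 0 reads the LDT, 1 writes an entry in the legacy ("oldmode") format and
 * 0x11 writes one in the current format.  Anything else (including the
 * kernel's read-default operation) is reported as -TARGET_ENOSYS here.
 */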
6222 
6223 #if defined(TARGET_ABI32)
6224 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6225 {
6226     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6227     struct target_modify_ldt_ldt_s ldt_info;
6228     struct target_modify_ldt_ldt_s *target_ldt_info;
6229     int seg_32bit, contents, read_exec_only, limit_in_pages;
6230     int seg_not_present, useable, lm;
6231     uint32_t *lp, entry_1, entry_2;
6232     int i;
6233 
6234     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6235     if (!target_ldt_info)
6236         return -TARGET_EFAULT;
6237     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6238     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6239     ldt_info.limit = tswap32(target_ldt_info->limit);
6240     ldt_info.flags = tswap32(target_ldt_info->flags);
6241     if (ldt_info.entry_number == -1) {
6242         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6243             if (gdt_table[i] == 0) {
6244                 ldt_info.entry_number = i;
6245                 target_ldt_info->entry_number = tswap32(i);
6246                 break;
6247             }
6248         }
6249     }
6250     unlock_user_struct(target_ldt_info, ptr, 1);
6251 
6252     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6253         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6254            return -TARGET_EINVAL;
6255     seg_32bit = ldt_info.flags & 1;
6256     contents = (ldt_info.flags >> 1) & 3;
6257     read_exec_only = (ldt_info.flags >> 3) & 1;
6258     limit_in_pages = (ldt_info.flags >> 4) & 1;
6259     seg_not_present = (ldt_info.flags >> 5) & 1;
6260     useable = (ldt_info.flags >> 6) & 1;
6261 #ifdef TARGET_ABI32
6262     lm = 0;
6263 #else
6264     lm = (ldt_info.flags >> 7) & 1;
6265 #endif
6266 
6267     if (contents == 3) {
6268         if (seg_not_present == 0)
6269             return -TARGET_EINVAL;
6270     }
6271 
6272     /* NOTE: same code as Linux kernel */
6273     /* Allow LDTs to be cleared by the user. */
6274     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6275         if ((contents == 0             &&
6276              read_exec_only == 1       &&
6277              seg_32bit == 0            &&
6278              limit_in_pages == 0       &&
6279              seg_not_present == 1      &&
6280              useable == 0 )) {
6281             entry_1 = 0;
6282             entry_2 = 0;
6283             goto install;
6284         }
6285     }
6286 
6287     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6288         (ldt_info.limit & 0x0ffff);
6289     entry_2 = (ldt_info.base_addr & 0xff000000) |
6290         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6291         (ldt_info.limit & 0xf0000) |
6292         ((read_exec_only ^ 1) << 9) |
6293         (contents << 10) |
6294         ((seg_not_present ^ 1) << 15) |
6295         (seg_32bit << 22) |
6296         (limit_in_pages << 23) |
6297         (useable << 20) |
6298         (lm << 21) |
6299         0x7000;
6300 
6301     /* Install the new entry ...  */
6302 install:
6303     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6304     lp[0] = tswap32(entry_1);
6305     lp[1] = tswap32(entry_2);
6306     return 0;
6307 }
6308 
6309 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6310 {
6311     struct target_modify_ldt_ldt_s *target_ldt_info;
6312     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6313     uint32_t base_addr, limit, flags;
6314     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6315     int seg_not_present, useable, lm;
6316     uint32_t *lp, entry_1, entry_2;
6317 
6318     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6319     if (!target_ldt_info)
6320         return -TARGET_EFAULT;
6321     idx = tswap32(target_ldt_info->entry_number);
6322     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6323         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6324         unlock_user_struct(target_ldt_info, ptr, 1);
6325         return -TARGET_EINVAL;
6326     }
6327     lp = (uint32_t *)(gdt_table + idx);
6328     entry_1 = tswap32(lp[0]);
6329     entry_2 = tswap32(lp[1]);
6330 
6331     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6332     contents = (entry_2 >> 10) & 3;
6333     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6334     seg_32bit = (entry_2 >> 22) & 1;
6335     limit_in_pages = (entry_2 >> 23) & 1;
6336     useable = (entry_2 >> 20) & 1;
6337 #ifdef TARGET_ABI32
6338     lm = 0;
6339 #else
6340     lm = (entry_2 >> 21) & 1;
6341 #endif
6342     flags = (seg_32bit << 0) | (contents << 1) |
6343         (read_exec_only << 3) | (limit_in_pages << 4) |
6344         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6345     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6346     base_addr = (entry_1 >> 16) |
6347         (entry_2 & 0xff000000) |
6348         ((entry_2 & 0xff) << 16);
6349     target_ldt_info->base_addr = tswapal(base_addr);
6350     target_ldt_info->limit = tswap32(limit);
6351     target_ldt_info->flags = tswap32(flags);
6352     unlock_user_struct(target_ldt_info, ptr, 1);
6353     return 0;
6354 }
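
/*
 * NOTE: do_get_thread_area() is essentially the inverse of
 * do_set_thread_area() -- it unpacks the two descriptor words stored in
 * the guest's GDT TLS slot back into the modify_ldt-style flags word
 * (seg_32bit, contents, read_exec_only, ...) and writes base/limit/flags
 * back to the guest structure.
 */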
6355 
6356 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6357 {
6358     return -TARGET_ENOSYS;
6359 }
6360 #else
6361 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6362 {
6363     abi_long ret = 0;
6364     abi_ulong val;
6365     int idx;
6366 
6367     switch(code) {
6368     case TARGET_ARCH_SET_GS:
6369     case TARGET_ARCH_SET_FS:
6370         if (code == TARGET_ARCH_SET_GS)
6371             idx = R_GS;
6372         else
6373             idx = R_FS;
6374         cpu_x86_load_seg(env, idx, 0);
6375         env->segs[idx].base = addr;
6376         break;
6377     case TARGET_ARCH_GET_GS:
6378     case TARGET_ARCH_GET_FS:
6379         if (code == TARGET_ARCH_GET_GS)
6380             idx = R_GS;
6381         else
6382             idx = R_FS;
6383         val = env->segs[idx].base;
6384         if (put_user(val, addr, abi_ulong))
6385             ret = -TARGET_EFAULT;
6386         break;
6387     default:
6388         ret = -TARGET_EINVAL;
6389         break;
6390     }
6391     return ret;
6392 }
6393 #endif /* defined(TARGET_ABI32) */
6394 
6395 #endif /* defined(TARGET_I386) */
6396 
6397 #define NEW_STACK_SIZE 0x40000
6398 
6399 
6400 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6401 typedef struct {
6402     CPUArchState *env;
6403     pthread_mutex_t mutex;
6404     pthread_cond_t cond;
6405     pthread_t thread;
6406     uint32_t tid;
6407     abi_ulong child_tidptr;
6408     abi_ulong parent_tidptr;
6409     sigset_t sigmask;
6410 } new_thread_info;
6411 
6412 static void *clone_func(void *arg)
6413 {
6414     new_thread_info *info = arg;
6415     CPUArchState *env;
6416     CPUState *cpu;
6417     TaskState *ts;
6418 
6419     rcu_register_thread();
6420     tcg_register_thread();
6421     env = info->env;
6422     cpu = env_cpu(env);
6423     thread_cpu = cpu;
6424     ts = (TaskState *)cpu->opaque;
6425     info->tid = sys_gettid();
6426     task_settid(ts);
6427     if (info->child_tidptr)
6428         put_user_u32(info->tid, info->child_tidptr);
6429     if (info->parent_tidptr)
6430         put_user_u32(info->tid, info->parent_tidptr);
6431     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6432     /* Enable signals.  */
6433     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6434     /* Signal to the parent that we're ready.  */
6435     pthread_mutex_lock(&info->mutex);
6436     pthread_cond_broadcast(&info->cond);
6437     pthread_mutex_unlock(&info->mutex);
6438     /* Wait until the parent has finished initializing the tls state.  */
6439     pthread_mutex_lock(&clone_lock);
6440     pthread_mutex_unlock(&clone_lock);
6441     cpu_loop(env);
6442     /* never exits */
6443     return NULL;
6444 }
6445 
6446 /* do_fork() must return host values and target errnos (unlike most
6447    do_*() functions). */
6448 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6449                    abi_ulong parent_tidptr, target_ulong newtls,
6450                    abi_ulong child_tidptr)
6451 {
6452     CPUState *cpu = env_cpu(env);
6453     int ret;
6454     TaskState *ts;
6455     CPUState *new_cpu;
6456     CPUArchState *new_env;
6457     sigset_t sigmask;
6458 
6459     flags &= ~CLONE_IGNORED_FLAGS;
6460 
6461     /* Emulate vfork() with fork() */
6462     if (flags & CLONE_VFORK)
6463         flags &= ~(CLONE_VFORK | CLONE_VM);
6464 
6465     if (flags & CLONE_VM) {
6466         TaskState *parent_ts = (TaskState *)cpu->opaque;
6467         new_thread_info info;
6468         pthread_attr_t attr;
6469 
6470         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6471             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6472             return -TARGET_EINVAL;
6473         }
6474 
6475         ts = g_new0(TaskState, 1);
6476         init_task_state(ts);
6477 
6478         /* Grab a mutex so that thread setup appears atomic.  */
6479         pthread_mutex_lock(&clone_lock);
6480 
6481         /* we create a new CPU instance. */
6482         new_env = cpu_copy(env);
6483         /* Init regs that differ from the parent.  */
6484         cpu_clone_regs_child(new_env, newsp, flags);
6485         cpu_clone_regs_parent(env, flags);
6486         new_cpu = env_cpu(new_env);
6487         new_cpu->opaque = ts;
6488         ts->bprm = parent_ts->bprm;
6489         ts->info = parent_ts->info;
6490         ts->signal_mask = parent_ts->signal_mask;
6491 
6492         if (flags & CLONE_CHILD_CLEARTID) {
6493             ts->child_tidptr = child_tidptr;
6494         }
6495 
6496         if (flags & CLONE_SETTLS) {
6497             cpu_set_tls (new_env, newtls);
6498         }
6499 
6500         memset(&info, 0, sizeof(info));
6501         pthread_mutex_init(&info.mutex, NULL);
6502         pthread_mutex_lock(&info.mutex);
6503         pthread_cond_init(&info.cond, NULL);
6504         info.env = new_env;
6505         if (flags & CLONE_CHILD_SETTID) {
6506             info.child_tidptr = child_tidptr;
6507         }
6508         if (flags & CLONE_PARENT_SETTID) {
6509             info.parent_tidptr = parent_tidptr;
6510         }
6511 
6512         ret = pthread_attr_init(&attr);
6513         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6514         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6515         /* It is not safe to deliver signals until the child has finished
6516            initializing, so temporarily block all signals.  */
6517         sigfillset(&sigmask);
6518         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6519         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6520 
6521         /* If this is our first additional thread, we need to ensure we
6522          * generate code for parallel execution and flush old translations.
6523          */
6524         if (!parallel_cpus) {
6525             parallel_cpus = true;
6526             tb_flush(cpu);
6527         }
6528 
6529         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6530         /* TODO: Free new CPU state if thread creation failed.  */
6531 
6532         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6533         pthread_attr_destroy(&attr);
6534         if (ret == 0) {
6535             /* Wait for the child to initialize.  */
6536             pthread_cond_wait(&info.cond, &info.mutex);
6537             ret = info.tid;
6538         } else {
6539             ret = -1;
6540         }
6541         pthread_mutex_unlock(&info.mutex);
6542         pthread_cond_destroy(&info.cond);
6543         pthread_mutex_destroy(&info.mutex);
6544         pthread_mutex_unlock(&clone_lock);
6545     } else {
6546         /* if there is no CLONE_VM, we treat it as a fork */
6547         if (flags & CLONE_INVALID_FORK_FLAGS) {
6548             return -TARGET_EINVAL;
6549         }
6550 
6551         /* We can't support custom termination signals */
6552         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6553             return -TARGET_EINVAL;
6554         }
6555 
6556         if (block_signals()) {
6557             return -TARGET_ERESTARTSYS;
6558         }
6559 
6560         fork_start();
6561         ret = fork();
6562         if (ret == 0) {
6563             /* Child Process.  */
6564             cpu_clone_regs_child(env, newsp, flags);
6565             fork_end(1);
6566             /* There is a race condition here.  The parent process could
6567                theoretically read the TID in the child process before the child
6568                tid is set.  This would require using either ptrace
6569                (not implemented) or having *_tidptr point at a shared memory
6570                mapping.  We can't repeat the spinlock hack used above because
6571                the child process gets its own copy of the lock.  */
6572             if (flags & CLONE_CHILD_SETTID)
6573                 put_user_u32(sys_gettid(), child_tidptr);
6574             if (flags & CLONE_PARENT_SETTID)
6575                 put_user_u32(sys_gettid(), parent_tidptr);
6576             ts = (TaskState *)cpu->opaque;
6577             if (flags & CLONE_SETTLS)
6578                 cpu_set_tls (env, newtls);
6579             if (flags & CLONE_CHILD_CLEARTID)
6580                 ts->child_tidptr = child_tidptr;
6581         } else {
6582             cpu_clone_regs_parent(env, flags);
6583             fork_end(0);
6584         }
6585     }
6586     return ret;
6587 }
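
/*
 * Rough sketch of the CLONE_VM handshake above (orientation only):
 *
 *   parent (do_fork)                      child (clone_func)
 *   ----------------                      ------------------
 *   lock clone_lock
 *   copy CPU state, set up TaskState
 *   lock info.mutex, block signals
 *   pthread_create()  ----------------->  publish TID, restore sigmask
 *   cond_wait(info.cond, info.mutex) <--  broadcast info.cond
 *   ret = info.tid, release info.*        lock + unlock clone_lock
 *   unlock clone_lock ----------------->  (barrier passed) cpu_loop()
 *
 * The child's lock/unlock of clone_lock is purely a barrier so that it
 * does not start running guest code before the parent has finished
 * initializing the TLS state.
 */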
6588 
6589 /* warning: doesn't handle Linux-specific flags... */
6590 static int target_to_host_fcntl_cmd(int cmd)
6591 {
6592     int ret;
6593 
6594     switch(cmd) {
6595     case TARGET_F_DUPFD:
6596     case TARGET_F_GETFD:
6597     case TARGET_F_SETFD:
6598     case TARGET_F_GETFL:
6599     case TARGET_F_SETFL:
6600     case TARGET_F_OFD_GETLK:
6601     case TARGET_F_OFD_SETLK:
6602     case TARGET_F_OFD_SETLKW:
6603         ret = cmd;
6604         break;
6605     case TARGET_F_GETLK:
6606         ret = F_GETLK64;
6607         break;
6608     case TARGET_F_SETLK:
6609         ret = F_SETLK64;
6610         break;
6611     case TARGET_F_SETLKW:
6612         ret = F_SETLKW64;
6613         break;
6614     case TARGET_F_GETOWN:
6615         ret = F_GETOWN;
6616         break;
6617     case TARGET_F_SETOWN:
6618         ret = F_SETOWN;
6619         break;
6620     case TARGET_F_GETSIG:
6621         ret = F_GETSIG;
6622         break;
6623     case TARGET_F_SETSIG:
6624         ret = F_SETSIG;
6625         break;
6626 #if TARGET_ABI_BITS == 32
6627     case TARGET_F_GETLK64:
6628         ret = F_GETLK64;
6629         break;
6630     case TARGET_F_SETLK64:
6631         ret = F_SETLK64;
6632         break;
6633     case TARGET_F_SETLKW64:
6634         ret = F_SETLKW64;
6635         break;
6636 #endif
6637     case TARGET_F_SETLEASE:
6638         ret = F_SETLEASE;
6639         break;
6640     case TARGET_F_GETLEASE:
6641         ret = F_GETLEASE;
6642         break;
6643 #ifdef F_DUPFD_CLOEXEC
6644     case TARGET_F_DUPFD_CLOEXEC:
6645         ret = F_DUPFD_CLOEXEC;
6646         break;
6647 #endif
6648     case TARGET_F_NOTIFY:
6649         ret = F_NOTIFY;
6650         break;
6651 #ifdef F_GETOWN_EX
6652     case TARGET_F_GETOWN_EX:
6653         ret = F_GETOWN_EX;
6654         break;
6655 #endif
6656 #ifdef F_SETOWN_EX
6657     case TARGET_F_SETOWN_EX:
6658         ret = F_SETOWN_EX;
6659         break;
6660 #endif
6661 #ifdef F_SETPIPE_SZ
6662     case TARGET_F_SETPIPE_SZ:
6663         ret = F_SETPIPE_SZ;
6664         break;
6665     case TARGET_F_GETPIPE_SZ:
6666         ret = F_GETPIPE_SZ;
6667         break;
6668 #endif
6669 #ifdef F_ADD_SEALS
6670     case TARGET_F_ADD_SEALS:
6671         ret = F_ADD_SEALS;
6672         break;
6673     case TARGET_F_GET_SEALS:
6674         ret = F_GET_SEALS;
6675         break;
6676 #endif
6677     default:
6678         ret = -TARGET_EINVAL;
6679         break;
6680     }
6681 
6682 #if defined(__powerpc64__)
6683     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6684      * 14, which are not supported by the kernel. The glibc fcntl wrapper
6685      * actually adjusts them to 5, 6 and 7 before making the syscall. Since
6686      * we make the syscall directly, adjust to what the kernel supports.
6687      */
6688     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6689         ret -= F_GETLK64 - 5;
6690     }
6691 #endif
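
    /*
     * Worked example of the adjustment above, using the values quoted in
     * the comment: with glibc's F_GETLK64 == 12, a translated F_SETLK64 of
     * 13 becomes 13 - (12 - 5) == 6, which is the value the kernel expects.
     */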
6692 
6693     return ret;
6694 }
6695 
6696 #define FLOCK_TRANSTBL \
6697     switch (type) { \
6698     TRANSTBL_CONVERT(F_RDLCK); \
6699     TRANSTBL_CONVERT(F_WRLCK); \
6700     TRANSTBL_CONVERT(F_UNLCK); \
6701     }
6702 
6703 static int target_to_host_flock(int type)
6704 {
6705 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6706     FLOCK_TRANSTBL
6707 #undef  TRANSTBL_CONVERT
6708     return -TARGET_EINVAL;
6709 }
6710 
6711 static int host_to_target_flock(int type)
6712 {
6713 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6714     FLOCK_TRANSTBL
6715 #undef  TRANSTBL_CONVERT
6716     /* if we don't know how to convert the value coming
6717      * from the host, we copy it to the target field as-is
6718      */
6719     return type;
6720 }
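
/*
 * NOTE: with the TRANSTBL_CONVERT definitions above, FLOCK_TRANSTBL
 * expands to a plain switch, e.g. in target_to_host_flock():
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *
 * and to the mirrored case labels in host_to_target_flock().
 */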
6721 
6722 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6723                                             abi_ulong target_flock_addr)
6724 {
6725     struct target_flock *target_fl;
6726     int l_type;
6727 
6728     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6729         return -TARGET_EFAULT;
6730     }
6731 
6732     __get_user(l_type, &target_fl->l_type);
6733     l_type = target_to_host_flock(l_type);
6734     if (l_type < 0) {
6735         return l_type;
6736     }
6737     fl->l_type = l_type;
6738     __get_user(fl->l_whence, &target_fl->l_whence);
6739     __get_user(fl->l_start, &target_fl->l_start);
6740     __get_user(fl->l_len, &target_fl->l_len);
6741     __get_user(fl->l_pid, &target_fl->l_pid);
6742     unlock_user_struct(target_fl, target_flock_addr, 0);
6743     return 0;
6744 }
6745 
6746 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6747                                           const struct flock64 *fl)
6748 {
6749     struct target_flock *target_fl;
6750     short l_type;
6751 
6752     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6753         return -TARGET_EFAULT;
6754     }
6755 
6756     l_type = host_to_target_flock(fl->l_type);
6757     __put_user(l_type, &target_fl->l_type);
6758     __put_user(fl->l_whence, &target_fl->l_whence);
6759     __put_user(fl->l_start, &target_fl->l_start);
6760     __put_user(fl->l_len, &target_fl->l_len);
6761     __put_user(fl->l_pid, &target_fl->l_pid);
6762     unlock_user_struct(target_fl, target_flock_addr, 1);
6763     return 0;
6764 }
6765 
6766 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6767 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6768 
6769 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6770 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6771                                                    abi_ulong target_flock_addr)
6772 {
6773     struct target_oabi_flock64 *target_fl;
6774     int l_type;
6775 
6776     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6777         return -TARGET_EFAULT;
6778     }
6779 
6780     __get_user(l_type, &target_fl->l_type);
6781     l_type = target_to_host_flock(l_type);
6782     if (l_type < 0) {
6783         return l_type;
6784     }
6785     fl->l_type = l_type;
6786     __get_user(fl->l_whence, &target_fl->l_whence);
6787     __get_user(fl->l_start, &target_fl->l_start);
6788     __get_user(fl->l_len, &target_fl->l_len);
6789     __get_user(fl->l_pid, &target_fl->l_pid);
6790     unlock_user_struct(target_fl, target_flock_addr, 0);
6791     return 0;
6792 }
6793 
6794 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6795                                                  const struct flock64 *fl)
6796 {
6797     struct target_oabi_flock64 *target_fl;
6798     short l_type;
6799 
6800     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6801         return -TARGET_EFAULT;
6802     }
6803 
6804     l_type = host_to_target_flock(fl->l_type);
6805     __put_user(l_type, &target_fl->l_type);
6806     __put_user(fl->l_whence, &target_fl->l_whence);
6807     __put_user(fl->l_start, &target_fl->l_start);
6808     __put_user(fl->l_len, &target_fl->l_len);
6809     __put_user(fl->l_pid, &target_fl->l_pid);
6810     unlock_user_struct(target_fl, target_flock_addr, 1);
6811     return 0;
6812 }
6813 #endif
6814 
6815 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6816                                               abi_ulong target_flock_addr)
6817 {
6818     struct target_flock64 *target_fl;
6819     int l_type;
6820 
6821     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6822         return -TARGET_EFAULT;
6823     }
6824 
6825     __get_user(l_type, &target_fl->l_type);
6826     l_type = target_to_host_flock(l_type);
6827     if (l_type < 0) {
6828         return l_type;
6829     }
6830     fl->l_type = l_type;
6831     __get_user(fl->l_whence, &target_fl->l_whence);
6832     __get_user(fl->l_start, &target_fl->l_start);
6833     __get_user(fl->l_len, &target_fl->l_len);
6834     __get_user(fl->l_pid, &target_fl->l_pid);
6835     unlock_user_struct(target_fl, target_flock_addr, 0);
6836     return 0;
6837 }
6838 
6839 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6840                                             const struct flock64 *fl)
6841 {
6842     struct target_flock64 *target_fl;
6843     short l_type;
6844 
6845     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6846         return -TARGET_EFAULT;
6847     }
6848 
6849     l_type = host_to_target_flock(fl->l_type);
6850     __put_user(l_type, &target_fl->l_type);
6851     __put_user(fl->l_whence, &target_fl->l_whence);
6852     __put_user(fl->l_start, &target_fl->l_start);
6853     __put_user(fl->l_len, &target_fl->l_len);
6854     __put_user(fl->l_pid, &target_fl->l_pid);
6855     unlock_user_struct(target_fl, target_flock_addr, 1);
6856     return 0;
6857 }
6858 
6859 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6860 {
6861     struct flock64 fl64;
6862 #ifdef F_GETOWN_EX
6863     struct f_owner_ex fox;
6864     struct target_f_owner_ex *target_fox;
6865 #endif
6866     abi_long ret;
6867     int host_cmd = target_to_host_fcntl_cmd(cmd);
6868 
6869     if (host_cmd == -TARGET_EINVAL)
6870         return host_cmd;
6871 
6872     switch(cmd) {
6873     case TARGET_F_GETLK:
6874         ret = copy_from_user_flock(&fl64, arg);
6875         if (ret) {
6876             return ret;
6877         }
6878         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6879         if (ret == 0) {
6880             ret = copy_to_user_flock(arg, &fl64);
6881         }
6882         break;
6883 
6884     case TARGET_F_SETLK:
6885     case TARGET_F_SETLKW:
6886         ret = copy_from_user_flock(&fl64, arg);
6887         if (ret) {
6888             return ret;
6889         }
6890         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6891         break;
6892 
6893     case TARGET_F_GETLK64:
6894     case TARGET_F_OFD_GETLK:
6895         ret = copy_from_user_flock64(&fl64, arg);
6896         if (ret) {
6897             return ret;
6898         }
6899         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6900         if (ret == 0) {
6901             ret = copy_to_user_flock64(arg, &fl64);
6902         }
6903         break;
6904     case TARGET_F_SETLK64:
6905     case TARGET_F_SETLKW64:
6906     case TARGET_F_OFD_SETLK:
6907     case TARGET_F_OFD_SETLKW:
6908         ret = copy_from_user_flock64(&fl64, arg);
6909         if (ret) {
6910             return ret;
6911         }
6912         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6913         break;
6914 
6915     case TARGET_F_GETFL:
6916         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6917         if (ret >= 0) {
6918             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6919         }
6920         break;
6921 
6922     case TARGET_F_SETFL:
6923         ret = get_errno(safe_fcntl(fd, host_cmd,
6924                                    target_to_host_bitmask(arg,
6925                                                           fcntl_flags_tbl)));
6926         break;
6927 
6928 #ifdef F_GETOWN_EX
6929     case TARGET_F_GETOWN_EX:
6930         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6931         if (ret >= 0) {
6932             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6933                 return -TARGET_EFAULT;
6934             target_fox->type = tswap32(fox.type);
6935             target_fox->pid = tswap32(fox.pid);
6936             unlock_user_struct(target_fox, arg, 1);
6937         }
6938         break;
6939 #endif
6940 
6941 #ifdef F_SETOWN_EX
6942     case TARGET_F_SETOWN_EX:
6943         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6944             return -TARGET_EFAULT;
6945         fox.type = tswap32(target_fox->type);
6946         fox.pid = tswap32(target_fox->pid);
6947         unlock_user_struct(target_fox, arg, 0);
6948         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6949         break;
6950 #endif
6951 
6952     case TARGET_F_SETSIG:
6953         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6954         break;
6955 
6956     case TARGET_F_GETSIG:
6957         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6958         break;
6959 
6960     case TARGET_F_SETOWN:
6961     case TARGET_F_GETOWN:
6962     case TARGET_F_SETLEASE:
6963     case TARGET_F_GETLEASE:
6964     case TARGET_F_SETPIPE_SZ:
6965     case TARGET_F_GETPIPE_SZ:
6966     case TARGET_F_ADD_SEALS:
6967     case TARGET_F_GET_SEALS:
6968         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6969         break;
6970 
6971     default:
6972         ret = get_errno(safe_fcntl(fd, cmd, arg));
6973         break;
6974     }
6975     return ret;
6976 }
6977 
6978 #ifdef USE_UID16
6979 
6980 static inline int high2lowuid(int uid)
6981 {
6982     if (uid > 65535)
6983         return 65534;
6984     else
6985         return uid;
6986 }
6987 
6988 static inline int high2lowgid(int gid)
6989 {
6990     if (gid > 65535)
6991         return 65534;
6992     else
6993         return gid;
6994 }
6995 
6996 static inline int low2highuid(int uid)
6997 {
6998     if ((int16_t)uid == -1)
6999         return -1;
7000     else
7001         return uid;
7002 }
7003 
7004 static inline int low2highgid(int gid)
7005 {
7006     if ((int16_t)gid == -1)
7007         return -1;
7008     else
7009         return gid;
7010 }
7011 static inline int tswapid(int id)
7012 {
7013     return tswap16(id);
7014 }
7015 
7016 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7017 
7018 #else /* !USE_UID16 */
7019 static inline int high2lowuid(int uid)
7020 {
7021     return uid;
7022 }
7023 static inline int high2lowgid(int gid)
7024 {
7025     return gid;
7026 }
7027 static inline int low2highuid(int uid)
7028 {
7029     return uid;
7030 }
7031 static inline int low2highgid(int gid)
7032 {
7033     return gid;
7034 }
7035 static inline int tswapid(int id)
7036 {
7037     return tswap32(id);
7038 }
7039 
7040 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7041 
7042 #endif /* USE_UID16 */
7043 
7044 /* We must do direct syscalls for setting UID/GID, because we want to
7045  * implement the Linux system call semantics of "change only for this thread",
7046  * not the libc/POSIX semantics of "change for all threads in process".
7047  * (See http://ewontfix.com/17/ for more details.)
7048  * We use the 32-bit version of the syscalls if present; if it is not
7049  * then either the host architecture supports 32-bit UIDs natively with
7050  * the standard syscall, or the 16-bit UID is the best we can do.
7051  */
7052 #ifdef __NR_setuid32
7053 #define __NR_sys_setuid __NR_setuid32
7054 #else
7055 #define __NR_sys_setuid __NR_setuid
7056 #endif
7057 #ifdef __NR_setgid32
7058 #define __NR_sys_setgid __NR_setgid32
7059 #else
7060 #define __NR_sys_setgid __NR_setgid
7061 #endif
7062 #ifdef __NR_setresuid32
7063 #define __NR_sys_setresuid __NR_setresuid32
7064 #else
7065 #define __NR_sys_setresuid __NR_setresuid
7066 #endif
7067 #ifdef __NR_setresgid32
7068 #define __NR_sys_setresgid __NR_setresgid32
7069 #else
7070 #define __NR_sys_setresgid __NR_setresgid
7071 #endif
7072 
7073 _syscall1(int, sys_setuid, uid_t, uid)
7074 _syscall1(int, sys_setgid, gid_t, gid)
7075 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7076 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7077 
7078 void syscall_init(void)
7079 {
7080     IOCTLEntry *ie;
7081     const argtype *arg_type;
7082     int size;
7083     int i;
7084 
7085     thunk_init(STRUCT_MAX);
7086 
7087 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7088 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7089 #include "syscall_types.h"
7090 #undef STRUCT
7091 #undef STRUCT_SPECIAL
7092 
7093     /* Build the target_to_host_errno_table[] from
7094      * host_to_target_errno_table[]. */
7095     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7096         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7097     }
7098 
7099     /* we patch the ioctl size if necessary. We rely on the fact that
7100        no ioctl has all bits set to '1' in the size field */
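    /*
     * Rough sketch of the patching below: for an entry whose target command
     * was declared with the size field all-ones (a placeholder), the
     * argument type must be TYPE_PTR, and the placeholder is replaced by
     * thunk_type_size() of the pointed-to type so that the command word
     * carries the real structure size.
     */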
7101     ie = ioctl_entries;
7102     while (ie->target_cmd != 0) {
7103         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7104             TARGET_IOC_SIZEMASK) {
7105             arg_type = ie->arg_type;
7106             if (arg_type[0] != TYPE_PTR) {
7107                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7108                         ie->target_cmd);
7109                 exit(1);
7110             }
7111             arg_type++;
7112             size = thunk_type_size(arg_type, 0);
7113             ie->target_cmd = (ie->target_cmd &
7114                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7115                 (size << TARGET_IOC_SIZESHIFT);
7116         }
7117 
7118         /* automatic consistency check if same arch */
7119 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7120     (defined(__x86_64__) && defined(TARGET_X86_64))
7121         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7122             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7123                     ie->name, ie->target_cmd, ie->host_cmd);
7124         }
7125 #endif
7126         ie++;
7127     }
7128 }
7129 
7130 #ifdef TARGET_NR_truncate64
7131 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7132                                          abi_long arg2,
7133                                          abi_long arg3,
7134                                          abi_long arg4)
7135 {
7136     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7137         arg2 = arg3;
7138         arg3 = arg4;
7139     }
7140     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7141 }
7142 #endif
7143 
7144 #ifdef TARGET_NR_ftruncate64
7145 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7146                                           abi_long arg2,
7147                                           abi_long arg3,
7148                                           abi_long arg4)
7149 {
7150     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7151         arg2 = arg3;
7152         arg3 = arg4;
7153     }
7154     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7155 }
7156 #endif
7157 
7158 #if defined(TARGET_NR_timer_settime) || \
7159     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7160 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7161                                                  abi_ulong target_addr)
7162 {
7163     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7164                                 offsetof(struct target_itimerspec,
7165                                          it_interval)) ||
7166         target_to_host_timespec(&host_its->it_value, target_addr +
7167                                 offsetof(struct target_itimerspec,
7168                                          it_value))) {
7169         return -TARGET_EFAULT;
7170     }
7171 
7172     return 0;
7173 }
7174 #endif
7175 
7176 #if defined(TARGET_NR_timer_settime64) || \
7177     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7178 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7179                                                    abi_ulong target_addr)
7180 {
7181     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7182                                   offsetof(struct target__kernel_itimerspec,
7183                                            it_interval)) ||
7184         target_to_host_timespec64(&host_its->it_value, target_addr +
7185                                   offsetof(struct target__kernel_itimerspec,
7186                                            it_value))) {
7187         return -TARGET_EFAULT;
7188     }
7189 
7190     return 0;
7191 }
7192 #endif
7193 
7194 #if ((defined(TARGET_NR_timerfd_gettime) || \
7195       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7196       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7197 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7198                                                  struct itimerspec *host_its)
7199 {
7200     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7201                                                        it_interval),
7202                                 &host_its->it_interval) ||
7203         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7204                                                        it_value),
7205                                 &host_its->it_value)) {
7206         return -TARGET_EFAULT;
7207     }
7208     return 0;
7209 }
7210 #endif
7211 
7212 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7213       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7214       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7215 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7216                                                    struct itimerspec *host_its)
7217 {
7218     if (host_to_target_timespec64(target_addr +
7219                                   offsetof(struct target__kernel_itimerspec,
7220                                            it_interval),
7221                                   &host_its->it_interval) ||
7222         host_to_target_timespec64(target_addr +
7223                                   offsetof(struct target__kernel_itimerspec,
7224                                            it_value),
7225                                   &host_its->it_value)) {
7226         return -TARGET_EFAULT;
7227     }
7228     return 0;
7229 }
7230 #endif
7231 
7232 #if defined(TARGET_NR_adjtimex) || \
7233     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7234 static inline abi_long target_to_host_timex(struct timex *host_tx,
7235                                             abi_long target_addr)
7236 {
7237     struct target_timex *target_tx;
7238 
7239     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7240         return -TARGET_EFAULT;
7241     }
7242 
7243     __get_user(host_tx->modes, &target_tx->modes);
7244     __get_user(host_tx->offset, &target_tx->offset);
7245     __get_user(host_tx->freq, &target_tx->freq);
7246     __get_user(host_tx->maxerror, &target_tx->maxerror);
7247     __get_user(host_tx->esterror, &target_tx->esterror);
7248     __get_user(host_tx->status, &target_tx->status);
7249     __get_user(host_tx->constant, &target_tx->constant);
7250     __get_user(host_tx->precision, &target_tx->precision);
7251     __get_user(host_tx->tolerance, &target_tx->tolerance);
7252     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7253     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7254     __get_user(host_tx->tick, &target_tx->tick);
7255     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7256     __get_user(host_tx->jitter, &target_tx->jitter);
7257     __get_user(host_tx->shift, &target_tx->shift);
7258     __get_user(host_tx->stabil, &target_tx->stabil);
7259     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7260     __get_user(host_tx->calcnt, &target_tx->calcnt);
7261     __get_user(host_tx->errcnt, &target_tx->errcnt);
7262     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7263     __get_user(host_tx->tai, &target_tx->tai);
7264 
7265     unlock_user_struct(target_tx, target_addr, 0);
7266     return 0;
7267 }
7268 
7269 static inline abi_long host_to_target_timex(abi_long target_addr,
7270                                             struct timex *host_tx)
7271 {
7272     struct target_timex *target_tx;
7273 
7274     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7275         return -TARGET_EFAULT;
7276     }
7277 
7278     __put_user(host_tx->modes, &target_tx->modes);
7279     __put_user(host_tx->offset, &target_tx->offset);
7280     __put_user(host_tx->freq, &target_tx->freq);
7281     __put_user(host_tx->maxerror, &target_tx->maxerror);
7282     __put_user(host_tx->esterror, &target_tx->esterror);
7283     __put_user(host_tx->status, &target_tx->status);
7284     __put_user(host_tx->constant, &target_tx->constant);
7285     __put_user(host_tx->precision, &target_tx->precision);
7286     __put_user(host_tx->tolerance, &target_tx->tolerance);
7287     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7288     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7289     __put_user(host_tx->tick, &target_tx->tick);
7290     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7291     __put_user(host_tx->jitter, &target_tx->jitter);
7292     __put_user(host_tx->shift, &target_tx->shift);
7293     __put_user(host_tx->stabil, &target_tx->stabil);
7294     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7295     __put_user(host_tx->calcnt, &target_tx->calcnt);
7296     __put_user(host_tx->errcnt, &target_tx->errcnt);
7297     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7298     __put_user(host_tx->tai, &target_tx->tai);
7299 
7300     unlock_user_struct(target_tx, target_addr, 1);
7301     return 0;
7302 }
7303 #endif
7304 
7305 
7306 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7307 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7308                                               abi_long target_addr)
7309 {
7310     struct target__kernel_timex *target_tx;
7311 
7312     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7313                                  offsetof(struct target__kernel_timex,
7314                                           time))) {
7315         return -TARGET_EFAULT;
7316     }
7317 
7318     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7319         return -TARGET_EFAULT;
7320     }
7321 
7322     __get_user(host_tx->modes, &target_tx->modes);
7323     __get_user(host_tx->offset, &target_tx->offset);
7324     __get_user(host_tx->freq, &target_tx->freq);
7325     __get_user(host_tx->maxerror, &target_tx->maxerror);
7326     __get_user(host_tx->esterror, &target_tx->esterror);
7327     __get_user(host_tx->status, &target_tx->status);
7328     __get_user(host_tx->constant, &target_tx->constant);
7329     __get_user(host_tx->precision, &target_tx->precision);
7330     __get_user(host_tx->tolerance, &target_tx->tolerance);
7331     __get_user(host_tx->tick, &target_tx->tick);
7332     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7333     __get_user(host_tx->jitter, &target_tx->jitter);
7334     __get_user(host_tx->shift, &target_tx->shift);
7335     __get_user(host_tx->stabil, &target_tx->stabil);
7336     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7337     __get_user(host_tx->calcnt, &target_tx->calcnt);
7338     __get_user(host_tx->errcnt, &target_tx->errcnt);
7339     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7340     __get_user(host_tx->tai, &target_tx->tai);
7341 
7342     unlock_user_struct(target_tx, target_addr, 0);
7343     return 0;
7344 }
7345 
7346 static inline abi_long host_to_target_timex64(abi_long target_addr,
7347                                               struct timex *host_tx)
7348 {
7349     struct target__kernel_timex *target_tx;
7350 
7351     if (copy_to_user_timeval64(target_addr +
7352                                offsetof(struct target__kernel_timex, time),
7353                                &host_tx->time)) {
7354         return -TARGET_EFAULT;
7355     }
7356 
7357     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7358         return -TARGET_EFAULT;
7359     }
7360 
7361     __put_user(host_tx->modes, &target_tx->modes);
7362     __put_user(host_tx->offset, &target_tx->offset);
7363     __put_user(host_tx->freq, &target_tx->freq);
7364     __put_user(host_tx->maxerror, &target_tx->maxerror);
7365     __put_user(host_tx->esterror, &target_tx->esterror);
7366     __put_user(host_tx->status, &target_tx->status);
7367     __put_user(host_tx->constant, &target_tx->constant);
7368     __put_user(host_tx->precision, &target_tx->precision);
7369     __put_user(host_tx->tolerance, &target_tx->tolerance);
7370     __put_user(host_tx->tick, &target_tx->tick);
7371     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7372     __put_user(host_tx->jitter, &target_tx->jitter);
7373     __put_user(host_tx->shift, &target_tx->shift);
7374     __put_user(host_tx->stabil, &target_tx->stabil);
7375     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7376     __put_user(host_tx->calcnt, &target_tx->calcnt);
7377     __put_user(host_tx->errcnt, &target_tx->errcnt);
7378     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7379     __put_user(host_tx->tai, &target_tx->tai);
7380 
7381     unlock_user_struct(target_tx, target_addr, 1);
7382     return 0;
7383 }
7384 #endif
7385 
7386 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7387                                                abi_ulong target_addr)
7388 {
7389     struct target_sigevent *target_sevp;
7390 
7391     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7392         return -TARGET_EFAULT;
7393     }
7394 
7395     /* This union is awkward on 64 bit systems because it has a 32 bit
7396      * integer and a pointer in it; we follow the conversion approach
7397      * used for handling sigval types in signal.c so the guest should get
7398      * the correct value back even if we did a 64 bit byteswap and it's
7399      * using the 32 bit integer.
7400      */
7401     host_sevp->sigev_value.sival_ptr =
7402         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7403     host_sevp->sigev_signo =
7404         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7405     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7406     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7407 
7408     unlock_user_struct(target_sevp, target_addr, 1);
7409     return 0;
7410 }
7411 
7412 #if defined(TARGET_NR_mlockall)
7413 static inline int target_to_host_mlockall_arg(int arg)
7414 {
7415     int result = 0;
7416 
7417     if (arg & TARGET_MCL_CURRENT) {
7418         result |= MCL_CURRENT;
7419     }
7420     if (arg & TARGET_MCL_FUTURE) {
7421         result |= MCL_FUTURE;
7422     }
7423 #ifdef MCL_ONFAULT
7424     if (arg & TARGET_MCL_ONFAULT) {
7425         result |= MCL_ONFAULT;
7426     }
7427 #endif
7428 
7429     return result;
7430 }
7431 #endif
7432 
7433 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7434      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7435      defined(TARGET_NR_newfstatat))
7436 static inline abi_long host_to_target_stat64(void *cpu_env,
7437                                              abi_ulong target_addr,
7438                                              struct stat *host_st)
7439 {
7440 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7441     if (((CPUARMState *)cpu_env)->eabi) {
7442         struct target_eabi_stat64 *target_st;
7443 
7444         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7445             return -TARGET_EFAULT;
7446         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7447         __put_user(host_st->st_dev, &target_st->st_dev);
7448         __put_user(host_st->st_ino, &target_st->st_ino);
7449 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7450         __put_user(host_st->st_ino, &target_st->__st_ino);
7451 #endif
7452         __put_user(host_st->st_mode, &target_st->st_mode);
7453         __put_user(host_st->st_nlink, &target_st->st_nlink);
7454         __put_user(host_st->st_uid, &target_st->st_uid);
7455         __put_user(host_st->st_gid, &target_st->st_gid);
7456         __put_user(host_st->st_rdev, &target_st->st_rdev);
7457         __put_user(host_st->st_size, &target_st->st_size);
7458         __put_user(host_st->st_blksize, &target_st->st_blksize);
7459         __put_user(host_st->st_blocks, &target_st->st_blocks);
7460         __put_user(host_st->st_atime, &target_st->target_st_atime);
7461         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7462         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7463 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7464         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7465         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7466         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7467 #endif
7468         unlock_user_struct(target_st, target_addr, 1);
7469     } else
7470 #endif
7471     {
7472 #if defined(TARGET_HAS_STRUCT_STAT64)
7473         struct target_stat64 *target_st;
7474 #else
7475         struct target_stat *target_st;
7476 #endif
7477 
7478         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7479             return -TARGET_EFAULT;
7480         memset(target_st, 0, sizeof(*target_st));
7481         __put_user(host_st->st_dev, &target_st->st_dev);
7482         __put_user(host_st->st_ino, &target_st->st_ino);
7483 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7484         __put_user(host_st->st_ino, &target_st->__st_ino);
7485 #endif
7486         __put_user(host_st->st_mode, &target_st->st_mode);
7487         __put_user(host_st->st_nlink, &target_st->st_nlink);
7488         __put_user(host_st->st_uid, &target_st->st_uid);
7489         __put_user(host_st->st_gid, &target_st->st_gid);
7490         __put_user(host_st->st_rdev, &target_st->st_rdev);
7491         /* XXX: better use of kernel struct */
7492         __put_user(host_st->st_size, &target_st->st_size);
7493         __put_user(host_st->st_blksize, &target_st->st_blksize);
7494         __put_user(host_st->st_blocks, &target_st->st_blocks);
7495         __put_user(host_st->st_atime, &target_st->target_st_atime);
7496         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7497         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7498 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7499         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7500         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7501         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7502 #endif
7503         unlock_user_struct(target_st, target_addr, 1);
7504     }
7505 
7506     return 0;
7507 }
7508 #endif
7509 
7510 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7511 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7512                                             abi_ulong target_addr)
7513 {
7514     struct target_statx *target_stx;
7515 
7516     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7517         return -TARGET_EFAULT;
7518     }
7519     memset(target_stx, 0, sizeof(*target_stx));
7520 
7521     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7522     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7523     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7524     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7525     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7526     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7527     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7528     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7529     __put_user(host_stx->stx_size, &target_stx->stx_size);
7530     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7531     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7532     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7533     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7534     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7535     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7536     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7537     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7538     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7539     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7540     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7541     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7542     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7543     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7544 
7545     unlock_user_struct(target_stx, target_addr, 1);
7546 
7547     return 0;
7548 }
7549 #endif
7550 
7551 static int do_sys_futex(int *uaddr, int op, int val,
7552                          const struct timespec *timeout, int *uaddr2,
7553                          int val3)
7554 {
7555 #if HOST_LONG_BITS == 64
7556 #if defined(__NR_futex)
7557     /* always a 64-bit time_t; there is no separate _time64 syscall */
7558     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7559 
7560 #endif
7561 #else /* HOST_LONG_BITS == 64 */
7562 #if defined(__NR_futex_time64)
7563     if (sizeof(timeout->tv_sec) == 8) {
7564         /* _time64 function on 32bit arch */
7565         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7566     }
7567 #endif
7568 #if defined(__NR_futex)
7569     /* old function on 32bit arch */
7570     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7571 #endif
7572 #endif /* HOST_LONG_BITS == 64 */
7573     g_assert_not_reached();
7574 }
7575 
7576 static int do_safe_futex(int *uaddr, int op, int val,
7577                          const struct timespec *timeout, int *uaddr2,
7578                          int val3)
7579 {
7580 #if HOST_LONG_BITS == 64
7581 #if defined(__NR_futex)
7582     /* always a 64-bit time_t; there is no separate _time64 syscall */
7583     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7584 #endif
7585 #else /* HOST_LONG_BITS == 64 */
7586 #if defined(__NR_futex_time64)
7587     if (sizeof(timeout->tv_sec) == 8) {
7588         /* _time64 function on 32bit arch */
7589         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7590                                            val3));
7591     }
7592 #endif
7593 #if defined(__NR_futex)
7594     /* old function on 32bit arch */
7595     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7596 #endif
7597 #endif /* HOST_LONG_BITS == 64 */
7598     return -TARGET_ENOSYS;
7599 }
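
/*
 * NOTE on the two helpers above: on a 64-bit host, __NR_futex already
 * takes a 64-bit time_t timespec, so it is used unconditionally.  On a
 * 32-bit host, __NR_futex_time64 is preferred when the host timespec has a
 * 64-bit tv_sec, with the classic __NR_futex as the fallback.
 */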
7600 
7601 /* ??? Using host futex calls even when target atomic operations
7602    are not really atomic probably breaks things.  However, implementing
7603    futexes locally would make futexes shared between multiple processes
7604    tricky.  In any case they're probably useless, because guest atomic
7605    operations won't work either.  */
7606 #if defined(TARGET_NR_futex)
7607 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7608                     target_ulong timeout, target_ulong uaddr2, int val3)
7609 {
7610     struct timespec ts, *pts;
7611     int base_op;
7612 
7613     /* ??? We assume FUTEX_* constants are the same on both host
7614        and target.  */
7615 #ifdef FUTEX_CMD_MASK
7616     base_op = op & FUTEX_CMD_MASK;
7617 #else
7618     base_op = op;
7619 #endif
7620     switch (base_op) {
7621     case FUTEX_WAIT:
7622     case FUTEX_WAIT_BITSET:
7623         if (timeout) {
7624             pts = &ts;
7625             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7626         } else {
7627             pts = NULL;
7628         }
7629         return do_safe_futex(g2h(cpu, uaddr),
7630                              op, tswap32(val), pts, NULL, val3);
7631     case FUTEX_WAKE:
7632         return do_safe_futex(g2h(cpu, uaddr),
7633                              op, val, NULL, NULL, 0);
7634     case FUTEX_FD:
7635         return do_safe_futex(g2h(cpu, uaddr),
7636                              op, val, NULL, NULL, 0);
7637     case FUTEX_REQUEUE:
7638     case FUTEX_CMP_REQUEUE:
7639     case FUTEX_WAKE_OP:
7640         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7641            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7642            But the prototype takes a `struct timespec *'; insert casts
7643            to satisfy the compiler.  We do not need to tswap TIMEOUT
7644            since it's not compared to guest memory.  */
7645         pts = (struct timespec *)(uintptr_t) timeout;
7646         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7647                              (base_op == FUTEX_CMP_REQUEUE
7648                               ? tswap32(val3) : val3));
7649     default:
7650         return -TARGET_ENOSYS;
7651     }
7652 }
7653 #endif
7654 
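/*
 * Variant of do_futex() for the *_time64 syscall numbers: identical
 * except that the guest timeout is a 64-bit timespec and is converted
 * with target_to_host_timespec64().
 */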
7655 #if defined(TARGET_NR_futex_time64)
7656 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7657                            int val, target_ulong timeout,
7658                            target_ulong uaddr2, int val3)
7659 {
7660     struct timespec ts, *pts;
7661     int base_op;
7662 
7663     /* ??? We assume FUTEX_* constants are the same on both host
7664        and target.  */
7665 #ifdef FUTEX_CMD_MASK
7666     base_op = op & FUTEX_CMD_MASK;
7667 #else
7668     base_op = op;
7669 #endif
7670     switch (base_op) {
7671     case FUTEX_WAIT:
7672     case FUTEX_WAIT_BITSET:
7673         if (timeout) {
7674             pts = &ts;
7675             if (target_to_host_timespec64(pts, timeout)) {
7676                 return -TARGET_EFAULT;
7677             }
7678         } else {
7679             pts = NULL;
7680         }
7681         return do_safe_futex(g2h(cpu, uaddr), op,
7682                              tswap32(val), pts, NULL, val3);
7683     case FUTEX_WAKE:
7684         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7685     case FUTEX_FD:
7686         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7687     case FUTEX_REQUEUE:
7688     case FUTEX_CMP_REQUEUE:
7689     case FUTEX_WAKE_OP:
7690         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7691            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7692            But the prototype takes a `struct timespec *'; insert casts
7693            to satisfy the compiler.  We do not need to tswap TIMEOUT
7694            since it's not compared to guest memory.  */
7695         pts = (struct timespec *)(uintptr_t) timeout;
7696         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7697                              (base_op == FUTEX_CMP_REQUEUE
7698                               ? tswap32(val3) : val3));
7699     default:
7700         return -TARGET_ENOSYS;
7701     }
7702 }
7703 #endif
7704 
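/*
 * name_to_handle_at(): the code below assumes guest and host struct
 * file_handle share the same layout; only handle_bytes and handle_type
 * need byte-swapping, while the opaque f_handle payload is copied
 * verbatim.  The mount id is written back to the guest as a 32-bit value.
 */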
7705 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7706 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7707                                      abi_long handle, abi_long mount_id,
7708                                      abi_long flags)
7709 {
7710     struct file_handle *target_fh;
7711     struct file_handle *fh;
7712     int mid = 0;
7713     abi_long ret;
7714     char *name;
7715     unsigned int size, total_size;
7716 
7717     if (get_user_s32(size, handle)) {
7718         return -TARGET_EFAULT;
7719     }
7720 
7721     name = lock_user_string(pathname);
7722     if (!name) {
7723         return -TARGET_EFAULT;
7724     }
7725 
7726     total_size = sizeof(struct file_handle) + size;
7727     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7728     if (!target_fh) {
7729         unlock_user(name, pathname, 0);
7730         return -TARGET_EFAULT;
7731     }
7732 
7733     fh = g_malloc0(total_size);
7734     fh->handle_bytes = size;
7735 
7736     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7737     unlock_user(name, pathname, 0);
7738 
7739     /* man name_to_handle_at(2):
7740      * Other than the use of the handle_bytes field, the caller should treat
7741      * the file_handle structure as an opaque data type
7742      */
7743 
7744     memcpy(target_fh, fh, total_size);
7745     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7746     target_fh->handle_type = tswap32(fh->handle_type);
7747     g_free(fh);
7748     unlock_user(target_fh, handle, total_size);
7749 
7750     if (put_user_s32(mid, mount_id)) {
7751         return -TARGET_EFAULT;
7752     }
7753 
7754     return ret;
7755 
7756 }
7757 #endif
7758 
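/*
 * open_by_handle_at(): mirror image of the conversion above; the guest
 * handle is duplicated, handle_type is swapped back to host byte order,
 * and the open flags are translated through fcntl_flags_tbl.
 */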
7759 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7760 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7761                                      abi_long flags)
7762 {
7763     struct file_handle *target_fh;
7764     struct file_handle *fh;
7765     unsigned int size, total_size;
7766     abi_long ret;
7767 
7768     if (get_user_s32(size, handle)) {
7769         return -TARGET_EFAULT;
7770     }
7771 
7772     total_size = sizeof(struct file_handle) + size;
7773     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7774     if (!target_fh) {
7775         return -TARGET_EFAULT;
7776     }
7777 
7778     fh = g_memdup(target_fh, total_size);
7779     fh->handle_bytes = size;
7780     fh->handle_type = tswap32(target_fh->handle_type);
7781 
7782     ret = get_errno(open_by_handle_at(mount_fd, fh,
7783                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7784 
7785     g_free(fh);
7786 
7787     unlock_user(target_fh, handle, total_size);
7788 
7789     return ret;
7790 }
7791 #endif
7792 
7793 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7794 
7795 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7796 {
7797     int host_flags;
7798     target_sigset_t *target_mask;
7799     sigset_t host_mask;
7800     abi_long ret;
7801 
7802     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7803         return -TARGET_EINVAL;
7804     }
7805     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7806         return -TARGET_EFAULT;
7807     }
7808 
7809     target_to_host_sigset(&host_mask, target_mask);
7810 
7811     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7812 
7813     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7814     if (ret >= 0) {
7815         fd_trans_register(ret, &target_signalfd_trans);
7816     }
7817 
7818     unlock_user_struct(target_mask, mask, 0);
7819 
7820     return ret;
7821 }
7822 #endif
7823 
7824 /* Map host to target signal numbers for the wait family of syscalls.
7825    Assume all other status bits are the same.  */
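/*
 * Standard Linux wait-status layout (assumed here): a termination signal
 * lives in bits 0..6, so only that signal number needs translating; for a
 * stopped child the low byte is 0x7f and the stopping signal sits in
 * bits 8..15.
 */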
7826 int host_to_target_waitstatus(int status)
7827 {
7828     if (WIFSIGNALED(status)) {
7829         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7830     }
7831     if (WIFSTOPPED(status)) {
7832         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7833                | (status & 0xff);
7834     }
7835     return status;
7836 }
7837 
7838 static int open_self_cmdline(void *cpu_env, int fd)
7839 {
7840     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7841     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7842     int i;
7843 
7844     for (i = 0; i < bprm->argc; i++) {
7845         size_t len = strlen(bprm->argv[i]) + 1;
7846 
7847         if (write(fd, bprm->argv[i], len) != len) {
7848             return -1;
7849         }
7850     }
7851 
7852     return 0;
7853 }
7854 
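/*
 * Emulate /proc/self/maps: walk the host's own mappings, keep only the
 * ranges that fall inside the guest address space, translate them with
 * h2g(), and print them in the kernel's maps format.  The region holding
 * the guest stack is reported as "[stack]".
 */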
7855 static int open_self_maps(void *cpu_env, int fd)
7856 {
7857     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7858     TaskState *ts = cpu->opaque;
7859     GSList *map_info = read_self_maps();
7860     GSList *s;
7861     int count;
7862 
7863     for (s = map_info; s; s = g_slist_next(s)) {
7864         MapInfo *e = (MapInfo *) s->data;
7865 
7866         if (h2g_valid(e->start)) {
7867             unsigned long min = e->start;
7868             unsigned long max = e->end;
7869             int flags = page_get_flags(h2g(min));
7870             const char *path;
7871 
7872             max = h2g_valid(max - 1) ?
7873                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7874 
7875             if (page_check_range(h2g(min), max - min, flags) == -1) {
7876                 continue;
7877             }
7878 
7879             if (h2g(min) == ts->info->stack_limit) {
7880                 path = "[stack]";
7881             } else {
7882                 path = e->path;
7883             }
7884 
7885             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7886                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7887                             h2g(min), h2g(max - 1) + 1,
7888                             e->is_read ? 'r' : '-',
7889                             e->is_write ? 'w' : '-',
7890                             e->is_exec ? 'x' : '-',
7891                             e->is_priv ? 'p' : '-',
7892                             (uint64_t) e->offset, e->dev, e->inode);
7893             if (path) {
7894                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7895             } else {
7896                 dprintf(fd, "\n");
7897             }
7898         }
7899     }
7900 
7901     free_self_maps(map_info);
7902 
7903 #ifdef TARGET_VSYSCALL_PAGE
7904     /*
7905      * We only support execution from the vsyscall page.
7906      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7907      */
7908     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7909                     " --xp 00000000 00:00 0",
7910                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7911     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7912 #endif
7913 
7914     return 0;
7915 }
7916 
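/*
 * Emulate /proc/self/stat.  Only three of the 44 space-separated fields
 * are filled in with real values: the pid, the command name (truncated to
 * 15 characters, matching the kernel's comm limit), and the start address
 * of the stack (field 28); every other field is reported as 0.
 */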
7917 static int open_self_stat(void *cpu_env, int fd)
7918 {
7919     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7920     TaskState *ts = cpu->opaque;
7921     g_autoptr(GString) buf = g_string_new(NULL);
7922     int i;
7923 
7924     for (i = 0; i < 44; i++) {
7925         if (i == 0) {
7926             /* pid */
7927             g_string_printf(buf, FMT_pid " ", getpid());
7928         } else if (i == 1) {
7929             /* app name */
7930             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7931             bin = bin ? bin + 1 : ts->bprm->argv[0];
7932             g_string_printf(buf, "(%.15s) ", bin);
7933         } else if (i == 27) {
7934             /* stack bottom */
7935             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7936         } else {
7937             /* the remaining fields are not emulated; report them as 0 */
7938             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7939         }
7940 
7941         if (write(fd, buf->str, buf->len) != buf->len) {
7942             return -1;
7943         }
7944     }
7945 
7946     return 0;
7947 }
7948 
7949 static int open_self_auxv(void *cpu_env, int fd)
7950 {
7951     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7952     TaskState *ts = cpu->opaque;
7953     abi_ulong auxv = ts->info->saved_auxv;
7954     abi_ulong len = ts->info->auxv_len;
7955     char *ptr;
7956 
7957     /*
7958      * The auxiliary vector is stored on the target process's stack.
7959      * Read the whole auxv vector and copy it to the file.
7960      */
7961     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7962     if (ptr != NULL) {
7963         while (len > 0) {
7964             ssize_t r;
7965             r = write(fd, ptr, len);
7966             if (r <= 0) {
7967                 break;
7968             }
7969             len -= r;
7970             ptr += r;
7971         }
7972         lseek(fd, 0, SEEK_SET);
7973         unlock_user(ptr, auxv, len);
7974     }
7975 
7976     return 0;
7977 }
7978 
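/*
 * Return nonzero if filename names the given entry under /proc for the
 * current process, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>";
 * for example "/proc/self/maps" matches entry "maps".
 */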
7979 static int is_proc_myself(const char *filename, const char *entry)
7980 {
7981     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7982         filename += strlen("/proc/");
7983         if (!strncmp(filename, "self/", strlen("self/"))) {
7984             filename += strlen("self/");
7985         } else if (*filename >= '1' && *filename <= '9') {
7986             char myself[80];
7987             snprintf(myself, sizeof(myself), "%d/", getpid());
7988             if (!strncmp(filename, myself, strlen(myself))) {
7989                 filename += strlen(myself);
7990             } else {
7991                 return 0;
7992             }
7993         } else {
7994             return 0;
7995         }
7996         if (!strcmp(filename, entry)) {
7997             return 1;
7998         }
7999     }
8000     return 0;
8001 }
8002 
8003 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8004     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8005 static int is_proc(const char *filename, const char *entry)
8006 {
8007     return strcmp(filename, entry) == 0;
8008 }
8009 #endif
8010 
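/*
 * When host and guest differ in endianness, /proc/net/route must be
 * re-emitted with the 32-bit destination, gateway and mask fields
 * byte-swapped so that the guest sees them in its own byte order.
 */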
8011 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8012 static int open_net_route(void *cpu_env, int fd)
8013 {
8014     FILE *fp;
8015     char *line = NULL;
8016     size_t len = 0;
8017     ssize_t read;
8018 
8019     fp = fopen("/proc/net/route", "r");
8020     if (fp == NULL) {
8021         return -1;
8022     }
8023 
8024     /* read header */
8025 
8026     read = getline(&line, &len, fp);
8027     if (read != -1) {
             dprintf(fd, "%s", line);
         }
8028 
8029     /* read routes */
8030 
8031     while ((read = getline(&line, &len, fp)) != -1) {
8032         char iface[16];
8033         uint32_t dest, gw, mask;
8034         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8035         int fields;
8036 
8037         fields = sscanf(line,
8038                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8039                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8040                         &mask, &mtu, &window, &irtt);
8041         if (fields != 11) {
8042             continue;
8043         }
8044         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8045                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8046                 metric, tswap32(mask), mtu, window, irtt);
8047     }
8048 
8049     free(line);
8050     fclose(fp);
8051 
8052     return 0;
8053 }
8054 #endif
8055 
8056 #if defined(TARGET_SPARC)
8057 static int open_cpuinfo(void *cpu_env, int fd)
8058 {
8059     dprintf(fd, "type\t\t: sun4u\n");
8060     return 0;
8061 }
8062 #endif
8063 
8064 #if defined(TARGET_HPPA)
8065 static int open_cpuinfo(void *cpu_env, int fd)
8066 {
8067     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8068     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8069     dprintf(fd, "capabilities\t: os32\n");
8070     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8071     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8072     return 0;
8073 }
8074 #endif
8075 
8076 #if defined(TARGET_M68K)
8077 static int open_hardware(void *cpu_env, int fd)
8078 {
8079     dprintf(fd, "Model:\t\tqemu-m68k\n");
8080     return 0;
8081 }
8082 #endif
8083 
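/*
 * openat() with emulation of selected /proc files.  "/proc/self/exe" is
 * redirected to the guest executable; the entries in fakes[] below are
 * intercepted and their synthesized contents are written into an unlinked
 * temporary file whose descriptor is returned.  Everything else goes
 * through safe_openat() on the path()-translated name.
 */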
8084 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8085 {
8086     struct fake_open {
8087         const char *filename;
8088         int (*fill)(void *cpu_env, int fd);
8089         int (*cmp)(const char *s1, const char *s2);
8090     };
8091     const struct fake_open *fake_open;
8092     static const struct fake_open fakes[] = {
8093         { "maps", open_self_maps, is_proc_myself },
8094         { "stat", open_self_stat, is_proc_myself },
8095         { "auxv", open_self_auxv, is_proc_myself },
8096         { "cmdline", open_self_cmdline, is_proc_myself },
8097 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8098         { "/proc/net/route", open_net_route, is_proc },
8099 #endif
8100 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8101         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8102 #endif
8103 #if defined(TARGET_M68K)
8104         { "/proc/hardware", open_hardware, is_proc },
8105 #endif
8106         { NULL, NULL, NULL }
8107     };
8108 
8109     if (is_proc_myself(pathname, "exe")) {
8110         int execfd = qemu_getauxval(AT_EXECFD);
8111         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8112     }
8113 
8114     for (fake_open = fakes; fake_open->filename; fake_open++) {
8115         if (fake_open->cmp(pathname, fake_open->filename)) {
8116             break;
8117         }
8118     }
8119 
8120     if (fake_open->filename) {
8121         const char *tmpdir;
8122         char filename[PATH_MAX];
8123         int fd, r;
8124 
8125         /* create a temporary file to hold the synthesized contents */
8126         tmpdir = getenv("TMPDIR");
8127         if (!tmpdir)
8128             tmpdir = "/tmp";
8129         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8130         fd = mkstemp(filename);
8131         if (fd < 0) {
8132             return fd;
8133         }
8134         unlink(filename);
8135 
8136         if ((r = fake_open->fill(cpu_env, fd))) {
8137             int e = errno;
8138             close(fd);
8139             errno = e;
8140             return r;
8141         }
8142         lseek(fd, 0, SEEK_SET);
8143 
8144         return fd;
8145     }
8146 
8147     return safe_openat(dirfd, path(pathname), flags, mode);
8148 }
8149 
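/*
 * POSIX timer IDs handed out to the guest encode the index into
 * g_posix_timers in the low 16 bits, tagged with TIMER_MAGIC in the upper
 * bits; get_timer_id() below checks the tag and recovers the index.
 */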
8150 #define TIMER_MAGIC 0x0caf0000
8151 #define TIMER_MAGIC_MASK 0xffff0000
8152 
8153 /* Convert QEMU provided timer ID back to internal 16bit index format */
8154 static target_timer_t get_timer_id(abi_long arg)
8155 {
8156     target_timer_t timerid = arg;
8157 
8158     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8159         return -TARGET_EINVAL;
8160     }
8161 
8162     timerid &= 0xffff;
8163 
8164     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8165         return -TARGET_EINVAL;
8166     }
8167 
8168     return timerid;
8169 }
8170 
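/*
 * Conversion helpers for sched_{get,set}affinity CPU masks.  The masks
 * are repacked bit by bit because abi_ulong and the host's unsigned long
 * may differ in width and the guest words are stored in target byte
 * order; e.g. two 32-bit guest words may fold into one 64-bit host word.
 */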
8171 static int target_to_host_cpu_mask(unsigned long *host_mask,
8172                                    size_t host_size,
8173                                    abi_ulong target_addr,
8174                                    size_t target_size)
8175 {
8176     unsigned target_bits = sizeof(abi_ulong) * 8;
8177     unsigned host_bits = sizeof(*host_mask) * 8;
8178     abi_ulong *target_mask;
8179     unsigned i, j;
8180 
8181     assert(host_size >= target_size);
8182 
8183     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8184     if (!target_mask) {
8185         return -TARGET_EFAULT;
8186     }
8187     memset(host_mask, 0, host_size);
8188 
8189     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8190         unsigned bit = i * target_bits;
8191         abi_ulong val;
8192 
8193         __get_user(val, &target_mask[i]);
8194         for (j = 0; j < target_bits; j++, bit++) {
8195             if (val & (1UL << j)) {
8196                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8197             }
8198         }
8199     }
8200 
8201     unlock_user(target_mask, target_addr, 0);
8202     return 0;
8203 }
8204 
8205 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8206                                    size_t host_size,
8207                                    abi_ulong target_addr,
8208                                    size_t target_size)
8209 {
8210     unsigned target_bits = sizeof(abi_ulong) * 8;
8211     unsigned host_bits = sizeof(*host_mask) * 8;
8212     abi_ulong *target_mask;
8213     unsigned i, j;
8214 
8215     assert(host_size >= target_size);
8216 
8217     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8218     if (!target_mask) {
8219         return -TARGET_EFAULT;
8220     }
8221 
8222     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8223         unsigned bit = i * target_bits;
8224         abi_ulong val = 0;
8225 
8226         for (j = 0; j < target_bits; j++, bit++) {
8227             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8228                 val |= 1UL << j;
8229             }
8230         }
8231         __put_user(val, &target_mask[i]);
8232     }
8233 
8234     unlock_user(target_mask, target_addr, target_size);
8235     return 0;
8236 }
8237 
8238 /* This is an internal helper for do_syscall so that there is a single
8239  * return point, which makes it easier to perform actions such as
8240  * logging of syscall results.
8241  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8242  */
8243 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8244                             abi_long arg2, abi_long arg3, abi_long arg4,
8245                             abi_long arg5, abi_long arg6, abi_long arg7,
8246                             abi_long arg8)
8247 {
8248     CPUState *cpu = env_cpu(cpu_env);
8249     abi_long ret;
8250 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8251     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8252     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8253     || defined(TARGET_NR_statx)
8254     struct stat st;
8255 #endif
8256 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8257     || defined(TARGET_NR_fstatfs)
8258     struct statfs stfs;
8259 #endif
8260     void *p;
8261 
8262     switch(num) {
8263     case TARGET_NR_exit:
8264         /* In old applications this may be used to implement _exit(2).
8265            However, in threaded applications it is used for thread termination,
8266            and _exit_group is used for application termination.
8267            Do thread termination if we have more than one thread.  */
8268 
8269         if (block_signals()) {
8270             return -TARGET_ERESTARTSYS;
8271         }
8272 
8273         pthread_mutex_lock(&clone_lock);
8274 
8275         if (CPU_NEXT(first_cpu)) {
8276             TaskState *ts = cpu->opaque;
8277 
8278             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8279             object_unref(OBJECT(cpu));
8280             /*
8281              * At this point the CPU should be unrealized and removed
8282              * from cpu lists. We can clean-up the rest of the thread
8283              * data without the lock held.
8284              */
8285 
8286             pthread_mutex_unlock(&clone_lock);
8287 
8288             if (ts->child_tidptr) {
8289                 put_user_u32(0, ts->child_tidptr);
8290                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8291                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8292             }
8293             thread_cpu = NULL;
8294             g_free(ts);
8295             rcu_unregister_thread();
8296             pthread_exit(NULL);
8297         }
8298 
8299         pthread_mutex_unlock(&clone_lock);
8300         preexit_cleanup(cpu_env, arg1);
8301         _exit(arg1);
8302         return 0; /* avoid warning */
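    /*
     * read/write pass their payload through any fd_trans hook that was
     * registered for the descriptor (e.g. by do_signalfd4() above), so
     * that emulated file types can fix up the data format in flight.
     */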
8303     case TARGET_NR_read:
8304         if (arg2 == 0 && arg3 == 0) {
8305             return get_errno(safe_read(arg1, 0, 0));
8306         } else {
8307             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8308                 return -TARGET_EFAULT;
8309             ret = get_errno(safe_read(arg1, p, arg3));
8310             if (ret >= 0 &&
8311                 fd_trans_host_to_target_data(arg1)) {
8312                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8313             }
8314             unlock_user(p, arg2, ret);
8315         }
8316         return ret;
8317     case TARGET_NR_write:
8318         if (arg2 == 0 && arg3 == 0) {
8319             return get_errno(safe_write(arg1, 0, 0));
8320         }
8321         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8322             return -TARGET_EFAULT;
8323         if (fd_trans_target_to_host_data(arg1)) {
8324             void *copy = g_malloc(arg3);
8325             memcpy(copy, p, arg3);
8326             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8327             if (ret >= 0) {
8328                 ret = get_errno(safe_write(arg1, copy, ret));
8329             }
8330             g_free(copy);
8331         } else {
8332             ret = get_errno(safe_write(arg1, p, arg3));
8333         }
8334         unlock_user(p, arg2, 0);
8335         return ret;
8336 
8337 #ifdef TARGET_NR_open
8338     case TARGET_NR_open:
8339         if (!(p = lock_user_string(arg1)))
8340             return -TARGET_EFAULT;
8341         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8342                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8343                                   arg3));
8344         fd_trans_unregister(ret);
8345         unlock_user(p, arg1, 0);
8346         return ret;
8347 #endif
8348     case TARGET_NR_openat:
8349         if (!(p = lock_user_string(arg2)))
8350             return -TARGET_EFAULT;
8351         ret = get_errno(do_openat(cpu_env, arg1, p,
8352                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8353                                   arg4));
8354         fd_trans_unregister(ret);
8355         unlock_user(p, arg2, 0);
8356         return ret;
8357 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8358     case TARGET_NR_name_to_handle_at:
8359         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8360         return ret;
8361 #endif
8362 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8363     case TARGET_NR_open_by_handle_at:
8364         ret = do_open_by_handle_at(arg1, arg2, arg3);
8365         fd_trans_unregister(ret);
8366         return ret;
8367 #endif
8368     case TARGET_NR_close:
8369         fd_trans_unregister(arg1);
8370         return get_errno(close(arg1));
8371 
8372     case TARGET_NR_brk:
8373         return do_brk(arg1);
8374 #ifdef TARGET_NR_fork
8375     case TARGET_NR_fork:
8376         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8377 #endif
8378 #ifdef TARGET_NR_waitpid
8379     case TARGET_NR_waitpid:
8380         {
8381             int status;
8382             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8383             if (!is_error(ret) && arg2 && ret
8384                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8385                 return -TARGET_EFAULT;
8386         }
8387         return ret;
8388 #endif
8389 #ifdef TARGET_NR_waitid
8390     case TARGET_NR_waitid:
8391         {
8392             siginfo_t info;
8393             info.si_pid = 0;
8394             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8395             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8396                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8397                     return -TARGET_EFAULT;
8398                 host_to_target_siginfo(p, &info);
8399                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8400             }
8401         }
8402         return ret;
8403 #endif
8404 #ifdef TARGET_NR_creat /* not on alpha */
8405     case TARGET_NR_creat:
8406         if (!(p = lock_user_string(arg1)))
8407             return -TARGET_EFAULT;
8408         ret = get_errno(creat(p, arg2));
8409         fd_trans_unregister(ret);
8410         unlock_user(p, arg1, 0);
8411         return ret;
8412 #endif
8413 #ifdef TARGET_NR_link
8414     case TARGET_NR_link:
8415         {
8416             void * p2;
8417             p = lock_user_string(arg1);
8418             p2 = lock_user_string(arg2);
8419             if (!p || !p2)
8420                 ret = -TARGET_EFAULT;
8421             else
8422                 ret = get_errno(link(p, p2));
8423             unlock_user(p2, arg2, 0);
8424             unlock_user(p, arg1, 0);
8425         }
8426         return ret;
8427 #endif
8428 #if defined(TARGET_NR_linkat)
8429     case TARGET_NR_linkat:
8430         {
8431             void * p2 = NULL;
8432             if (!arg2 || !arg4)
8433                 return -TARGET_EFAULT;
8434             p  = lock_user_string(arg2);
8435             p2 = lock_user_string(arg4);
8436             if (!p || !p2)
8437                 ret = -TARGET_EFAULT;
8438             else
8439                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8440             unlock_user(p, arg2, 0);
8441             unlock_user(p2, arg4, 0);
8442         }
8443         return ret;
8444 #endif
8445 #ifdef TARGET_NR_unlink
8446     case TARGET_NR_unlink:
8447         if (!(p = lock_user_string(arg1)))
8448             return -TARGET_EFAULT;
8449         ret = get_errno(unlink(p));
8450         unlock_user(p, arg1, 0);
8451         return ret;
8452 #endif
8453 #if defined(TARGET_NR_unlinkat)
8454     case TARGET_NR_unlinkat:
8455         if (!(p = lock_user_string(arg2)))
8456             return -TARGET_EFAULT;
8457         ret = get_errno(unlinkat(arg1, p, arg3));
8458         unlock_user(p, arg2, 0);
8459         return ret;
8460 #endif
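    /*
     * execve: the guest argv/envp pointer arrays are walked twice, first
     * to count the entries and then to lock each string into host memory,
     * before handing the host-pointer arrays to safe_execve().
     */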
8461     case TARGET_NR_execve:
8462         {
8463             char **argp, **envp;
8464             int argc, envc;
8465             abi_ulong gp;
8466             abi_ulong guest_argp;
8467             abi_ulong guest_envp;
8468             abi_ulong addr;
8469             char **q;
8470             int total_size = 0;
8471 
8472             argc = 0;
8473             guest_argp = arg2;
8474             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8475                 if (get_user_ual(addr, gp))
8476                     return -TARGET_EFAULT;
8477                 if (!addr)
8478                     break;
8479                 argc++;
8480             }
8481             envc = 0;
8482             guest_envp = arg3;
8483             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8484                 if (get_user_ual(addr, gp))
8485                     return -TARGET_EFAULT;
8486                 if (!addr)
8487                     break;
8488                 envc++;
8489             }
8490 
8491             argp = g_new0(char *, argc + 1);
8492             envp = g_new0(char *, envc + 1);
8493 
8494             for (gp = guest_argp, q = argp; gp;
8495                   gp += sizeof(abi_ulong), q++) {
8496                 if (get_user_ual(addr, gp))
8497                     goto execve_efault;
8498                 if (!addr)
8499                     break;
8500                 if (!(*q = lock_user_string(addr)))
8501                     goto execve_efault;
8502                 total_size += strlen(*q) + 1;
8503             }
8504             *q = NULL;
8505 
8506             for (gp = guest_envp, q = envp; gp;
8507                   gp += sizeof(abi_ulong), q++) {
8508                 if (get_user_ual(addr, gp))
8509                     goto execve_efault;
8510                 if (!addr)
8511                     break;
8512                 if (!(*q = lock_user_string(addr)))
8513                     goto execve_efault;
8514                 total_size += strlen(*q) + 1;
8515             }
8516             *q = NULL;
8517 
8518             if (!(p = lock_user_string(arg1)))
8519                 goto execve_efault;
8520             /* Although execve() is not an interruptible syscall it is
8521              * a special case where we must use the safe_syscall wrapper:
8522              * if we allow a signal to happen before we make the host
8523              * syscall then we will 'lose' it, because at the point of
8524              * execve the process leaves QEMU's control. So we use the
8525              * safe syscall wrapper to ensure that we either take the
8526              * signal as a guest signal, or else it does not happen
8527              * before the execve completes and makes it the other
8528              * program's problem.
8529              */
8530             ret = get_errno(safe_execve(p, argp, envp));
8531             unlock_user(p, arg1, 0);
8532 
8533             goto execve_end;
8534 
8535         execve_efault:
8536             ret = -TARGET_EFAULT;
8537 
8538         execve_end:
8539             for (gp = guest_argp, q = argp; *q;
8540                   gp += sizeof(abi_ulong), q++) {
8541                 if (get_user_ual(addr, gp)
8542                     || !addr)
8543                     break;
8544                 unlock_user(*q, addr, 0);
8545             }
8546             for (gp = guest_envp, q = envp; *q;
8547                   gp += sizeof(abi_ulong), q++) {
8548                 if (get_user_ual(addr, gp)
8549                     || !addr)
8550                     break;
8551                 unlock_user(*q, addr, 0);
8552             }
8553 
8554             g_free(argp);
8555             g_free(envp);
8556         }
8557         return ret;
8558     case TARGET_NR_chdir:
8559         if (!(p = lock_user_string(arg1)))
8560             return -TARGET_EFAULT;
8561         ret = get_errno(chdir(p));
8562         unlock_user(p, arg1, 0);
8563         return ret;
8564 #ifdef TARGET_NR_time
8565     case TARGET_NR_time:
8566         {
8567             time_t host_time;
8568             ret = get_errno(time(&host_time));
8569             if (!is_error(ret)
8570                 && arg1
8571                 && put_user_sal(host_time, arg1))
8572                 return -TARGET_EFAULT;
8573         }
8574         return ret;
8575 #endif
8576 #ifdef TARGET_NR_mknod
8577     case TARGET_NR_mknod:
8578         if (!(p = lock_user_string(arg1)))
8579             return -TARGET_EFAULT;
8580         ret = get_errno(mknod(p, arg2, arg3));
8581         unlock_user(p, arg1, 0);
8582         return ret;
8583 #endif
8584 #if defined(TARGET_NR_mknodat)
8585     case TARGET_NR_mknodat:
8586         if (!(p = lock_user_string(arg2)))
8587             return -TARGET_EFAULT;
8588         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8589         unlock_user(p, arg2, 0);
8590         return ret;
8591 #endif
8592 #ifdef TARGET_NR_chmod
8593     case TARGET_NR_chmod:
8594         if (!(p = lock_user_string(arg1)))
8595             return -TARGET_EFAULT;
8596         ret = get_errno(chmod(p, arg2));
8597         unlock_user(p, arg1, 0);
8598         return ret;
8599 #endif
8600 #ifdef TARGET_NR_lseek
8601     case TARGET_NR_lseek:
8602         return get_errno(lseek(arg1, arg2, arg3));
8603 #endif
8604 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8605     /* Alpha specific */
8606     case TARGET_NR_getxpid:
8607         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8608         return get_errno(getpid());
8609 #endif
8610 #ifdef TARGET_NR_getpid
8611     case TARGET_NR_getpid:
8612         return get_errno(getpid());
8613 #endif
8614     case TARGET_NR_mount:
8615         {
8616             /* need to look at the data field */
8617             void *p2, *p3;
8618 
8619             if (arg1) {
8620                 p = lock_user_string(arg1);
8621                 if (!p) {
8622                     return -TARGET_EFAULT;
8623                 }
8624             } else {
8625                 p = NULL;
8626             }
8627 
8628             p2 = lock_user_string(arg2);
8629             if (!p2) {
8630                 if (arg1) {
8631                     unlock_user(p, arg1, 0);
8632                 }
8633                 return -TARGET_EFAULT;
8634             }
8635 
8636             if (arg3) {
8637                 p3 = lock_user_string(arg3);
8638                 if (!p3) {
8639                     if (arg1) {
8640                         unlock_user(p, arg1, 0);
8641                     }
8642                     unlock_user(p2, arg2, 0);
8643                     return -TARGET_EFAULT;
8644                 }
8645             } else {
8646                 p3 = NULL;
8647             }
8648 
8649             /* FIXME - arg5 should be locked, but it isn't clear how to
8650              * do that since it's not guaranteed to be a NULL-terminated
8651              * string.
8652              */
8653             if (!arg5) {
8654                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8655             } else {
8656                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8657             }
8658             ret = get_errno(ret);
8659 
8660             if (arg1) {
8661                 unlock_user(p, arg1, 0);
8662             }
8663             unlock_user(p2, arg2, 0);
8664             if (arg3) {
8665                 unlock_user(p3, arg3, 0);
8666             }
8667         }
8668         return ret;
8669 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8670 #if defined(TARGET_NR_umount)
8671     case TARGET_NR_umount:
8672 #endif
8673 #if defined(TARGET_NR_oldumount)
8674     case TARGET_NR_oldumount:
8675 #endif
8676         if (!(p = lock_user_string(arg1)))
8677             return -TARGET_EFAULT;
8678         ret = get_errno(umount(p));
8679         unlock_user(p, arg1, 0);
8680         return ret;
8681 #endif
8682 #ifdef TARGET_NR_stime /* not on alpha */
8683     case TARGET_NR_stime:
8684         {
8685             struct timespec ts;
8686             ts.tv_nsec = 0;
8687             if (get_user_sal(ts.tv_sec, arg1)) {
8688                 return -TARGET_EFAULT;
8689             }
8690             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8691         }
8692 #endif
8693 #ifdef TARGET_NR_alarm /* not on alpha */
8694     case TARGET_NR_alarm:
8695         return alarm(arg1);
8696 #endif
8697 #ifdef TARGET_NR_pause /* not on alpha */
8698     case TARGET_NR_pause:
8699         if (!block_signals()) {
8700             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8701         }
8702         return -TARGET_EINTR;
8703 #endif
8704 #ifdef TARGET_NR_utime
8705     case TARGET_NR_utime:
8706         {
8707             struct utimbuf tbuf, *host_tbuf;
8708             struct target_utimbuf *target_tbuf;
8709             if (arg2) {
8710                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8711                     return -TARGET_EFAULT;
8712                 tbuf.actime = tswapal(target_tbuf->actime);
8713                 tbuf.modtime = tswapal(target_tbuf->modtime);
8714                 unlock_user_struct(target_tbuf, arg2, 0);
8715                 host_tbuf = &tbuf;
8716             } else {
8717                 host_tbuf = NULL;
8718             }
8719             if (!(p = lock_user_string(arg1)))
8720                 return -TARGET_EFAULT;
8721             ret = get_errno(utime(p, host_tbuf));
8722             unlock_user(p, arg1, 0);
8723         }
8724         return ret;
8725 #endif
8726 #ifdef TARGET_NR_utimes
8727     case TARGET_NR_utimes:
8728         {
8729             struct timeval *tvp, tv[2];
8730             if (arg2) {
8731                 if (copy_from_user_timeval(&tv[0], arg2)
8732                     || copy_from_user_timeval(&tv[1],
8733                                               arg2 + sizeof(struct target_timeval)))
8734                     return -TARGET_EFAULT;
8735                 tvp = tv;
8736             } else {
8737                 tvp = NULL;
8738             }
8739             if (!(p = lock_user_string(arg1)))
8740                 return -TARGET_EFAULT;
8741             ret = get_errno(utimes(p, tvp));
8742             unlock_user(p, arg1, 0);
8743         }
8744         return ret;
8745 #endif
8746 #if defined(TARGET_NR_futimesat)
8747     case TARGET_NR_futimesat:
8748         {
8749             struct timeval *tvp, tv[2];
8750             if (arg3) {
8751                 if (copy_from_user_timeval(&tv[0], arg3)
8752                     || copy_from_user_timeval(&tv[1],
8753                                               arg3 + sizeof(struct target_timeval)))
8754                     return -TARGET_EFAULT;
8755                 tvp = tv;
8756             } else {
8757                 tvp = NULL;
8758             }
8759             if (!(p = lock_user_string(arg2))) {
8760                 return -TARGET_EFAULT;
8761             }
8762             ret = get_errno(futimesat(arg1, path(p), tvp));
8763             unlock_user(p, arg2, 0);
8764         }
8765         return ret;
8766 #endif
8767 #ifdef TARGET_NR_access
8768     case TARGET_NR_access:
8769         if (!(p = lock_user_string(arg1))) {
8770             return -TARGET_EFAULT;
8771         }
8772         ret = get_errno(access(path(p), arg2));
8773         unlock_user(p, arg1, 0);
8774         return ret;
8775 #endif
8776 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8777     case TARGET_NR_faccessat:
8778         if (!(p = lock_user_string(arg2))) {
8779             return -TARGET_EFAULT;
8780         }
8781         ret = get_errno(faccessat(arg1, p, arg3, 0));
8782         unlock_user(p, arg2, 0);
8783         return ret;
8784 #endif
8785 #ifdef TARGET_NR_nice /* not on alpha */
8786     case TARGET_NR_nice:
8787         return get_errno(nice(arg1));
8788 #endif
8789     case TARGET_NR_sync:
8790         sync();
8791         return 0;
8792 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8793     case TARGET_NR_syncfs:
8794         return get_errno(syncfs(arg1));
8795 #endif
8796     case TARGET_NR_kill:
8797         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8798 #ifdef TARGET_NR_rename
8799     case TARGET_NR_rename:
8800         {
8801             void *p2;
8802             p = lock_user_string(arg1);
8803             p2 = lock_user_string(arg2);
8804             if (!p || !p2)
8805                 ret = -TARGET_EFAULT;
8806             else
8807                 ret = get_errno(rename(p, p2));
8808             unlock_user(p2, arg2, 0);
8809             unlock_user(p, arg1, 0);
8810         }
8811         return ret;
8812 #endif
8813 #if defined(TARGET_NR_renameat)
8814     case TARGET_NR_renameat:
8815         {
8816             void *p2;
8817             p  = lock_user_string(arg2);
8818             p2 = lock_user_string(arg4);
8819             if (!p || !p2)
8820                 ret = -TARGET_EFAULT;
8821             else
8822                 ret = get_errno(renameat(arg1, p, arg3, p2));
8823             unlock_user(p2, arg4, 0);
8824             unlock_user(p, arg2, 0);
8825         }
8826         return ret;
8827 #endif
8828 #if defined(TARGET_NR_renameat2)
8829     case TARGET_NR_renameat2:
8830         {
8831             void *p2;
8832             p  = lock_user_string(arg2);
8833             p2 = lock_user_string(arg4);
8834             if (!p || !p2) {
8835                 ret = -TARGET_EFAULT;
8836             } else {
8837                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8838             }
8839             unlock_user(p2, arg4, 0);
8840             unlock_user(p, arg2, 0);
8841         }
8842         return ret;
8843 #endif
8844 #ifdef TARGET_NR_mkdir
8845     case TARGET_NR_mkdir:
8846         if (!(p = lock_user_string(arg1)))
8847             return -TARGET_EFAULT;
8848         ret = get_errno(mkdir(p, arg2));
8849         unlock_user(p, arg1, 0);
8850         return ret;
8851 #endif
8852 #if defined(TARGET_NR_mkdirat)
8853     case TARGET_NR_mkdirat:
8854         if (!(p = lock_user_string(arg2)))
8855             return -TARGET_EFAULT;
8856         ret = get_errno(mkdirat(arg1, p, arg3));
8857         unlock_user(p, arg2, 0);
8858         return ret;
8859 #endif
8860 #ifdef TARGET_NR_rmdir
8861     case TARGET_NR_rmdir:
8862         if (!(p = lock_user_string(arg1)))
8863             return -TARGET_EFAULT;
8864         ret = get_errno(rmdir(p));
8865         unlock_user(p, arg1, 0);
8866         return ret;
8867 #endif
8868     case TARGET_NR_dup:
8869         ret = get_errno(dup(arg1));
8870         if (ret >= 0) {
8871             fd_trans_dup(arg1, ret);
8872         }
8873         return ret;
8874 #ifdef TARGET_NR_pipe
8875     case TARGET_NR_pipe:
8876         return do_pipe(cpu_env, arg1, 0, 0);
8877 #endif
8878 #ifdef TARGET_NR_pipe2
8879     case TARGET_NR_pipe2:
8880         return do_pipe(cpu_env, arg1,
8881                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8882 #endif
8883     case TARGET_NR_times:
8884         {
8885             struct target_tms *tmsp;
8886             struct tms tms;
8887             ret = get_errno(times(&tms));
8888             if (arg1) {
8889                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8890                 if (!tmsp)
8891                     return -TARGET_EFAULT;
8892                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8893                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8894                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8895                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8896             }
8897             if (!is_error(ret))
8898                 ret = host_to_target_clock_t(ret);
8899         }
8900         return ret;
8901     case TARGET_NR_acct:
8902         if (arg1 == 0) {
8903             ret = get_errno(acct(NULL));
8904         } else {
8905             if (!(p = lock_user_string(arg1))) {
8906                 return -TARGET_EFAULT;
8907             }
8908             ret = get_errno(acct(path(p)));
8909             unlock_user(p, arg1, 0);
8910         }
8911         return ret;
8912 #ifdef TARGET_NR_umount2
8913     case TARGET_NR_umount2:
8914         if (!(p = lock_user_string(arg1)))
8915             return -TARGET_EFAULT;
8916         ret = get_errno(umount2(p, arg2));
8917         unlock_user(p, arg1, 0);
8918         return ret;
8919 #endif
8920     case TARGET_NR_ioctl:
8921         return do_ioctl(arg1, arg2, arg3);
8922 #ifdef TARGET_NR_fcntl
8923     case TARGET_NR_fcntl:
8924         return do_fcntl(arg1, arg2, arg3);
8925 #endif
8926     case TARGET_NR_setpgid:
8927         return get_errno(setpgid(arg1, arg2));
8928     case TARGET_NR_umask:
8929         return get_errno(umask(arg1));
8930     case TARGET_NR_chroot:
8931         if (!(p = lock_user_string(arg1)))
8932             return -TARGET_EFAULT;
8933         ret = get_errno(chroot(p));
8934         unlock_user(p, arg1, 0);
8935         return ret;
8936 #ifdef TARGET_NR_dup2
8937     case TARGET_NR_dup2:
8938         ret = get_errno(dup2(arg1, arg2));
8939         if (ret >= 0) {
8940             fd_trans_dup(arg1, arg2);
8941         }
8942         return ret;
8943 #endif
8944 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8945     case TARGET_NR_dup3:
8946     {
8947         int host_flags;
8948 
8949         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8950             return -TARGET_EINVAL;
8951         }
8952         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8953         ret = get_errno(dup3(arg1, arg2, host_flags));
8954         if (ret >= 0) {
8955             fd_trans_dup(arg1, arg2);
8956         }
8957         return ret;
8958     }
8959 #endif
8960 #ifdef TARGET_NR_getppid /* not on alpha */
8961     case TARGET_NR_getppid:
8962         return get_errno(getppid());
8963 #endif
8964 #ifdef TARGET_NR_getpgrp
8965     case TARGET_NR_getpgrp:
8966         return get_errno(getpgrp());
8967 #endif
8968     case TARGET_NR_setsid:
8969         return get_errno(setsid());
8970 #ifdef TARGET_NR_sigaction
8971     case TARGET_NR_sigaction:
8972         {
8973 #if defined(TARGET_ALPHA)
8974             struct target_sigaction act, oact, *pact = 0;
8975             struct target_old_sigaction *old_act;
8976             if (arg2) {
8977                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8978                     return -TARGET_EFAULT;
8979                 act._sa_handler = old_act->_sa_handler;
8980                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8981                 act.sa_flags = old_act->sa_flags;
8982                 act.sa_restorer = 0;
8983                 unlock_user_struct(old_act, arg2, 0);
8984                 pact = &act;
8985             }
8986             ret = get_errno(do_sigaction(arg1, pact, &oact));
8987             if (!is_error(ret) && arg3) {
8988                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8989                     return -TARGET_EFAULT;
8990                 old_act->_sa_handler = oact._sa_handler;
8991                 old_act->sa_mask = oact.sa_mask.sig[0];
8992                 old_act->sa_flags = oact.sa_flags;
8993                 unlock_user_struct(old_act, arg3, 1);
8994             }
8995 #elif defined(TARGET_MIPS)
8996             struct target_sigaction act, oact, *pact, *old_act;
8997 
8998             if (arg2) {
8999                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9000                     return -TARGET_EFAULT;
9001                 act._sa_handler = old_act->_sa_handler;
9002                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9003                 act.sa_flags = old_act->sa_flags;
9004                 unlock_user_struct(old_act, arg2, 0);
9005                 pact = &act;
9006             } else {
9007                 pact = NULL;
9008             }
9009 
9010             ret = get_errno(do_sigaction(arg1, pact, &oact));
9011 
9012             if (!is_error(ret) && arg3) {
9013                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9014                     return -TARGET_EFAULT;
9015                 old_act->_sa_handler = oact._sa_handler;
9016                 old_act->sa_flags = oact.sa_flags;
9017                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9018                 old_act->sa_mask.sig[1] = 0;
9019                 old_act->sa_mask.sig[2] = 0;
9020                 old_act->sa_mask.sig[3] = 0;
9021                 unlock_user_struct(old_act, arg3, 1);
9022             }
9023 #else
9024             struct target_old_sigaction *old_act;
9025             struct target_sigaction act, oact, *pact;
9026             if (arg2) {
9027                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9028                     return -TARGET_EFAULT;
9029                 act._sa_handler = old_act->_sa_handler;
9030                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9031                 act.sa_flags = old_act->sa_flags;
9032                 act.sa_restorer = old_act->sa_restorer;
9033 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9034                 act.ka_restorer = 0;
9035 #endif
9036                 unlock_user_struct(old_act, arg2, 0);
9037                 pact = &act;
9038             } else {
9039                 pact = NULL;
9040             }
9041             ret = get_errno(do_sigaction(arg1, pact, &oact));
9042             if (!is_error(ret) && arg3) {
9043                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9044                     return -TARGET_EFAULT;
9045                 old_act->_sa_handler = oact._sa_handler;
9046                 old_act->sa_mask = oact.sa_mask.sig[0];
9047                 old_act->sa_flags = oact.sa_flags;
9048                 old_act->sa_restorer = oact.sa_restorer;
9049                 unlock_user_struct(old_act, arg3, 1);
9050             }
9051 #endif
9052         }
9053         return ret;
9054 #endif
9055     case TARGET_NR_rt_sigaction:
9056         {
9057 #if defined(TARGET_ALPHA)
9058             /* For Alpha and SPARC this is a 5 argument syscall, with
9059              * a 'restorer' parameter which must be copied into the
9060              * sa_restorer field of the sigaction struct.
9061              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9062              * and arg5 is the sigsetsize.
9063              * Alpha also has a separate rt_sigaction struct that it uses
9064              * here; SPARC uses the usual sigaction struct.
9065              */
9066             struct target_rt_sigaction *rt_act;
9067             struct target_sigaction act, oact, *pact = 0;
9068 
9069             if (arg4 != sizeof(target_sigset_t)) {
9070                 return -TARGET_EINVAL;
9071             }
9072             if (arg2) {
9073                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9074                     return -TARGET_EFAULT;
9075                 act._sa_handler = rt_act->_sa_handler;
9076                 act.sa_mask = rt_act->sa_mask;
9077                 act.sa_flags = rt_act->sa_flags;
9078                 act.sa_restorer = arg5;
9079                 unlock_user_struct(rt_act, arg2, 0);
9080                 pact = &act;
9081             }
9082             ret = get_errno(do_sigaction(arg1, pact, &oact));
9083             if (!is_error(ret) && arg3) {
9084                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9085                     return -TARGET_EFAULT;
9086                 rt_act->_sa_handler = oact._sa_handler;
9087                 rt_act->sa_mask = oact.sa_mask;
9088                 rt_act->sa_flags = oact.sa_flags;
9089                 unlock_user_struct(rt_act, arg3, 1);
9090             }
9091 #else
9092 #ifdef TARGET_SPARC
9093             target_ulong restorer = arg4;
9094             target_ulong sigsetsize = arg5;
9095 #else
9096             target_ulong sigsetsize = arg4;
9097 #endif
9098             struct target_sigaction *act;
9099             struct target_sigaction *oact;
9100 
9101             if (sigsetsize != sizeof(target_sigset_t)) {
9102                 return -TARGET_EINVAL;
9103             }
9104             if (arg2) {
9105                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9106                     return -TARGET_EFAULT;
9107                 }
9108 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9109                 act->ka_restorer = restorer;
9110 #endif
9111             } else {
9112                 act = NULL;
9113             }
9114             if (arg3) {
9115                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9116                     ret = -TARGET_EFAULT;
9117                     goto rt_sigaction_fail;
9118                 }
9119             } else
9120                 oact = NULL;
9121             ret = get_errno(do_sigaction(arg1, act, oact));
9122         rt_sigaction_fail:
9123             if (act)
9124                 unlock_user_struct(act, arg2, 0);
9125             if (oact)
9126                 unlock_user_struct(oact, arg3, 1);
9127 #endif
9128         }
9129         return ret;
9130 #ifdef TARGET_NR_sgetmask /* not on alpha */
9131     case TARGET_NR_sgetmask:
9132         {
9133             sigset_t cur_set;
9134             abi_ulong target_set;
9135             ret = do_sigprocmask(0, NULL, &cur_set);
9136             if (!ret) {
9137                 host_to_target_old_sigset(&target_set, &cur_set);
9138                 ret = target_set;
9139             }
9140         }
9141         return ret;
9142 #endif
9143 #ifdef TARGET_NR_ssetmask /* not on alpha */
9144     case TARGET_NR_ssetmask:
9145         {
9146             sigset_t set, oset;
9147             abi_ulong target_set = arg1;
9148             target_to_host_old_sigset(&set, &target_set);
9149             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9150             if (!ret) {
9151                 host_to_target_old_sigset(&target_set, &oset);
9152                 ret = target_set;
9153             }
9154         }
9155         return ret;
9156 #endif
9157 #ifdef TARGET_NR_sigprocmask
9158     case TARGET_NR_sigprocmask:
9159         {
9160 #if defined(TARGET_ALPHA)
9161             sigset_t set, oldset;
9162             abi_ulong mask;
9163             int how;
9164 
9165             switch (arg1) {
9166             case TARGET_SIG_BLOCK:
9167                 how = SIG_BLOCK;
9168                 break;
9169             case TARGET_SIG_UNBLOCK:
9170                 how = SIG_UNBLOCK;
9171                 break;
9172             case TARGET_SIG_SETMASK:
9173                 how = SIG_SETMASK;
9174                 break;
9175             default:
9176                 return -TARGET_EINVAL;
9177             }
9178             mask = arg2;
9179             target_to_host_old_sigset(&set, &mask);
9180 
9181             ret = do_sigprocmask(how, &set, &oldset);
9182             if (!is_error(ret)) {
9183                 host_to_target_old_sigset(&mask, &oldset);
9184                 ret = mask;
9185                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9186             }
9187 #else
9188             sigset_t set, oldset, *set_ptr;
9189             int how;
9190 
9191             if (arg2) {
9192                 switch (arg1) {
9193                 case TARGET_SIG_BLOCK:
9194                     how = SIG_BLOCK;
9195                     break;
9196                 case TARGET_SIG_UNBLOCK:
9197                     how = SIG_UNBLOCK;
9198                     break;
9199                 case TARGET_SIG_SETMASK:
9200                     how = SIG_SETMASK;
9201                     break;
9202                 default:
9203                     return -TARGET_EINVAL;
9204                 }
9205                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9206                     return -TARGET_EFAULT;
9207                 target_to_host_old_sigset(&set, p);
9208                 unlock_user(p, arg2, 0);
9209                 set_ptr = &set;
9210             } else {
9211                 how = 0;
9212                 set_ptr = NULL;
9213             }
9214             ret = do_sigprocmask(how, set_ptr, &oldset);
9215             if (!is_error(ret) && arg3) {
9216                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9217                     return -TARGET_EFAULT;
9218                 host_to_target_old_sigset(p, &oldset);
9219                 unlock_user(p, arg3, sizeof(target_sigset_t));
9220             }
9221 #endif
9222         }
9223         return ret;
9224 #endif
9225     case TARGET_NR_rt_sigprocmask:
9226         {
9227             int how = arg1;
9228             sigset_t set, oldset, *set_ptr;
9229 
9230             if (arg4 != sizeof(target_sigset_t)) {
9231                 return -TARGET_EINVAL;
9232             }
9233 
9234             if (arg2) {
9235                 switch(how) {
9236                 case TARGET_SIG_BLOCK:
9237                     how = SIG_BLOCK;
9238                     break;
9239                 case TARGET_SIG_UNBLOCK:
9240                     how = SIG_UNBLOCK;
9241                     break;
9242                 case TARGET_SIG_SETMASK:
9243                     how = SIG_SETMASK;
9244                     break;
9245                 default:
9246                     return -TARGET_EINVAL;
9247                 }
9248                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9249                     return -TARGET_EFAULT;
9250                 target_to_host_sigset(&set, p);
9251                 unlock_user(p, arg2, 0);
9252                 set_ptr = &set;
9253             } else {
9254                 how = 0;
9255                 set_ptr = NULL;
9256             }
9257             ret = do_sigprocmask(how, set_ptr, &oldset);
9258             if (!is_error(ret) && arg3) {
9259                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9260                     return -TARGET_EFAULT;
9261                 host_to_target_sigset(p, &oldset);
9262                 unlock_user(p, arg3, sizeof(target_sigset_t));
9263             }
9264         }
9265         return ret;
9266 #ifdef TARGET_NR_sigpending
9267     case TARGET_NR_sigpending:
9268         {
9269             sigset_t set;
9270             ret = get_errno(sigpending(&set));
9271             if (!is_error(ret)) {
9272                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9273                     return -TARGET_EFAULT;
9274                 host_to_target_old_sigset(p, &set);
9275                 unlock_user(p, arg1, sizeof(target_sigset_t));
9276             }
9277         }
9278         return ret;
9279 #endif
9280     case TARGET_NR_rt_sigpending:
9281         {
9282             sigset_t set;
9283 
9284             /* Yes, this check is >, not != like most. We follow the kernel's
9285              * logic and it does it like this because it implements
9286              * NR_sigpending through the same code path, and in that case
9287              * the old_sigset_t is smaller in size.
9288              */
9289             if (arg2 > sizeof(target_sigset_t)) {
9290                 return -TARGET_EINVAL;
9291             }
9292 
9293             ret = get_errno(sigpending(&set));
9294             if (!is_error(ret)) {
9295                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9296                     return -TARGET_EFAULT;
9297                 host_to_target_sigset(p, &set);
9298                 unlock_user(p, arg1, sizeof(target_sigset_t));
9299             }
9300         }
9301         return ret;
9302 #ifdef TARGET_NR_sigsuspend
9303     case TARGET_NR_sigsuspend:
9304         {
9305             TaskState *ts = cpu->opaque;
9306 #if defined(TARGET_ALPHA)
9307             abi_ulong mask = arg1;
9308             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9309 #else
9310             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9311                 return -TARGET_EFAULT;
9312             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9313             unlock_user(p, arg1, 0);
9314 #endif
9315             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9316                                                SIGSET_T_SIZE));
9317             if (ret != -TARGET_ERESTARTSYS) {
9318                 ts->in_sigsuspend = 1;
9319             }
9320         }
9321         return ret;
9322 #endif
9323     case TARGET_NR_rt_sigsuspend:
9324         {
9325             TaskState *ts = cpu->opaque;
9326 
9327             if (arg2 != sizeof(target_sigset_t)) {
9328                 return -TARGET_EINVAL;
9329             }
9330             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9331                 return -TARGET_EFAULT;
9332             target_to_host_sigset(&ts->sigsuspend_mask, p);
9333             unlock_user(p, arg1, 0);
9334             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9335                                                SIGSET_T_SIZE));
9336             if (ret != -TARGET_ERESTARTSYS) {
9337                 ts->in_sigsuspend = 1;
9338             }
9339         }
9340         return ret;
9341 #ifdef TARGET_NR_rt_sigtimedwait
9342     case TARGET_NR_rt_sigtimedwait:
9343         {
9344             sigset_t set;
9345             struct timespec uts, *puts;
9346             siginfo_t uinfo;
9347 
9348             if (arg4 != sizeof(target_sigset_t)) {
9349                 return -TARGET_EINVAL;
9350             }
9351 
9352             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9353                 return -TARGET_EFAULT;
9354             target_to_host_sigset(&set, p);
9355             unlock_user(p, arg1, 0);
9356             if (arg3) {
9357                 puts = &uts;
9358                 if (target_to_host_timespec(puts, arg3)) {
9359                     return -TARGET_EFAULT;
9360                 }
9361             } else {
9362                 puts = NULL;
9363             }
9364             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9365                                                  SIGSET_T_SIZE));
9366             if (!is_error(ret)) {
9367                 if (arg2) {
9368                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9369                                   0);
9370                     if (!p) {
9371                         return -TARGET_EFAULT;
9372                     }
9373                     host_to_target_siginfo(p, &uinfo);
9374                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9375                 }
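                /* The value returned on success is a host signal number;
                 * translate it back to the target's numbering. */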
9376                 ret = host_to_target_signal(ret);
9377             }
9378         }
9379         return ret;
9380 #endif
9381 #ifdef TARGET_NR_rt_sigtimedwait_time64
9382     case TARGET_NR_rt_sigtimedwait_time64:
9383         {
9384             sigset_t set;
9385             struct timespec uts, *puts;
9386             siginfo_t uinfo;
9387 
9388             if (arg4 != sizeof(target_sigset_t)) {
9389                 return -TARGET_EINVAL;
9390             }
9391 
9392             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9393             if (!p) {
9394                 return -TARGET_EFAULT;
9395             }
9396             target_to_host_sigset(&set, p);
9397             unlock_user(p, arg1, 0);
9398             if (arg3) {
9399                 puts = &uts;
9400                 if (target_to_host_timespec64(puts, arg3)) {
9401                     return -TARGET_EFAULT;
9402                 }
9403             } else {
9404                 puts = NULL;
9405             }
9406             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9407                                                  SIGSET_T_SIZE));
9408             if (!is_error(ret)) {
9409                 if (arg2) {
9410                     p = lock_user(VERIFY_WRITE, arg2,
9411                                   sizeof(target_siginfo_t), 0);
9412                     if (!p) {
9413                         return -TARGET_EFAULT;
9414                     }
9415                     host_to_target_siginfo(p, &uinfo);
9416                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9417                 }
9418                 ret = host_to_target_signal(ret);
9419             }
9420         }
9421         return ret;
9422 #endif
9423     case TARGET_NR_rt_sigqueueinfo:
9424         {
9425             siginfo_t uinfo;
9426 
9427             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9428             if (!p) {
9429                 return -TARGET_EFAULT;
9430             }
9431             target_to_host_siginfo(&uinfo, p);
9432             unlock_user(p, arg3, 0);
9433             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9434         }
9435         return ret;
9436     case TARGET_NR_rt_tgsigqueueinfo:
9437         {
9438             siginfo_t uinfo;
9439 
9440             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9441             if (!p) {
9442                 return -TARGET_EFAULT;
9443             }
9444             target_to_host_siginfo(&uinfo, p);
9445             unlock_user(p, arg4, 0);
9446             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9447         }
9448         return ret;
9449 #ifdef TARGET_NR_sigreturn
9450     case TARGET_NR_sigreturn:
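        /*
         * If guest signals are pending, restart the syscall so they are
         * delivered before the signal frame is unwound.
         */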
9451         if (block_signals()) {
9452             return -TARGET_ERESTARTSYS;
9453         }
9454         return do_sigreturn(cpu_env);
9455 #endif
9456     case TARGET_NR_rt_sigreturn:
9457         if (block_signals()) {
9458             return -TARGET_ERESTARTSYS;
9459         }
9460         return do_rt_sigreturn(cpu_env);
9461     case TARGET_NR_sethostname:
9462         if (!(p = lock_user_string(arg1)))
9463             return -TARGET_EFAULT;
9464         ret = get_errno(sethostname(p, arg2));
9465         unlock_user(p, arg1, 0);
9466         return ret;
9467 #ifdef TARGET_NR_setrlimit
9468     case TARGET_NR_setrlimit:
9469         {
9470             int resource = target_to_host_resource(arg1);
9471             struct target_rlimit *target_rlim;
9472             struct rlimit rlim;
9473             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9474                 return -TARGET_EFAULT;
9475             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9476             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9477             unlock_user_struct(target_rlim, arg2, 0);
9478             /*
9479              * If we just passed through resource limit settings for memory then
9480              * they would also apply to QEMU's own allocations, and QEMU will
9481              * crash or hang or die if its allocations fail. Ideally we would
9482              * track the guest allocations in QEMU and apply the limits ourselves.
9483              * For now, just tell the guest the call succeeded but don't actually
9484              * limit anything.
9485              */
9486             if (resource != RLIMIT_AS &&
9487                 resource != RLIMIT_DATA &&
9488                 resource != RLIMIT_STACK) {
9489                 return get_errno(setrlimit(resource, &rlim));
9490             } else {
9491                 return 0;
9492             }
9493         }
9494 #endif
9495 #ifdef TARGET_NR_getrlimit
9496     case TARGET_NR_getrlimit:
9497         {
9498             int resource = target_to_host_resource(arg1);
9499             struct target_rlimit *target_rlim;
9500             struct rlimit rlim;
9501 
9502             ret = get_errno(getrlimit(resource, &rlim));
9503             if (!is_error(ret)) {
9504                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9505                     return -TARGET_EFAULT;
9506                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9507                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9508                 unlock_user_struct(target_rlim, arg2, 1);
9509             }
9510         }
9511         return ret;
9512 #endif
9513     case TARGET_NR_getrusage:
9514         {
9515             struct rusage rusage;
9516             ret = get_errno(getrusage(arg1, &rusage));
9517             if (!is_error(ret)) {
9518                 ret = host_to_target_rusage(arg2, &rusage);
9519             }
9520         }
9521         return ret;
9522 #if defined(TARGET_NR_gettimeofday)
9523     case TARGET_NR_gettimeofday:
9524         {
9525             struct timeval tv;
9526             struct timezone tz;
9527 
9528             ret = get_errno(gettimeofday(&tv, &tz));
9529             if (!is_error(ret)) {
9530                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9531                     return -TARGET_EFAULT;
9532                 }
9533                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9534                     return -TARGET_EFAULT;
9535                 }
9536             }
9537         }
9538         return ret;
9539 #endif
9540 #if defined(TARGET_NR_settimeofday)
9541     case TARGET_NR_settimeofday:
9542         {
9543             struct timeval tv, *ptv = NULL;
9544             struct timezone tz, *ptz = NULL;
9545 
9546             if (arg1) {
9547                 if (copy_from_user_timeval(&tv, arg1)) {
9548                     return -TARGET_EFAULT;
9549                 }
9550                 ptv = &tv;
9551             }
9552 
9553             if (arg2) {
9554                 if (copy_from_user_timezone(&tz, arg2)) {
9555                     return -TARGET_EFAULT;
9556                 }
9557                 ptz = &tz;
9558             }
9559 
9560             return get_errno(settimeofday(ptv, ptz));
9561         }
9562 #endif
9563 #if defined(TARGET_NR_select)
9564     case TARGET_NR_select:
9565 #if defined(TARGET_WANT_NI_OLD_SELECT)
9566         /* some architectures used to have old_select here,
9567          * but now return ENOSYS for it.
9568          */
9569         ret = -TARGET_ENOSYS;
9570 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9571         ret = do_old_select(arg1);
9572 #else
9573         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9574 #endif
9575         return ret;
9576 #endif
9577 #ifdef TARGET_NR_pselect6
9578     case TARGET_NR_pselect6:
9579         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9580 #endif
9581 #ifdef TARGET_NR_pselect6_time64
9582     case TARGET_NR_pselect6_time64:
9583         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9584 #endif
9585 #ifdef TARGET_NR_symlink
9586     case TARGET_NR_symlink:
9587         {
9588             void *p2;
9589             p = lock_user_string(arg1);
9590             p2 = lock_user_string(arg2);
9591             if (!p || !p2)
9592                 ret = -TARGET_EFAULT;
9593             else
9594                 ret = get_errno(symlink(p, p2));
9595             unlock_user(p2, arg2, 0);
9596             unlock_user(p, arg1, 0);
9597         }
9598         return ret;
9599 #endif
9600 #if defined(TARGET_NR_symlinkat)
9601     case TARGET_NR_symlinkat:
9602         {
9603             void *p2;
9604             p  = lock_user_string(arg1);
9605             p2 = lock_user_string(arg3);
9606             if (!p || !p2)
9607                 ret = -TARGET_EFAULT;
9608             else
9609                 ret = get_errno(symlinkat(p, arg2, p2));
9610             unlock_user(p2, arg3, 0);
9611             unlock_user(p, arg1, 0);
9612         }
9613         return ret;
9614 #endif
9615 #ifdef TARGET_NR_readlink
9616     case TARGET_NR_readlink:
9617         {
9618             void *p2;
9619             p = lock_user_string(arg1);
9620             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9621             if (!p || !p2) {
9622                 ret = -TARGET_EFAULT;
9623             } else if (!arg3) {
9624                 /* Short circuit this for the magic exe check. */
9625                 ret = -TARGET_EINVAL;
9626             } else if (is_proc_myself((const char *)p, "exe")) {
9627                 char real[PATH_MAX], *temp;
9628                 temp = realpath(exec_path, real);
9629                 /* Return value is # of bytes that we wrote to the buffer. */
9630                 if (temp == NULL) {
9631                     ret = get_errno(-1);
9632                 } else {
9633                     /* Don't worry about sign mismatch as earlier mapping
9634                      * logic would have thrown a bad address error. */
9635                     ret = MIN(strlen(real), arg3);
9636                     /* We cannot NUL terminate the string. */
9637                     memcpy(p2, real, ret);
9638                 }
9639             } else {
9640                 ret = get_errno(readlink(path(p), p2, arg3));
9641             }
9642             unlock_user(p2, arg2, ret);
9643             unlock_user(p, arg1, 0);
9644         }
9645         return ret;
9646 #endif
9647 #if defined(TARGET_NR_readlinkat)
9648     case TARGET_NR_readlinkat:
9649         {
9650             void *p2;
9651             p  = lock_user_string(arg2);
9652             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9653             if (!p || !p2) {
9654                 ret = -TARGET_EFAULT;
9655             } else if (is_proc_myself((const char *)p, "exe")) {
9656                 char real[PATH_MAX], *temp;
9657                 temp = realpath(exec_path, real);
9658                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9659                 snprintf((char *)p2, arg4, "%s", real);
9660             } else {
9661                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9662             }
9663             unlock_user(p2, arg3, ret);
9664             unlock_user(p, arg2, 0);
9665         }
9666         return ret;
9667 #endif
9668 #ifdef TARGET_NR_swapon
9669     case TARGET_NR_swapon:
9670         if (!(p = lock_user_string(arg1)))
9671             return -TARGET_EFAULT;
9672         ret = get_errno(swapon(p, arg2));
9673         unlock_user(p, arg1, 0);
9674         return ret;
9675 #endif
9676     case TARGET_NR_reboot:
9677         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9678            /* arg4 is only used with RESTART2; it must be ignored in all other cases */
9679            p = lock_user_string(arg4);
9680            if (!p) {
9681                return -TARGET_EFAULT;
9682            }
9683            ret = get_errno(reboot(arg1, arg2, arg3, p));
9684            unlock_user(p, arg4, 0);
9685         } else {
9686            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9687         }
9688         return ret;
9689 #ifdef TARGET_NR_mmap
9690     case TARGET_NR_mmap:
9691 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9692     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9693     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9694     || defined(TARGET_S390X)
9695         {
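            /*
             * These targets use the old_mmap calling convention: arg1
             * points to a block of six abi_ulong values in guest memory
             * that holds the real mmap arguments.
             */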
9696             abi_ulong *v;
9697             abi_ulong v1, v2, v3, v4, v5, v6;
9698             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9699                 return -TARGET_EFAULT;
9700             v1 = tswapal(v[0]);
9701             v2 = tswapal(v[1]);
9702             v3 = tswapal(v[2]);
9703             v4 = tswapal(v[3]);
9704             v5 = tswapal(v[4]);
9705             v6 = tswapal(v[5]);
9706             unlock_user(v, arg1, 0);
9707             ret = get_errno(target_mmap(v1, v2, v3,
9708                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9709                                         v5, v6));
9710         }
9711 #else
9712         /* mmap pointers are always untagged */
9713         ret = get_errno(target_mmap(arg1, arg2, arg3,
9714                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9715                                     arg5,
9716                                     arg6));
9717 #endif
9718         return ret;
9719 #endif
9720 #ifdef TARGET_NR_mmap2
9721     case TARGET_NR_mmap2:
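        /*
         * mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
         * (4096 unless the target overrides it), so scale it back up to
         * a byte offset for target_mmap().
         */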
9722 #ifndef MMAP_SHIFT
9723 #define MMAP_SHIFT 12
9724 #endif
9725         ret = target_mmap(arg1, arg2, arg3,
9726                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9727                           arg5, arg6 << MMAP_SHIFT);
9728         return get_errno(ret);
9729 #endif
9730     case TARGET_NR_munmap:
9731         arg1 = cpu_untagged_addr(cpu, arg1);
9732         return get_errno(target_munmap(arg1, arg2));
9733     case TARGET_NR_mprotect:
9734         arg1 = cpu_untagged_addr(cpu, arg1);
9735         {
9736             TaskState *ts = cpu->opaque;
9737             /* Special hack to detect libc making the stack executable.  */
9738             if ((arg3 & PROT_GROWSDOWN)
9739                 && arg1 >= ts->info->stack_limit
9740                 && arg1 <= ts->info->start_stack) {
9741                 arg3 &= ~PROT_GROWSDOWN;
9742                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9743                 arg1 = ts->info->stack_limit;
9744             }
9745         }
9746         return get_errno(target_mprotect(arg1, arg2, arg3));
9747 #ifdef TARGET_NR_mremap
9748     case TARGET_NR_mremap:
9749         arg1 = cpu_untagged_addr(cpu, arg1);
9750         /* mremap new_addr (arg5) is always untagged */
9751         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9752 #endif
9753         /* ??? msync/mlock/munlock are broken for softmmu.  */
9754 #ifdef TARGET_NR_msync
9755     case TARGET_NR_msync:
9756         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9757 #endif
9758 #ifdef TARGET_NR_mlock
9759     case TARGET_NR_mlock:
9760         return get_errno(mlock(g2h(cpu, arg1), arg2));
9761 #endif
9762 #ifdef TARGET_NR_munlock
9763     case TARGET_NR_munlock:
9764         return get_errno(munlock(g2h(cpu, arg1), arg2));
9765 #endif
9766 #ifdef TARGET_NR_mlockall
9767     case TARGET_NR_mlockall:
9768         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9769 #endif
9770 #ifdef TARGET_NR_munlockall
9771     case TARGET_NR_munlockall:
9772         return get_errno(munlockall());
9773 #endif
9774 #ifdef TARGET_NR_truncate
9775     case TARGET_NR_truncate:
9776         if (!(p = lock_user_string(arg1)))
9777             return -TARGET_EFAULT;
9778         ret = get_errno(truncate(p, arg2));
9779         unlock_user(p, arg1, 0);
9780         return ret;
9781 #endif
9782 #ifdef TARGET_NR_ftruncate
9783     case TARGET_NR_ftruncate:
9784         return get_errno(ftruncate(arg1, arg2));
9785 #endif
9786     case TARGET_NR_fchmod:
9787         return get_errno(fchmod(arg1, arg2));
9788 #if defined(TARGET_NR_fchmodat)
9789     case TARGET_NR_fchmodat:
9790         if (!(p = lock_user_string(arg2)))
9791             return -TARGET_EFAULT;
9792         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9793         unlock_user(p, arg2, 0);
9794         return ret;
9795 #endif
9796     case TARGET_NR_getpriority:
9797         /* Note that negative values are valid for getpriority, so we must
9798            differentiate based on errno settings.  */
9799         errno = 0;
9800         ret = getpriority(arg1, arg2);
9801         if (ret == -1 && errno != 0) {
9802             return -host_to_target_errno(errno);
9803         }
9804 #ifdef TARGET_ALPHA
9805         /* Return value is the unbiased priority.  Signal no error.  */
9806         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9807 #else
9808         /* Return value is a biased priority to avoid negative numbers.  */
9809         ret = 20 - ret;
9810 #endif
9811         return ret;
9812     case TARGET_NR_setpriority:
9813         return get_errno(setpriority(arg1, arg2, arg3));
9814 #ifdef TARGET_NR_statfs
9815     case TARGET_NR_statfs:
9816         if (!(p = lock_user_string(arg1))) {
9817             return -TARGET_EFAULT;
9818         }
9819         ret = get_errno(statfs(path(p), &stfs));
9820         unlock_user(p, arg1, 0);
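    /* Conversion to the target statfs layout, shared with fstatfs below. */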
9821     convert_statfs:
9822         if (!is_error(ret)) {
9823             struct target_statfs *target_stfs;
9824 
9825             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9826                 return -TARGET_EFAULT;
9827             __put_user(stfs.f_type, &target_stfs->f_type);
9828             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9829             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9830             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9831             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9832             __put_user(stfs.f_files, &target_stfs->f_files);
9833             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9834             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9835             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9836             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9837             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9838 #ifdef _STATFS_F_FLAGS
9839             __put_user(stfs.f_flags, &target_stfs->f_flags);
9840 #else
9841             __put_user(0, &target_stfs->f_flags);
9842 #endif
9843             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9844             unlock_user_struct(target_stfs, arg2, 1);
9845         }
9846         return ret;
9847 #endif
9848 #ifdef TARGET_NR_fstatfs
9849     case TARGET_NR_fstatfs:
9850         ret = get_errno(fstatfs(arg1, &stfs));
9851         goto convert_statfs;
9852 #endif
9853 #ifdef TARGET_NR_statfs64
9854     case TARGET_NR_statfs64:
9855         if (!(p = lock_user_string(arg1))) {
9856             return -TARGET_EFAULT;
9857         }
9858         ret = get_errno(statfs(path(p), &stfs));
9859         unlock_user(p, arg1, 0);
9860     convert_statfs64:
9861         if (!is_error(ret)) {
9862             struct target_statfs64 *target_stfs;
9863 
9864             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9865                 return -TARGET_EFAULT;
9866             __put_user(stfs.f_type, &target_stfs->f_type);
9867             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9868             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9869             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9870             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9871             __put_user(stfs.f_files, &target_stfs->f_files);
9872             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9873             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9874             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9875             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9876             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9877 #ifdef _STATFS_F_FLAGS
9878             __put_user(stfs.f_flags, &target_stfs->f_flags);
9879 #else
9880             __put_user(0, &target_stfs->f_flags);
9881 #endif
9882             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9883             unlock_user_struct(target_stfs, arg3, 1);
9884         }
9885         return ret;
9886     case TARGET_NR_fstatfs64:
9887         ret = get_errno(fstatfs(arg1, &stfs));
9888         goto convert_statfs64;
9889 #endif
9890 #ifdef TARGET_NR_socketcall
9891     case TARGET_NR_socketcall:
9892         return do_socketcall(arg1, arg2);
9893 #endif
9894 #ifdef TARGET_NR_accept
9895     case TARGET_NR_accept:
9896         return do_accept4(arg1, arg2, arg3, 0);
9897 #endif
9898 #ifdef TARGET_NR_accept4
9899     case TARGET_NR_accept4:
9900         return do_accept4(arg1, arg2, arg3, arg4);
9901 #endif
9902 #ifdef TARGET_NR_bind
9903     case TARGET_NR_bind:
9904         return do_bind(arg1, arg2, arg3);
9905 #endif
9906 #ifdef TARGET_NR_connect
9907     case TARGET_NR_connect:
9908         return do_connect(arg1, arg2, arg3);
9909 #endif
9910 #ifdef TARGET_NR_getpeername
9911     case TARGET_NR_getpeername:
9912         return do_getpeername(arg1, arg2, arg3);
9913 #endif
9914 #ifdef TARGET_NR_getsockname
9915     case TARGET_NR_getsockname:
9916         return do_getsockname(arg1, arg2, arg3);
9917 #endif
9918 #ifdef TARGET_NR_getsockopt
9919     case TARGET_NR_getsockopt:
9920         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9921 #endif
9922 #ifdef TARGET_NR_listen
9923     case TARGET_NR_listen:
9924         return get_errno(listen(arg1, arg2));
9925 #endif
9926 #ifdef TARGET_NR_recv
9927     case TARGET_NR_recv:
9928         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9929 #endif
9930 #ifdef TARGET_NR_recvfrom
9931     case TARGET_NR_recvfrom:
9932         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9933 #endif
9934 #ifdef TARGET_NR_recvmsg
9935     case TARGET_NR_recvmsg:
9936         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9937 #endif
9938 #ifdef TARGET_NR_send
9939     case TARGET_NR_send:
9940         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9941 #endif
9942 #ifdef TARGET_NR_sendmsg
9943     case TARGET_NR_sendmsg:
9944         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9945 #endif
9946 #ifdef TARGET_NR_sendmmsg
9947     case TARGET_NR_sendmmsg:
9948         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9949 #endif
9950 #ifdef TARGET_NR_recvmmsg
9951     case TARGET_NR_recvmmsg:
9952         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9953 #endif
9954 #ifdef TARGET_NR_sendto
9955     case TARGET_NR_sendto:
9956         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9957 #endif
9958 #ifdef TARGET_NR_shutdown
9959     case TARGET_NR_shutdown:
9960         return get_errno(shutdown(arg1, arg2));
9961 #endif
9962 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9963     case TARGET_NR_getrandom:
9964         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9965         if (!p) {
9966             return -TARGET_EFAULT;
9967         }
9968         ret = get_errno(getrandom(p, arg2, arg3));
9969         unlock_user(p, arg1, ret);
9970         return ret;
9971 #endif
9972 #ifdef TARGET_NR_socket
9973     case TARGET_NR_socket:
9974         return do_socket(arg1, arg2, arg3);
9975 #endif
9976 #ifdef TARGET_NR_socketpair
9977     case TARGET_NR_socketpair:
9978         return do_socketpair(arg1, arg2, arg3, arg4);
9979 #endif
9980 #ifdef TARGET_NR_setsockopt
9981     case TARGET_NR_setsockopt:
9982         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9983 #endif
9984 #if defined(TARGET_NR_syslog)
9985     case TARGET_NR_syslog:
9986         {
9987             int len = arg2;
9988 
9989             switch (arg1) {
9990             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9991             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9992             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9993             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9994             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9995             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9996             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9997             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9998                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9999             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10000             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10001             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10002                 {
10003                     if (len < 0) {
10004                         return -TARGET_EINVAL;
10005                     }
10006                     if (len == 0) {
10007                         return 0;
10008                     }
10009                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10010                     if (!p) {
10011                         return -TARGET_EFAULT;
10012                     }
10013                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10014                     unlock_user(p, arg2, arg3);
10015                 }
10016                 return ret;
10017             default:
10018                 return -TARGET_EINVAL;
10019             }
10020         }
10021         break;
10022 #endif
10023     case TARGET_NR_setitimer:
10024         {
10025             struct itimerval value, ovalue, *pvalue;
10026 
10027             if (arg2) {
10028                 pvalue = &value;
10029                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10030                     || copy_from_user_timeval(&pvalue->it_value,
10031                                               arg2 + sizeof(struct target_timeval)))
10032                     return -TARGET_EFAULT;
10033             } else {
10034                 pvalue = NULL;
10035             }
10036             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10037             if (!is_error(ret) && arg3) {
10038                 if (copy_to_user_timeval(arg3,
10039                                          &ovalue.it_interval)
10040                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10041                                             &ovalue.it_value))
10042                     return -TARGET_EFAULT;
10043             }
10044         }
10045         return ret;
10046     case TARGET_NR_getitimer:
10047         {
10048             struct itimerval value;
10049 
10050             ret = get_errno(getitimer(arg1, &value));
10051             if (!is_error(ret) && arg2) {
10052                 if (copy_to_user_timeval(arg2,
10053                                          &value.it_interval)
10054                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10055                                             &value.it_value))
10056                     return -TARGET_EFAULT;
10057             }
10058         }
10059         return ret;
10060 #ifdef TARGET_NR_stat
10061     case TARGET_NR_stat:
10062         if (!(p = lock_user_string(arg1))) {
10063             return -TARGET_EFAULT;
10064         }
10065         ret = get_errno(stat(path(p), &st));
10066         unlock_user(p, arg1, 0);
10067         goto do_stat;
10068 #endif
10069 #ifdef TARGET_NR_lstat
10070     case TARGET_NR_lstat:
10071         if (!(p = lock_user_string(arg1))) {
10072             return -TARGET_EFAULT;
10073         }
10074         ret = get_errno(lstat(path(p), &st));
10075         unlock_user(p, arg1, 0);
10076         goto do_stat;
10077 #endif
10078 #ifdef TARGET_NR_fstat
10079     case TARGET_NR_fstat:
10080         {
10081             ret = get_errno(fstat(arg1, &st));
10082 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10083         do_stat:
10084 #endif
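            /* Convert the host struct stat to the target layout; shared
             * by stat, lstat and fstat via the do_stat label. */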
10085             if (!is_error(ret)) {
10086                 struct target_stat *target_st;
10087 
10088                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10089                     return -TARGET_EFAULT;
10090                 memset(target_st, 0, sizeof(*target_st));
10091                 __put_user(st.st_dev, &target_st->st_dev);
10092                 __put_user(st.st_ino, &target_st->st_ino);
10093                 __put_user(st.st_mode, &target_st->st_mode);
10094                 __put_user(st.st_uid, &target_st->st_uid);
10095                 __put_user(st.st_gid, &target_st->st_gid);
10096                 __put_user(st.st_nlink, &target_st->st_nlink);
10097                 __put_user(st.st_rdev, &target_st->st_rdev);
10098                 __put_user(st.st_size, &target_st->st_size);
10099                 __put_user(st.st_blksize, &target_st->st_blksize);
10100                 __put_user(st.st_blocks, &target_st->st_blocks);
10101                 __put_user(st.st_atime, &target_st->target_st_atime);
10102                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10103                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10104 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10105     defined(TARGET_STAT_HAVE_NSEC)
10106                 __put_user(st.st_atim.tv_nsec,
10107                            &target_st->target_st_atime_nsec);
10108                 __put_user(st.st_mtim.tv_nsec,
10109                            &target_st->target_st_mtime_nsec);
10110                 __put_user(st.st_ctim.tv_nsec,
10111                            &target_st->target_st_ctime_nsec);
10112 #endif
10113                 unlock_user_struct(target_st, arg2, 1);
10114             }
10115         }
10116         return ret;
10117 #endif
10118     case TARGET_NR_vhangup:
10119         return get_errno(vhangup());
10120 #ifdef TARGET_NR_syscall
10121     case TARGET_NR_syscall:
10122         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10123                           arg6, arg7, arg8, 0);
10124 #endif
10125 #if defined(TARGET_NR_wait4)
10126     case TARGET_NR_wait4:
10127         {
10128             int status;
10129             abi_long status_ptr = arg2;
10130             struct rusage rusage, *rusage_ptr;
10131             abi_ulong target_rusage = arg4;
10132             abi_long rusage_err;
10133             if (target_rusage)
10134                 rusage_ptr = &rusage;
10135             else
10136                 rusage_ptr = NULL;
10137             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10138             if (!is_error(ret)) {
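                /* The status is only written back if a child was actually
                 * reaped (ret != 0) and the guest supplied a pointer. */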
10139                 if (status_ptr && ret) {
10140                     status = host_to_target_waitstatus(status);
10141                     if (put_user_s32(status, status_ptr))
10142                         return -TARGET_EFAULT;
10143                 }
10144                 if (target_rusage) {
10145                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10146                     if (rusage_err) {
10147                         ret = rusage_err;
10148                     }
10149                 }
10150             }
10151         }
10152         return ret;
10153 #endif
10154 #ifdef TARGET_NR_swapoff
10155     case TARGET_NR_swapoff:
10156         if (!(p = lock_user_string(arg1)))
10157             return -TARGET_EFAULT;
10158         ret = get_errno(swapoff(p));
10159         unlock_user(p, arg1, 0);
10160         return ret;
10161 #endif
10162     case TARGET_NR_sysinfo:
10163         {
10164             struct target_sysinfo *target_value;
10165             struct sysinfo value;
10166             ret = get_errno(sysinfo(&value));
10167             if (!is_error(ret) && arg1)
10168             {
10169                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10170                     return -TARGET_EFAULT;
10171                 __put_user(value.uptime, &target_value->uptime);
10172                 __put_user(value.loads[0], &target_value->loads[0]);
10173                 __put_user(value.loads[1], &target_value->loads[1]);
10174                 __put_user(value.loads[2], &target_value->loads[2]);
10175                 __put_user(value.totalram, &target_value->totalram);
10176                 __put_user(value.freeram, &target_value->freeram);
10177                 __put_user(value.sharedram, &target_value->sharedram);
10178                 __put_user(value.bufferram, &target_value->bufferram);
10179                 __put_user(value.totalswap, &target_value->totalswap);
10180                 __put_user(value.freeswap, &target_value->freeswap);
10181                 __put_user(value.procs, &target_value->procs);
10182                 __put_user(value.totalhigh, &target_value->totalhigh);
10183                 __put_user(value.freehigh, &target_value->freehigh);
10184                 __put_user(value.mem_unit, &target_value->mem_unit);
10185                 unlock_user_struct(target_value, arg1, 1);
10186             }
10187         }
10188         return ret;
10189 #ifdef TARGET_NR_ipc
10190     case TARGET_NR_ipc:
10191         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10192 #endif
10193 #ifdef TARGET_NR_semget
10194     case TARGET_NR_semget:
10195         return get_errno(semget(arg1, arg2, arg3));
10196 #endif
10197 #ifdef TARGET_NR_semop
10198     case TARGET_NR_semop:
10199         return do_semtimedop(arg1, arg2, arg3, 0, false);
10200 #endif
10201 #ifdef TARGET_NR_semtimedop
10202     case TARGET_NR_semtimedop:
10203         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10204 #endif
10205 #ifdef TARGET_NR_semtimedop_time64
10206     case TARGET_NR_semtimedop_time64:
10207         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10208 #endif
10209 #ifdef TARGET_NR_semctl
10210     case TARGET_NR_semctl:
10211         return do_semctl(arg1, arg2, arg3, arg4);
10212 #endif
10213 #ifdef TARGET_NR_msgctl
10214     case TARGET_NR_msgctl:
10215         return do_msgctl(arg1, arg2, arg3);
10216 #endif
10217 #ifdef TARGET_NR_msgget
10218     case TARGET_NR_msgget:
10219         return get_errno(msgget(arg1, arg2));
10220 #endif
10221 #ifdef TARGET_NR_msgrcv
10222     case TARGET_NR_msgrcv:
10223         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10224 #endif
10225 #ifdef TARGET_NR_msgsnd
10226     case TARGET_NR_msgsnd:
10227         return do_msgsnd(arg1, arg2, arg3, arg4);
10228 #endif
10229 #ifdef TARGET_NR_shmget
10230     case TARGET_NR_shmget:
10231         return get_errno(shmget(arg1, arg2, arg3));
10232 #endif
10233 #ifdef TARGET_NR_shmctl
10234     case TARGET_NR_shmctl:
10235         return do_shmctl(arg1, arg2, arg3);
10236 #endif
10237 #ifdef TARGET_NR_shmat
10238     case TARGET_NR_shmat:
10239         return do_shmat(cpu_env, arg1, arg2, arg3);
10240 #endif
10241 #ifdef TARGET_NR_shmdt
10242     case TARGET_NR_shmdt:
10243         return do_shmdt(arg1);
10244 #endif
10245     case TARGET_NR_fsync:
10246         return get_errno(fsync(arg1));
10247     case TARGET_NR_clone:
10248         /* Linux manages to have three different orderings for its
10249          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10250          * match the kernel's CONFIG_CLONE_* settings.
10251          * Microblaze is further special in that it uses a sixth
10252          * implicit argument to clone for the TLS pointer.
10253          */
10254 #if defined(TARGET_MICROBLAZE)
10255         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10256 #elif defined(TARGET_CLONE_BACKWARDS)
10257         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10258 #elif defined(TARGET_CLONE_BACKWARDS2)
10259         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10260 #else
10261         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10262 #endif
10263         return ret;
10264 #ifdef __NR_exit_group
10265         /* new thread calls */
10266     case TARGET_NR_exit_group:
10267         preexit_cleanup(cpu_env, arg1);
10268         return get_errno(exit_group(arg1));
10269 #endif
10270     case TARGET_NR_setdomainname:
10271         if (!(p = lock_user_string(arg1)))
10272             return -TARGET_EFAULT;
10273         ret = get_errno(setdomainname(p, arg2));
10274         unlock_user(p, arg1, 0);
10275         return ret;
10276     case TARGET_NR_uname:
10277         /* no need to transcode because we use the linux syscall */
10278         {
10279             struct new_utsname * buf;
10280 
10281             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10282                 return -TARGET_EFAULT;
10283             ret = get_errno(sys_uname(buf));
10284             if (!is_error(ret)) {
10285                 /* Overwrite the native machine name with whatever is being
10286                    emulated. */
10287                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10288                           sizeof(buf->machine));
10289                 /* Allow the user to override the reported release.  */
10290                 if (qemu_uname_release && *qemu_uname_release) {
10291                     g_strlcpy(buf->release, qemu_uname_release,
10292                               sizeof(buf->release));
10293                 }
10294             }
10295             unlock_user_struct(buf, arg1, 1);
10296         }
10297         return ret;
10298 #ifdef TARGET_I386
10299     case TARGET_NR_modify_ldt:
10300         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10301 #if !defined(TARGET_X86_64)
10302     case TARGET_NR_vm86:
10303         return do_vm86(cpu_env, arg1, arg2);
10304 #endif
10305 #endif
10306 #if defined(TARGET_NR_adjtimex)
10307     case TARGET_NR_adjtimex:
10308         {
10309             struct timex host_buf;
10310 
10311             if (target_to_host_timex(&host_buf, arg1) != 0) {
10312                 return -TARGET_EFAULT;
10313             }
10314             ret = get_errno(adjtimex(&host_buf));
10315             if (!is_error(ret)) {
10316                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10317                     return -TARGET_EFAULT;
10318                 }
10319             }
10320         }
10321         return ret;
10322 #endif
10323 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10324     case TARGET_NR_clock_adjtime:
10325         {
10326             struct timex htx, *phtx = &htx;
10327 
10328             if (target_to_host_timex(phtx, arg2) != 0) {
10329                 return -TARGET_EFAULT;
10330             }
10331             ret = get_errno(clock_adjtime(arg1, phtx));
10332             if (!is_error(ret) && phtx) {
10333                 if (host_to_target_timex(arg2, phtx) != 0) {
10334                     return -TARGET_EFAULT;
10335                 }
10336             }
10337         }
10338         return ret;
10339 #endif
10340 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10341     case TARGET_NR_clock_adjtime64:
10342         {
10343             struct timex htx;
10344 
10345             if (target_to_host_timex64(&htx, arg2) != 0) {
10346                 return -TARGET_EFAULT;
10347             }
10348             ret = get_errno(clock_adjtime(arg1, &htx));
10349             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10350                     return -TARGET_EFAULT;
10351             }
10352         }
10353         return ret;
10354 #endif
10355     case TARGET_NR_getpgid:
10356         return get_errno(getpgid(arg1));
10357     case TARGET_NR_fchdir:
10358         return get_errno(fchdir(arg1));
10359     case TARGET_NR_personality:
10360         return get_errno(personality(arg1));
10361 #ifdef TARGET_NR__llseek /* Not on alpha */
10362     case TARGET_NR__llseek:
10363         {
10364             int64_t res;
10365 #if !defined(__NR_llseek)
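            /*
             * The host has no llseek syscall (64-bit hosts), so combine
             * the two 32-bit halves of the offset and call lseek directly.
             */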
10366             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10367             if (res == -1) {
10368                 ret = get_errno(res);
10369             } else {
10370                 ret = 0;
10371             }
10372 #else
10373             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10374 #endif
10375             if ((ret == 0) && put_user_s64(res, arg4)) {
10376                 return -TARGET_EFAULT;
10377             }
10378         }
10379         return ret;
10380 #endif
10381 #ifdef TARGET_NR_getdents
10382     case TARGET_NR_getdents:
10383 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10384 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10385         {
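            /*
             * Here the host linux_dirent carries 64-bit d_ino/d_off while
             * the 32-bit target expects abi_long sized fields, so read
             * into a bounce buffer and repack each record for the guest.
             */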
10386             struct target_dirent *target_dirp;
10387             struct linux_dirent *dirp;
10388             abi_long count = arg3;
10389 
10390             dirp = g_try_malloc(count);
10391             if (!dirp) {
10392                 return -TARGET_ENOMEM;
10393             }
10394 
10395             ret = get_errno(sys_getdents(arg1, dirp, count));
10396             if (!is_error(ret)) {
10397                 struct linux_dirent *de;
10398                 struct target_dirent *tde;
10399                 int len = ret;
10400                 int reclen, treclen;
10401                 int count1, tnamelen;
10402 
10403                 count1 = 0;
10404                 de = dirp;
10405                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                          g_free(dirp);
10406                     return -TARGET_EFAULT;
                      }
10407                 tde = target_dirp;
10408                 while (len > 0) {
10409                     reclen = de->d_reclen;
10410                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10411                     assert(tnamelen >= 0);
10412                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10413                     assert(count1 + treclen <= count);
10414                     tde->d_reclen = tswap16(treclen);
10415                     tde->d_ino = tswapal(de->d_ino);
10416                     tde->d_off = tswapal(de->d_off);
10417                     memcpy(tde->d_name, de->d_name, tnamelen);
10418                     de = (struct linux_dirent *)((char *)de + reclen);
10419                     len -= reclen;
10420                     tde = (struct target_dirent *)((char *)tde + treclen);
10421                     count1 += treclen;
10422                 }
10423                 ret = count1;
10424                 unlock_user(target_dirp, arg2, ret);
10425             }
10426             g_free(dirp);
10427         }
10428 #else
10429         {
10430             struct linux_dirent *dirp;
10431             abi_long count = arg3;
10432 
10433             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10434                 return -TARGET_EFAULT;
10435             ret = get_errno(sys_getdents(arg1, dirp, count));
10436             if (!is_error(ret)) {
10437                 struct linux_dirent *de;
10438                 int len = ret;
10439                 int reclen;
10440                 de = dirp;
10441                 while (len > 0) {
10442                     reclen = de->d_reclen;
10443                     if (reclen > len)
10444                         break;
10445                     de->d_reclen = tswap16(reclen);
10446                     tswapls(&de->d_ino);
10447                     tswapls(&de->d_off);
10448                     de = (struct linux_dirent *)((char *)de + reclen);
10449                     len -= reclen;
10450                 }
10451             }
10452             unlock_user(dirp, arg2, ret);
10453         }
10454 #endif
10455 #else
10456         /* Implement getdents in terms of getdents64 */
10457         {
10458             struct linux_dirent64 *dirp;
10459             abi_long count = arg3;
10460 
10461             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10462             if (!dirp) {
10463                 return -TARGET_EFAULT;
10464             }
10465             ret = get_errno(sys_getdents64(arg1, dirp, count));
10466             if (!is_error(ret)) {
10467                 /* Convert the dirent64 structs to target dirent.  We do this
10468                  * in-place, since we can guarantee that a target_dirent is no
10469                  * larger than a dirent64; however this means we have to be
10470                  * careful to read everything before writing in the new format.
10471                  */
10472                 struct linux_dirent64 *de;
10473                 struct target_dirent *tde;
10474                 int len = ret;
10475                 int tlen = 0;
10476 
10477                 de = dirp;
10478                 tde = (struct target_dirent *)dirp;
10479                 while (len > 0) {
10480                     int namelen, treclen;
10481                     int reclen = de->d_reclen;
10482                     uint64_t ino = de->d_ino;
10483                     int64_t off = de->d_off;
10484                     uint8_t type = de->d_type;
10485 
10486                     namelen = strlen(de->d_name);
10487                     treclen = offsetof(struct target_dirent, d_name)
10488                         + namelen + 2;
10489                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10490 
10491                     memmove(tde->d_name, de->d_name, namelen + 1);
10492                     tde->d_ino = tswapal(ino);
10493                     tde->d_off = tswapal(off);
10494                     tde->d_reclen = tswap16(treclen);
10495                     /* The target_dirent type is in what was formerly a padding
10496                      * byte at the end of the structure:
10497                      */
10498                     *(((char *)tde) + treclen - 1) = type;
10499 
10500                     de = (struct linux_dirent64 *)((char *)de + reclen);
10501                     tde = (struct target_dirent *)((char *)tde + treclen);
10502                     len -= reclen;
10503                     tlen += treclen;
10504                 }
10505                 ret = tlen;
10506             }
10507             unlock_user(dirp, arg2, ret);
10508         }
10509 #endif
10510         return ret;
10511 #endif /* TARGET_NR_getdents */
10512 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10513     case TARGET_NR_getdents64:
10514         {
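            /*
             * linux_dirent64 has the same layout on host and target, so
             * the records only need byte-swapping in place.
             */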
10515             struct linux_dirent64 *dirp;
10516             abi_long count = arg3;
10517             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10518                 return -TARGET_EFAULT;
10519             ret = get_errno(sys_getdents64(arg1, dirp, count));
10520             if (!is_error(ret)) {
10521                 struct linux_dirent64 *de;
10522                 int len = ret;
10523                 int reclen;
10524                 de = dirp;
10525                 while (len > 0) {
10526                     reclen = de->d_reclen;
10527                     if (reclen > len)
10528                         break;
10529                     de->d_reclen = tswap16(reclen);
10530                     tswap64s((uint64_t *)&de->d_ino);
10531                     tswap64s((uint64_t *)&de->d_off);
10532                     de = (struct linux_dirent64 *)((char *)de + reclen);
10533                     len -= reclen;
10534                 }
10535             }
10536             unlock_user(dirp, arg2, ret);
10537         }
10538         return ret;
10539 #endif /* TARGET_NR_getdents64 */
10540 #if defined(TARGET_NR__newselect)
10541     case TARGET_NR__newselect:
10542         return do_select(arg1, arg2, arg3, arg4, arg5);
10543 #endif
10544 #ifdef TARGET_NR_poll
10545     case TARGET_NR_poll:
10546         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10547 #endif
10548 #ifdef TARGET_NR_ppoll
10549     case TARGET_NR_ppoll:
10550         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10551 #endif
10552 #ifdef TARGET_NR_ppoll_time64
10553     case TARGET_NR_ppoll_time64:
10554         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10555 #endif
10556     case TARGET_NR_flock:
10557         /* NOTE: the flock operation constants (LOCK_SH, LOCK_EX,
10558            LOCK_UN, LOCK_NB) seem to be the same for every Linux platform */
10559         return get_errno(safe_flock(arg1, arg2));
10560     case TARGET_NR_readv:
10561         {
10562             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10563             if (vec != NULL) {
10564                 ret = get_errno(safe_readv(arg1, vec, arg3));
10565                 unlock_iovec(vec, arg2, arg3, 1);
10566             } else {
10567                 ret = -host_to_target_errno(errno);
10568             }
10569         }
10570         return ret;
10571     case TARGET_NR_writev:
10572         {
10573             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10574             if (vec != NULL) {
10575                 ret = get_errno(safe_writev(arg1, vec, arg3));
10576                 unlock_iovec(vec, arg2, arg3, 0);
10577             } else {
10578                 ret = -host_to_target_errno(errno);
10579             }
10580         }
10581         return ret;
10582 #if defined(TARGET_NR_preadv)
10583     case TARGET_NR_preadv:
10584         {
10585             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10586             if (vec != NULL) {
10587                 unsigned long low, high;
10588 
10589                 target_to_host_low_high(arg4, arg5, &low, &high);
10590                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10591                 unlock_iovec(vec, arg2, arg3, 1);
10592             } else {
10593                 ret = -host_to_target_errno(errno);
10594             }
10595         }
10596         return ret;
10597 #endif
10598 #if defined(TARGET_NR_pwritev)
10599     case TARGET_NR_pwritev:
10600         {
10601             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10602             if (vec != NULL) {
10603                 unsigned long low, high;
10604 
10605                 target_to_host_low_high(arg4, arg5, &low, &high);
10606                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10607                 unlock_iovec(vec, arg2, arg3, 0);
10608             } else {
10609                 ret = -host_to_target_errno(errno);
10610             }
10611         }
10612         return ret;
10613 #endif
10614     case TARGET_NR_getsid:
10615         return get_errno(getsid(arg1));
10616 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10617     case TARGET_NR_fdatasync:
10618         return get_errno(fdatasync(arg1));
10619 #endif
10620     case TARGET_NR_sched_getaffinity:
10621         {
10622             unsigned int mask_size;
10623             unsigned long *mask;
10624 
10625             /*
10626              * sched_getaffinity needs multiples of ulong, so need to take
10627              * care of mismatches between target ulong and host ulong sizes.
10628              */
10629             if (arg2 & (sizeof(abi_ulong) - 1)) {
10630                 return -TARGET_EINVAL;
10631             }
10632             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
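                  /*
                   * For example, with 8-byte host longs and arg2 == 12 this
                   * rounds mask_size up to 16 so the kernel sees whole ulongs.
                   */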
10633 
10634             mask = alloca(mask_size);
10635             memset(mask, 0, mask_size);
10636             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10637 
10638             if (!is_error(ret)) {
10639                 if (ret > arg2) {
10640                     /* More data was returned than the caller's buffer can hold.
10641                      * This only happens if sizeof(abi_long) < sizeof(long)
10642                      * and the caller passed us a buffer holding an odd number
10643                      * of abi_longs. If the host kernel is actually using the
10644                      * extra 4 bytes then fail EINVAL; otherwise we can just
10645                      * ignore them and only copy the interesting part.
10646                      */
10647                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10648                     if (numcpus > arg2 * 8) {
10649                         return -TARGET_EINVAL;
10650                     }
10651                     ret = arg2;
10652                 }
10653 
10654                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10655                     return -TARGET_EFAULT;
10656                 }
10657             }
10658         }
10659         return ret;
10660     case TARGET_NR_sched_setaffinity:
10661         {
10662             unsigned int mask_size;
10663             unsigned long *mask;
10664 
10665             /*
10666              * sched_setaffinity needs multiples of ulong, so need to take
10667              * care of mismatches between target ulong and host ulong sizes.
10668              */
10669             if (arg2 & (sizeof(abi_ulong) - 1)) {
10670                 return -TARGET_EINVAL;
10671             }
10672             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10673             mask = alloca(mask_size);
10674 
10675             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10676             if (ret) {
10677                 return ret;
10678             }
10679 
10680             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10681         }
10682     case TARGET_NR_getcpu:
10683         {
10684             unsigned cpu, node;
10685             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10686                                        arg2 ? &node : NULL,
10687                                        NULL));
10688             if (is_error(ret)) {
10689                 return ret;
10690             }
10691             if (arg1 && put_user_u32(cpu, arg1)) {
10692                 return -TARGET_EFAULT;
10693             }
10694             if (arg2 && put_user_u32(node, arg2)) {
10695                 return -TARGET_EFAULT;
10696             }
10697         }
10698         return ret;
10699     case TARGET_NR_sched_setparam:
10700         {
10701             struct sched_param *target_schp;
10702             struct sched_param schp;
10703 
10704             if (arg2 == 0) {
10705                 return -TARGET_EINVAL;
10706             }
10707             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10708                 return -TARGET_EFAULT;
10709             schp.sched_priority = tswap32(target_schp->sched_priority);
10710             unlock_user_struct(target_schp, arg2, 0);
10711             return get_errno(sched_setparam(arg1, &schp));
10712         }
10713     case TARGET_NR_sched_getparam:
10714         {
10715             struct sched_param *target_schp;
10716             struct sched_param schp;
10717 
10718             if (arg2 == 0) {
10719                 return -TARGET_EINVAL;
10720             }
10721             ret = get_errno(sched_getparam(arg1, &schp));
10722             if (!is_error(ret)) {
10723                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10724                     return -TARGET_EFAULT;
10725                 target_schp->sched_priority = tswap32(schp.sched_priority);
10726                 unlock_user_struct(target_schp, arg2, 1);
10727             }
10728         }
10729         return ret;
10730     case TARGET_NR_sched_setscheduler:
10731         {
10732             struct sched_param *target_schp;
10733             struct sched_param schp;
10734             if (arg3 == 0) {
10735                 return -TARGET_EINVAL;
10736             }
10737             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10738                 return -TARGET_EFAULT;
10739             schp.sched_priority = tswap32(target_schp->sched_priority);
10740             unlock_user_struct(target_schp, arg3, 0);
10741             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10742         }
10743     case TARGET_NR_sched_getscheduler:
10744         return get_errno(sched_getscheduler(arg1));
10745     case TARGET_NR_sched_yield:
10746         return get_errno(sched_yield());
10747     case TARGET_NR_sched_get_priority_max:
10748         return get_errno(sched_get_priority_max(arg1));
10749     case TARGET_NR_sched_get_priority_min:
10750         return get_errno(sched_get_priority_min(arg1));
10751 #ifdef TARGET_NR_sched_rr_get_interval
10752     case TARGET_NR_sched_rr_get_interval:
10753         {
10754             struct timespec ts;
10755             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10756             if (!is_error(ret)) {
10757                 ret = host_to_target_timespec(arg2, &ts);
10758             }
10759         }
10760         return ret;
10761 #endif
10762 #ifdef TARGET_NR_sched_rr_get_interval_time64
10763     case TARGET_NR_sched_rr_get_interval_time64:
10764         {
10765             struct timespec ts;
10766             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10767             if (!is_error(ret)) {
10768                 ret = host_to_target_timespec64(arg2, &ts);
10769             }
10770         }
10771         return ret;
10772 #endif
10773 #if defined(TARGET_NR_nanosleep)
10774     case TARGET_NR_nanosleep:
10775         {
            struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
10782         }
10783         return ret;
10784 #endif
10785     case TARGET_NR_prctl:
10786         switch (arg1) {
10787         case PR_GET_PDEATHSIG:
10788         {
10789             int deathsig;
10790             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10791             if (!is_error(ret) && arg2
10792                 && put_user_s32(deathsig, arg2)) {
10793                 return -TARGET_EFAULT;
10794             }
10795             return ret;
10796         }
10797 #ifdef PR_GET_NAME
10798         case PR_GET_NAME:
10799         {
10800             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10801             if (!name) {
10802                 return -TARGET_EFAULT;
10803             }
10804             ret = get_errno(prctl(arg1, (unsigned long)name,
10805                                   arg3, arg4, arg5));
10806             unlock_user(name, arg2, 16);
10807             return ret;
10808         }
10809         case PR_SET_NAME:
10810         {
10811             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10812             if (!name) {
10813                 return -TARGET_EFAULT;
10814             }
10815             ret = get_errno(prctl(arg1, (unsigned long)name,
10816                                   arg3, arg4, arg5));
10817             unlock_user(name, arg2, 0);
10818             return ret;
10819         }
10820 #endif
10821 #ifdef TARGET_MIPS
10822         case TARGET_PR_GET_FP_MODE:
10823         {
10824             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10825             ret = 0;
10826             if (env->CP0_Status & (1 << CP0St_FR)) {
10827                 ret |= TARGET_PR_FP_MODE_FR;
10828             }
10829             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10830                 ret |= TARGET_PR_FP_MODE_FRE;
10831             }
10832             return ret;
10833         }
10834         case TARGET_PR_SET_FP_MODE:
10835         {
10836             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10837             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10838             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10839             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10840             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10841 
10842             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10843                                             TARGET_PR_FP_MODE_FRE;
10844 
10845             /* If nothing to change, return right away, successfully.  */
10846             if (old_fr == new_fr && old_fre == new_fre) {
10847                 return 0;
10848             }
10849             /* Check the value is valid */
10850             if (arg2 & ~known_bits) {
10851                 return -TARGET_EOPNOTSUPP;
10852             }
10853             /* Setting FRE without FR is not supported.  */
10854             if (new_fre && !new_fr) {
10855                 return -TARGET_EOPNOTSUPP;
10856             }
10857             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10858                 /* FR1 is not supported */
10859                 return -TARGET_EOPNOTSUPP;
10860             }
10861             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10862                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10863                 /* cannot set FR=0 */
10864                 return -TARGET_EOPNOTSUPP;
10865             }
10866             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10867                 /* Cannot set FRE=1 */
10868                 return -TARGET_EOPNOTSUPP;
10869             }
10870 
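                  /*
                   * Changing FR changes how 64-bit FP values map onto the FPRs:
                   * with FR=0 a double lives in an even/odd pair of 32-bit
                   * halves, with FR=1 each register holds a full 64 bits, so
                   * migrate the upper halves between the two layouts below.
                   */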
10871             int i;
10872             fpr_t *fpr = env->active_fpu.fpr;
10873             for (i = 0; i < 32 ; i += 2) {
10874                 if (!old_fr && new_fr) {
10875                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10876                 } else if (old_fr && !new_fr) {
10877                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10878                 }
10879             }
10880 
10881             if (new_fr) {
10882                 env->CP0_Status |= (1 << CP0St_FR);
10883                 env->hflags |= MIPS_HFLAG_F64;
10884             } else {
10885                 env->CP0_Status &= ~(1 << CP0St_FR);
10886                 env->hflags &= ~MIPS_HFLAG_F64;
10887             }
10888             if (new_fre) {
10889                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10890                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10891                     env->hflags |= MIPS_HFLAG_FRE;
10892                 }
10893             } else {
10894                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10895                 env->hflags &= ~MIPS_HFLAG_FRE;
10896             }
10897 
10898             return 0;
10899         }
10900 #endif /* MIPS */
10901 #ifdef TARGET_AARCH64
10902         case TARGET_PR_SVE_SET_VL:
10903             /*
10904              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10905              * PR_SVE_VL_INHERIT.  Note the kernel definition
10906              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10907              * even though the current architectural maximum is VQ=16.
10908              */
10909             ret = -TARGET_EINVAL;
10910             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10911                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10912                 CPUARMState *env = cpu_env;
10913                 ARMCPU *cpu = env_archcpu(env);
10914                 uint32_t vq, old_vq;
10915 
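                      /*
                       * SVE vector lengths are multiples of 16 bytes (one
                       * quadword, VQ); ZCR_EL1.LEN stores VQ - 1, hence the
                       * +1/-1 adjustments and the final vq * 16 result.
                       */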
10916                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10917                 vq = MAX(arg2 / 16, 1);
10918                 vq = MIN(vq, cpu->sve_max_vq);
10919 
10920                 if (vq < old_vq) {
10921                     aarch64_sve_narrow_vq(env, vq);
10922                 }
10923                 env->vfp.zcr_el[1] = vq - 1;
10924                 arm_rebuild_hflags(env);
10925                 ret = vq * 16;
10926             }
10927             return ret;
10928         case TARGET_PR_SVE_GET_VL:
10929             ret = -TARGET_EINVAL;
10930             {
10931                 ARMCPU *cpu = env_archcpu(cpu_env);
10932                 if (cpu_isar_feature(aa64_sve, cpu)) {
10933                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10934                 }
10935             }
10936             return ret;
10937         case TARGET_PR_PAC_RESET_KEYS:
10938             {
10939                 CPUARMState *env = cpu_env;
10940                 ARMCPU *cpu = env_archcpu(env);
10941 
10942                 if (arg3 || arg4 || arg5) {
10943                     return -TARGET_EINVAL;
10944                 }
10945                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10946                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10947                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10948                                TARGET_PR_PAC_APGAKEY);
10949                     int ret = 0;
10950                     Error *err = NULL;
10951 
10952                     if (arg2 == 0) {
10953                         arg2 = all;
10954                     } else if (arg2 & ~all) {
10955                         return -TARGET_EINVAL;
10956                     }
10957                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10958                         ret |= qemu_guest_getrandom(&env->keys.apia,
10959                                                     sizeof(ARMPACKey), &err);
10960                     }
10961                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10962                         ret |= qemu_guest_getrandom(&env->keys.apib,
10963                                                     sizeof(ARMPACKey), &err);
10964                     }
10965                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10966                         ret |= qemu_guest_getrandom(&env->keys.apda,
10967                                                     sizeof(ARMPACKey), &err);
10968                     }
10969                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10970                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10971                                                     sizeof(ARMPACKey), &err);
10972                     }
10973                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10974                         ret |= qemu_guest_getrandom(&env->keys.apga,
10975                                                     sizeof(ARMPACKey), &err);
10976                     }
10977                     if (ret != 0) {
10978                         /*
10979                          * Some unknown failure in the crypto.  The best
10980                          * we can do is log it and fail the syscall.
10981                          * The real syscall cannot fail this way.
10982                          */
10983                         qemu_log_mask(LOG_UNIMP,
10984                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10985                                       error_get_pretty(err));
10986                         error_free(err);
10987                         return -TARGET_EIO;
10988                     }
10989                     return 0;
10990                 }
10991             }
10992             return -TARGET_EINVAL;
10993 #endif /* AARCH64 */
10994         case PR_GET_SECCOMP:
10995         case PR_SET_SECCOMP:
10996             /* Disable seccomp to prevent the target disabling syscalls we
10997              * need. */
10998             return -TARGET_EINVAL;
10999         default:
11000             /* Most prctl options have no pointer arguments */
11001             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
11002         }
11003         break;
11004 #ifdef TARGET_NR_arch_prctl
11005     case TARGET_NR_arch_prctl:
11006         return do_arch_prctl(cpu_env, arg1, arg2);
11007 #endif
11008 #ifdef TARGET_NR_pread64
11009     case TARGET_NR_pread64:
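              /*
               * On 32-bit ABIs that pass 64-bit values in aligned register
               * pairs, a padding slot precedes the offset, so its low/high
               * words arrive in arg5/arg6 rather than arg4/arg5.
               */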
11010         if (regpairs_aligned(cpu_env, num)) {
11011             arg4 = arg5;
11012             arg5 = arg6;
11013         }
11014         if (arg2 == 0 && arg3 == 0) {
11015             /* Special-case NULL buffer and zero length, which should succeed */
11016             p = 0;
11017         } else {
11018             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11019             if (!p) {
11020                 return -TARGET_EFAULT;
11021             }
11022         }
11023         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11024         unlock_user(p, arg2, ret);
11025         return ret;
11026     case TARGET_NR_pwrite64:
11027         if (regpairs_aligned(cpu_env, num)) {
11028             arg4 = arg5;
11029             arg5 = arg6;
11030         }
11031         if (arg2 == 0 && arg3 == 0) {
11032             /* Special-case NULL buffer and zero length, which should succeed */
11033             p = 0;
11034         } else {
11035             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11036             if (!p) {
11037                 return -TARGET_EFAULT;
11038             }
11039         }
11040         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11041         unlock_user(p, arg2, 0);
11042         return ret;
11043 #endif
11044     case TARGET_NR_getcwd:
11045         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11046             return -TARGET_EFAULT;
11047         ret = get_errno(sys_getcwd1(p, arg2));
11048         unlock_user(p, arg1, ret);
11049         return ret;
11050     case TARGET_NR_capget:
11051     case TARGET_NR_capset:
11052     {
11053         struct target_user_cap_header *target_header;
11054         struct target_user_cap_data *target_data = NULL;
11055         struct __user_cap_header_struct header;
11056         struct __user_cap_data_struct data[2];
11057         struct __user_cap_data_struct *dataptr = NULL;
11058         int i, target_datalen;
11059         int data_items = 1;
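              /*
               * The original capability ABI (_LINUX_CAPABILITY_VERSION) uses a
               * single 32-bit data struct; later versions use an array of two
               * to cover 64 capability bits, hence data_items of 1 or 2.
               */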
11060 
11061         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11062             return -TARGET_EFAULT;
11063         }
11064         header.version = tswap32(target_header->version);
11065         header.pid = tswap32(target_header->pid);
11066 
11067         if (header.version != _LINUX_CAPABILITY_VERSION) {
11068             /* Versions 2 and up take a pointer to two user_data structs */
11069             data_items = 2;
11070         }
11071 
11072         target_datalen = sizeof(*target_data) * data_items;
11073 
11074         if (arg2) {
11075             if (num == TARGET_NR_capget) {
11076                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11077             } else {
11078                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11079             }
11080             if (!target_data) {
11081                 unlock_user_struct(target_header, arg1, 0);
11082                 return -TARGET_EFAULT;
11083             }
11084 
11085             if (num == TARGET_NR_capset) {
11086                 for (i = 0; i < data_items; i++) {
11087                     data[i].effective = tswap32(target_data[i].effective);
11088                     data[i].permitted = tswap32(target_data[i].permitted);
11089                     data[i].inheritable = tswap32(target_data[i].inheritable);
11090                 }
11091             }
11092 
11093             dataptr = data;
11094         }
11095 
11096         if (num == TARGET_NR_capget) {
11097             ret = get_errno(capget(&header, dataptr));
11098         } else {
11099             ret = get_errno(capset(&header, dataptr));
11100         }
11101 
11102         /* The kernel always updates version for both capget and capset */
11103         target_header->version = tswap32(header.version);
11104         unlock_user_struct(target_header, arg1, 1);
11105 
11106         if (arg2) {
11107             if (num == TARGET_NR_capget) {
11108                 for (i = 0; i < data_items; i++) {
11109                     target_data[i].effective = tswap32(data[i].effective);
11110                     target_data[i].permitted = tswap32(data[i].permitted);
11111                     target_data[i].inheritable = tswap32(data[i].inheritable);
11112                 }
11113                 unlock_user(target_data, arg2, target_datalen);
11114             } else {
11115                 unlock_user(target_data, arg2, 0);
11116             }
11117         }
11118         return ret;
11119     }
11120     case TARGET_NR_sigaltstack:
11121         return do_sigaltstack(arg1, arg2,
11122                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11123 
11124 #ifdef CONFIG_SENDFILE
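          /*
           * sendfile passes the offset as a target abi_long while sendfile64
           * always uses 64 bits; both copy the offset in, call the host
           * sendfile() and write the updated offset back on success.
           */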
11125 #ifdef TARGET_NR_sendfile
11126     case TARGET_NR_sendfile:
11127     {
11128         off_t *offp = NULL;
11129         off_t off;
11130         if (arg3) {
11131             ret = get_user_sal(off, arg3);
11132             if (is_error(ret)) {
11133                 return ret;
11134             }
11135             offp = &off;
11136         }
11137         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11138         if (!is_error(ret) && arg3) {
11139             abi_long ret2 = put_user_sal(off, arg3);
11140             if (is_error(ret2)) {
11141                 ret = ret2;
11142             }
11143         }
11144         return ret;
11145     }
11146 #endif
11147 #ifdef TARGET_NR_sendfile64
11148     case TARGET_NR_sendfile64:
11149     {
11150         off_t *offp = NULL;
11151         off_t off;
11152         if (arg3) {
11153             ret = get_user_s64(off, arg3);
11154             if (is_error(ret)) {
11155                 return ret;
11156             }
11157             offp = &off;
11158         }
11159         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11160         if (!is_error(ret) && arg3) {
11161             abi_long ret2 = put_user_s64(off, arg3);
11162             if (is_error(ret2)) {
11163                 ret = ret2;
11164             }
11165         }
11166         return ret;
11167     }
11168 #endif
11169 #endif
11170 #ifdef TARGET_NR_vfork
11171     case TARGET_NR_vfork:
11172         return get_errno(do_fork(cpu_env,
11173                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11174                          0, 0, 0, 0));
11175 #endif
11176 #ifdef TARGET_NR_ugetrlimit
11177     case TARGET_NR_ugetrlimit:
11178     {
11179         struct rlimit rlim;
11180         int resource = target_to_host_resource(arg1);
11181         ret = get_errno(getrlimit(resource, &rlim));
11182         if (!is_error(ret)) {
11183             struct target_rlimit *target_rlim;
11184             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11185                 return -TARGET_EFAULT;
11186             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11187             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11188             unlock_user_struct(target_rlim, arg2, 1);
11189         }
11190         return ret;
11191     }
11192 #endif
11193 #ifdef TARGET_NR_truncate64
11194     case TARGET_NR_truncate64:
11195         if (!(p = lock_user_string(arg1)))
11196             return -TARGET_EFAULT;
11197         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11198         unlock_user(p, arg1, 0);
11199         return ret;
11200 #endif
11201 #ifdef TARGET_NR_ftruncate64
11202     case TARGET_NR_ftruncate64:
11203         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11204 #endif
11205 #ifdef TARGET_NR_stat64
11206     case TARGET_NR_stat64:
11207         if (!(p = lock_user_string(arg1))) {
11208             return -TARGET_EFAULT;
11209         }
11210         ret = get_errno(stat(path(p), &st));
11211         unlock_user(p, arg1, 0);
11212         if (!is_error(ret))
11213             ret = host_to_target_stat64(cpu_env, arg2, &st);
11214         return ret;
11215 #endif
11216 #ifdef TARGET_NR_lstat64
11217     case TARGET_NR_lstat64:
11218         if (!(p = lock_user_string(arg1))) {
11219             return -TARGET_EFAULT;
11220         }
11221         ret = get_errno(lstat(path(p), &st));
11222         unlock_user(p, arg1, 0);
11223         if (!is_error(ret))
11224             ret = host_to_target_stat64(cpu_env, arg2, &st);
11225         return ret;
11226 #endif
11227 #ifdef TARGET_NR_fstat64
11228     case TARGET_NR_fstat64:
11229         ret = get_errno(fstat(arg1, &st));
11230         if (!is_error(ret))
11231             ret = host_to_target_stat64(cpu_env, arg2, &st);
11232         return ret;
11233 #endif
11234 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11235 #ifdef TARGET_NR_fstatat64
11236     case TARGET_NR_fstatat64:
11237 #endif
11238 #ifdef TARGET_NR_newfstatat
11239     case TARGET_NR_newfstatat:
11240 #endif
11241         if (!(p = lock_user_string(arg2))) {
11242             return -TARGET_EFAULT;
11243         }
11244         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11245         unlock_user(p, arg2, 0);
11246         if (!is_error(ret))
11247             ret = host_to_target_stat64(cpu_env, arg3, &st);
11248         return ret;
11249 #endif
11250 #if defined(TARGET_NR_statx)
11251     case TARGET_NR_statx:
11252         {
11253             struct target_statx *target_stx;
11254             int dirfd = arg1;
11255             int flags = arg3;
11256 
11257             p = lock_user_string(arg2);
11258             if (p == NULL) {
11259                 return -TARGET_EFAULT;
11260             }
11261 #if defined(__NR_statx)
11262             {
11263                 /*
11264                  * It is assumed that struct statx is architecture independent.
11265                  */
11266                 struct target_statx host_stx;
11267                 int mask = arg4;
11268 
11269                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11270                 if (!is_error(ret)) {
11271                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11272                         unlock_user(p, arg2, 0);
11273                         return -TARGET_EFAULT;
11274                     }
11275                 }
11276 
11277                 if (ret != -TARGET_ENOSYS) {
11278                     unlock_user(p, arg2, 0);
11279                     return ret;
11280                 }
11281             }
11282 #endif
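                  /*
                   * Fall back to fstatat() when the host statx syscall is not
                   * available (or returns ENOSYS) and fill in the subset of
                   * statx fields that a plain struct stat provides.
                   */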
11283             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11284             unlock_user(p, arg2, 0);
11285 
11286             if (!is_error(ret)) {
11287                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11288                     return -TARGET_EFAULT;
11289                 }
11290                 memset(target_stx, 0, sizeof(*target_stx));
11291                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11292                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11293                 __put_user(st.st_ino, &target_stx->stx_ino);
11294                 __put_user(st.st_mode, &target_stx->stx_mode);
11295                 __put_user(st.st_uid, &target_stx->stx_uid);
11296                 __put_user(st.st_gid, &target_stx->stx_gid);
11297                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11298                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11299                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11300                 __put_user(st.st_size, &target_stx->stx_size);
11301                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11302                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11303                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11304                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11305                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11306                 unlock_user_struct(target_stx, arg5, 1);
11307             }
11308         }
11309         return ret;
11310 #endif
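          /*
           * The uid/gid syscalls below are the legacy 16-bit variants: ids go
           * through the low2high/high2low helpers (which, as the names
           * suggest, widen the 16-bit values and preserve the -1 sentinel),
           * while the *32 variants further down pass ids through unchanged.
           */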
11311 #ifdef TARGET_NR_lchown
11312     case TARGET_NR_lchown:
11313         if (!(p = lock_user_string(arg1)))
11314             return -TARGET_EFAULT;
11315         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11316         unlock_user(p, arg1, 0);
11317         return ret;
11318 #endif
11319 #ifdef TARGET_NR_getuid
11320     case TARGET_NR_getuid:
11321         return get_errno(high2lowuid(getuid()));
11322 #endif
11323 #ifdef TARGET_NR_getgid
11324     case TARGET_NR_getgid:
11325         return get_errno(high2lowgid(getgid()));
11326 #endif
11327 #ifdef TARGET_NR_geteuid
11328     case TARGET_NR_geteuid:
11329         return get_errno(high2lowuid(geteuid()));
11330 #endif
11331 #ifdef TARGET_NR_getegid
11332     case TARGET_NR_getegid:
11333         return get_errno(high2lowgid(getegid()));
11334 #endif
11335     case TARGET_NR_setreuid:
11336         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11337     case TARGET_NR_setregid:
11338         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11339     case TARGET_NR_getgroups:
11340         {
11341             int gidsetsize = arg1;
11342             target_id *target_grouplist;
11343             gid_t *grouplist;
11344             int i;
11345 
11346             grouplist = alloca(gidsetsize * sizeof(gid_t));
11347             ret = get_errno(getgroups(gidsetsize, grouplist));
11348             if (gidsetsize == 0)
11349                 return ret;
11350             if (!is_error(ret)) {
11351                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11352                 if (!target_grouplist)
11353                     return -TARGET_EFAULT;
11354                 for (i = 0; i < ret; i++)
11355                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11356                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11357             }
11358         }
11359         return ret;
11360     case TARGET_NR_setgroups:
11361         {
11362             int gidsetsize = arg1;
11363             target_id *target_grouplist;
11364             gid_t *grouplist = NULL;
11365             int i;
11366             if (gidsetsize) {
11367                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11368                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11369                 if (!target_grouplist) {
11370                     return -TARGET_EFAULT;
11371                 }
11372                 for (i = 0; i < gidsetsize; i++) {
11373                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11374                 }
11375                 unlock_user(target_grouplist, arg2, 0);
11376             }
11377             return get_errno(setgroups(gidsetsize, grouplist));
11378         }
11379     case TARGET_NR_fchown:
11380         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11381 #if defined(TARGET_NR_fchownat)
11382     case TARGET_NR_fchownat:
11383         if (!(p = lock_user_string(arg2)))
11384             return -TARGET_EFAULT;
11385         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11386                                  low2highgid(arg4), arg5));
11387         unlock_user(p, arg2, 0);
11388         return ret;
11389 #endif
11390 #ifdef TARGET_NR_setresuid
11391     case TARGET_NR_setresuid:
11392         return get_errno(sys_setresuid(low2highuid(arg1),
11393                                        low2highuid(arg2),
11394                                        low2highuid(arg3)));
11395 #endif
11396 #ifdef TARGET_NR_getresuid
11397     case TARGET_NR_getresuid:
11398         {
11399             uid_t ruid, euid, suid;
11400             ret = get_errno(getresuid(&ruid, &euid, &suid));
11401             if (!is_error(ret)) {
11402                 if (put_user_id(high2lowuid(ruid), arg1)
11403                     || put_user_id(high2lowuid(euid), arg2)
11404                     || put_user_id(high2lowuid(suid), arg3))
11405                     return -TARGET_EFAULT;
11406             }
11407         }
11408         return ret;
11409 #endif
11410 #ifdef TARGET_NR_setresgid
11411     case TARGET_NR_setresgid:
11412         return get_errno(sys_setresgid(low2highgid(arg1),
11413                                        low2highgid(arg2),
11414                                        low2highgid(arg3)));
11415 #endif
11416 #ifdef TARGET_NR_getresgid
11417     case TARGET_NR_getresgid:
11418         {
11419             gid_t rgid, egid, sgid;
11420             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11421             if (!is_error(ret)) {
11422                 if (put_user_id(high2lowgid(rgid), arg1)
11423                     || put_user_id(high2lowgid(egid), arg2)
11424                     || put_user_id(high2lowgid(sgid), arg3))
11425                     return -TARGET_EFAULT;
11426             }
11427         }
11428         return ret;
11429 #endif
11430 #ifdef TARGET_NR_chown
11431     case TARGET_NR_chown:
11432         if (!(p = lock_user_string(arg1)))
11433             return -TARGET_EFAULT;
11434         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11435         unlock_user(p, arg1, 0);
11436         return ret;
11437 #endif
11438     case TARGET_NR_setuid:
11439         return get_errno(sys_setuid(low2highuid(arg1)));
11440     case TARGET_NR_setgid:
11441         return get_errno(sys_setgid(low2highgid(arg1)));
11442     case TARGET_NR_setfsuid:
11443         return get_errno(setfsuid(arg1));
11444     case TARGET_NR_setfsgid:
11445         return get_errno(setfsgid(arg1));
11446 
11447 #ifdef TARGET_NR_lchown32
11448     case TARGET_NR_lchown32:
11449         if (!(p = lock_user_string(arg1)))
11450             return -TARGET_EFAULT;
11451         ret = get_errno(lchown(p, arg2, arg3));
11452         unlock_user(p, arg1, 0);
11453         return ret;
11454 #endif
11455 #ifdef TARGET_NR_getuid32
11456     case TARGET_NR_getuid32:
11457         return get_errno(getuid());
11458 #endif
11459 
11460 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11461     /* Alpha specific */
11462     case TARGET_NR_getxuid:
11463         {
11464             uid_t euid;
11465             euid = geteuid();
11466             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11467         }
11468         return get_errno(getuid());
11469 #endif
11470 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11471     /* Alpha specific */
11472     case TARGET_NR_getxgid:
11473         {
11474             gid_t egid;
11475             egid = getegid();
11476             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11477         }
11478         return get_errno(getgid());
11479 #endif
11480 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11481     /* Alpha specific */
11482     case TARGET_NR_osf_getsysinfo:
11483         ret = -TARGET_EOPNOTSUPP;
11484         switch (arg1) {
11485           case TARGET_GSI_IEEE_FP_CONTROL:
11486             {
11487                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11488                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11489 
11490                 swcr &= ~SWCR_STATUS_MASK;
11491                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11492 
11493                 if (put_user_u64(swcr, arg2))
11494                     return -TARGET_EFAULT;
11495                 ret = 0;
11496             }
11497             break;
11498 
11499           /* case GSI_IEEE_STATE_AT_SIGNAL:
11500              -- Not implemented in linux kernel.
11501              case GSI_UACPROC:
11502              -- Retrieves current unaligned access state; not much used.
11503              case GSI_PROC_TYPE:
11504              -- Retrieves implver information; surely not used.
11505              case GSI_GET_HWRPB:
11506              -- Grabs a copy of the HWRPB; surely not used.
11507           */
11508         }
11509         return ret;
11510 #endif
11511 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11512     /* Alpha specific */
11513     case TARGET_NR_osf_setsysinfo:
11514         ret = -TARGET_EOPNOTSUPP;
11515         switch (arg1) {
11516           case TARGET_SSI_IEEE_FP_CONTROL:
11517             {
11518                 uint64_t swcr, fpcr;
11519 
11520                 if (get_user_u64(swcr, arg2)) {
11521                     return -TARGET_EFAULT;
11522                 }
11523 
11524                 /*
11525                  * The kernel calls swcr_update_status to update the
11526                  * status bits from the fpcr at every point that it
11527                  * could be queried.  Therefore, we store the status
11528                  * bits only in FPCR.
11529                  */
11530                 ((CPUAlphaState *)cpu_env)->swcr
11531                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11532 
11533                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11534                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11535                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11536                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11537                 ret = 0;
11538             }
11539             break;
11540 
11541           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11542             {
11543                 uint64_t exc, fpcr, fex;
11544 
11545                 if (get_user_u64(exc, arg2)) {
11546                     return -TARGET_EFAULT;
11547                 }
11548                 exc &= SWCR_STATUS_MASK;
11549                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11550 
11551                 /* Old exceptions are not signaled.  */
11552                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11553                 fex = exc & ~fex;
11554                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11555                 fex &= ((CPUArchState *)cpu_env)->swcr;
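                      /*
                       * fex is now the set of newly-raised exceptions whose
                       * traps are enabled in the software control word; if any
                       * remain, deliver a SIGFPE to the guest below.
                       */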
11556 
11557                 /* Update the hardware fpcr.  */
11558                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11559                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11560 
11561                 if (fex) {
11562                     int si_code = TARGET_FPE_FLTUNK;
11563                     target_siginfo_t info;
11564 
11565                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11566                         si_code = TARGET_FPE_FLTUND;
11567                     }
11568                     if (fex & SWCR_TRAP_ENABLE_INE) {
11569                         si_code = TARGET_FPE_FLTRES;
11570                     }
11571                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11572                         si_code = TARGET_FPE_FLTUND;
11573                     }
11574                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11575                         si_code = TARGET_FPE_FLTOVF;
11576                     }
11577                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11578                         si_code = TARGET_FPE_FLTDIV;
11579                     }
11580                     if (fex & SWCR_TRAP_ENABLE_INV) {
11581                         si_code = TARGET_FPE_FLTINV;
11582                     }
11583 
11584                     info.si_signo = SIGFPE;
11585                     info.si_errno = 0;
11586                     info.si_code = si_code;
11587                     info._sifields._sigfault._addr
11588                         = ((CPUArchState *)cpu_env)->pc;
11589                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11590                                  QEMU_SI_FAULT, &info);
11591                 }
11592                 ret = 0;
11593             }
11594             break;
11595 
11596           /* case SSI_NVPAIRS:
11597              -- Used with SSIN_UACPROC to enable unaligned accesses.
11598              case SSI_IEEE_STATE_AT_SIGNAL:
11599              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11600              -- Not implemented in linux kernel
11601           */
11602         }
11603         return ret;
11604 #endif
11605 #ifdef TARGET_NR_osf_sigprocmask
11606     /* Alpha specific.  */
11607     case TARGET_NR_osf_sigprocmask:
11608         {
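                  /*
                   * Unlike sigprocmask(2), the OSF variant returns the previous
                   * mask as the syscall result rather than through a user-space
                   * pointer, hence the ret = mask at the end.
                   */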
11609             abi_ulong mask;
11610             int how;
11611             sigset_t set, oldset;
11612 
11613             switch (arg1) {
11614             case TARGET_SIG_BLOCK:
11615                 how = SIG_BLOCK;
11616                 break;
11617             case TARGET_SIG_UNBLOCK:
11618                 how = SIG_UNBLOCK;
11619                 break;
11620             case TARGET_SIG_SETMASK:
11621                 how = SIG_SETMASK;
11622                 break;
11623             default:
11624                 return -TARGET_EINVAL;
11625             }
11626             mask = arg2;
11627             target_to_host_old_sigset(&set, &mask);
11628             ret = do_sigprocmask(how, &set, &oldset);
11629             if (!ret) {
11630                 host_to_target_old_sigset(&mask, &oldset);
11631                 ret = mask;
11632             }
11633         }
11634         return ret;
11635 #endif
11636 
11637 #ifdef TARGET_NR_getgid32
11638     case TARGET_NR_getgid32:
11639         return get_errno(getgid());
11640 #endif
11641 #ifdef TARGET_NR_geteuid32
11642     case TARGET_NR_geteuid32:
11643         return get_errno(geteuid());
11644 #endif
11645 #ifdef TARGET_NR_getegid32
11646     case TARGET_NR_getegid32:
11647         return get_errno(getegid());
11648 #endif
11649 #ifdef TARGET_NR_setreuid32
11650     case TARGET_NR_setreuid32:
11651         return get_errno(setreuid(arg1, arg2));
11652 #endif
11653 #ifdef TARGET_NR_setregid32
11654     case TARGET_NR_setregid32:
11655         return get_errno(setregid(arg1, arg2));
11656 #endif
11657 #ifdef TARGET_NR_getgroups32
11658     case TARGET_NR_getgroups32:
11659         {
11660             int gidsetsize = arg1;
11661             uint32_t *target_grouplist;
11662             gid_t *grouplist;
11663             int i;
11664 
11665             grouplist = alloca(gidsetsize * sizeof(gid_t));
11666             ret = get_errno(getgroups(gidsetsize, grouplist));
11667             if (gidsetsize == 0)
11668                 return ret;
11669             if (!is_error(ret)) {
11670                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11671                 if (!target_grouplist) {
11672                     return -TARGET_EFAULT;
11673                 }
11674                 for (i = 0; i < ret; i++)
11675                     target_grouplist[i] = tswap32(grouplist[i]);
11676                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11677             }
11678         }
11679         return ret;
11680 #endif
11681 #ifdef TARGET_NR_setgroups32
11682     case TARGET_NR_setgroups32:
11683         {
11684             int gidsetsize = arg1;
11685             uint32_t *target_grouplist;
11686             gid_t *grouplist;
11687             int i;
11688 
11689             grouplist = alloca(gidsetsize * sizeof(gid_t));
11690             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11691             if (!target_grouplist) {
11692                 return -TARGET_EFAULT;
11693             }
11694             for (i = 0; i < gidsetsize; i++)
11695                 grouplist[i] = tswap32(target_grouplist[i]);
11696             unlock_user(target_grouplist, arg2, 0);
11697             return get_errno(setgroups(gidsetsize, grouplist));
11698         }
11699 #endif
11700 #ifdef TARGET_NR_fchown32
11701     case TARGET_NR_fchown32:
11702         return get_errno(fchown(arg1, arg2, arg3));
11703 #endif
11704 #ifdef TARGET_NR_setresuid32
11705     case TARGET_NR_setresuid32:
11706         return get_errno(sys_setresuid(arg1, arg2, arg3));
11707 #endif
11708 #ifdef TARGET_NR_getresuid32
11709     case TARGET_NR_getresuid32:
11710         {
11711             uid_t ruid, euid, suid;
11712             ret = get_errno(getresuid(&ruid, &euid, &suid));
11713             if (!is_error(ret)) {
11714                 if (put_user_u32(ruid, arg1)
11715                     || put_user_u32(euid, arg2)
11716                     || put_user_u32(suid, arg3))
11717                     return -TARGET_EFAULT;
11718             }
11719         }
11720         return ret;
11721 #endif
11722 #ifdef TARGET_NR_setresgid32
11723     case TARGET_NR_setresgid32:
11724         return get_errno(sys_setresgid(arg1, arg2, arg3));
11725 #endif
11726 #ifdef TARGET_NR_getresgid32
11727     case TARGET_NR_getresgid32:
11728         {
11729             gid_t rgid, egid, sgid;
11730             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11731             if (!is_error(ret)) {
11732                 if (put_user_u32(rgid, arg1)
11733                     || put_user_u32(egid, arg2)
11734                     || put_user_u32(sgid, arg3))
11735                     return -TARGET_EFAULT;
11736             }
11737         }
11738         return ret;
11739 #endif
11740 #ifdef TARGET_NR_chown32
11741     case TARGET_NR_chown32:
11742         if (!(p = lock_user_string(arg1)))
11743             return -TARGET_EFAULT;
11744         ret = get_errno(chown(p, arg2, arg3));
11745         unlock_user(p, arg1, 0);
11746         return ret;
11747 #endif
11748 #ifdef TARGET_NR_setuid32
11749     case TARGET_NR_setuid32:
11750         return get_errno(sys_setuid(arg1));
11751 #endif
11752 #ifdef TARGET_NR_setgid32
11753     case TARGET_NR_setgid32:
11754         return get_errno(sys_setgid(arg1));
11755 #endif
11756 #ifdef TARGET_NR_setfsuid32
11757     case TARGET_NR_setfsuid32:
11758         return get_errno(setfsuid(arg1));
11759 #endif
11760 #ifdef TARGET_NR_setfsgid32
11761     case TARGET_NR_setfsgid32:
11762         return get_errno(setfsgid(arg1));
11763 #endif
11764 #ifdef TARGET_NR_mincore
11765     case TARGET_NR_mincore:
11766         {
11767             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11768             if (!a) {
11769                 return -TARGET_ENOMEM;
11770             }
11771             p = lock_user_string(arg3);
11772             if (!p) {
11773                 ret = -TARGET_EFAULT;
11774             } else {
11775                 ret = get_errno(mincore(a, arg2, p));
11776                 unlock_user(p, arg3, ret);
11777             }
11778             unlock_user(a, arg1, 0);
11779         }
11780         return ret;
11781 #endif
11782 #ifdef TARGET_NR_arm_fadvise64_64
11783     case TARGET_NR_arm_fadvise64_64:
11784         /* arm_fadvise64_64 looks like fadvise64_64 but
11785          * with different argument order: fd, advice, offset, len
11786          * rather than the usual fd, offset, len, advice.
11787          * Note that offset and len are both 64-bit so appear as
11788          * pairs of 32-bit registers.
11789          */
11790         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11791                             target_offset64(arg5, arg6), arg2);
11792         return -host_to_target_errno(ret);
11793 #endif
11794 
11795 #if TARGET_ABI_BITS == 32
11796 
11797 #ifdef TARGET_NR_fadvise64_64
11798     case TARGET_NR_fadvise64_64:
11799 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11800         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11801         ret = arg2;
11802         arg2 = arg3;
11803         arg3 = arg4;
11804         arg4 = arg5;
11805         arg5 = arg6;
11806         arg6 = ret;
11807 #else
11808         /* 6 args: fd, offset (high, low), len (high, low), advice */
11809         if (regpairs_aligned(cpu_env, num)) {
11810             /* offset is in (3,4), len in (5,6) and advice in 7 */
11811             arg2 = arg3;
11812             arg3 = arg4;
11813             arg4 = arg5;
11814             arg5 = arg6;
11815             arg6 = arg7;
11816         }
11817 #endif
11818         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11819                             target_offset64(arg4, arg5), arg6);
11820         return -host_to_target_errno(ret);
11821 #endif
11822 
11823 #ifdef TARGET_NR_fadvise64
11824     case TARGET_NR_fadvise64:
11825         /* 5 args: fd, offset (high, low), len, advice */
11826         if (regpairs_aligned(cpu_env, num)) {
11827             /* offset is in (3,4), len in 5 and advice in 6 */
11828             arg2 = arg3;
11829             arg3 = arg4;
11830             arg4 = arg5;
11831             arg5 = arg6;
11832         }
11833         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11834         return -host_to_target_errno(ret);
11835 #endif
11836 
11837 #else /* not a 32-bit ABI */
11838 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11839 #ifdef TARGET_NR_fadvise64_64
11840     case TARGET_NR_fadvise64_64:
11841 #endif
11842 #ifdef TARGET_NR_fadvise64
11843     case TARGET_NR_fadvise64:
11844 #endif
11845 #ifdef TARGET_S390X
11846         switch (arg4) {
11847         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11848         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11849         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11850         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11851         default: break;
11852         }
11853 #endif
11854         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11855 #endif
11856 #endif /* end of 64-bit ABI fadvise handling */
11857 
11858 #ifdef TARGET_NR_madvise
11859     case TARGET_NR_madvise:
11860         /* A straight passthrough may not be safe because qemu sometimes
11861            turns private file-backed mappings into anonymous mappings.
11862            This will break MADV_DONTNEED.
11863            This is a hint, so ignoring and returning success is ok.  */
11864         return 0;
11865 #endif
11866 #ifdef TARGET_NR_fcntl64
11867     case TARGET_NR_fcntl64:
11868     {
11869         int cmd;
11870         struct flock64 fl;
11871         from_flock64_fn *copyfrom = copy_from_user_flock64;
11872         to_flock64_fn *copyto = copy_to_user_flock64;
11873 
11874 #ifdef TARGET_ARM
11875         if (!((CPUARMState *)cpu_env)->eabi) {
11876             copyfrom = copy_from_user_oabi_flock64;
11877             copyto = copy_to_user_oabi_flock64;
11878         }
11879 #endif
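              /*
               * ARM OABI does not 8-byte-align 64-bit members, so its struct
               * flock64 is laid out (and padded) differently from the EABI
               * one; that is why separate oabi copy helpers are selected above.
               */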
11880 
11881         cmd = target_to_host_fcntl_cmd(arg2);
11882         if (cmd == -TARGET_EINVAL) {
11883             return cmd;
11884         }
11885 
11886         switch (arg2) {
11887         case TARGET_F_GETLK64:
11888             ret = copyfrom(&fl, arg3);
11889             if (ret) {
11890                 break;
11891             }
11892             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11893             if (ret == 0) {
11894                 ret = copyto(arg3, &fl);
11895             }
11896             break;
11897 
11898         case TARGET_F_SETLK64:
11899         case TARGET_F_SETLKW64:
11900             ret = copyfrom(&fl, arg3);
11901             if (ret) {
11902                 break;
11903             }
11904             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11905             break;
11906         default:
11907             ret = do_fcntl(arg1, arg2, arg3);
11908             break;
11909         }
11910         return ret;
11911     }
11912 #endif
11913 #ifdef TARGET_NR_cacheflush
11914     case TARGET_NR_cacheflush:
11915         /* self-modifying code is handled automatically, so nothing needed */
11916         return 0;
11917 #endif
11918 #ifdef TARGET_NR_getpagesize
11919     case TARGET_NR_getpagesize:
11920         return TARGET_PAGE_SIZE;
11921 #endif
11922     case TARGET_NR_gettid:
11923         return get_errno(sys_gettid());
11924 #ifdef TARGET_NR_readahead
11925     case TARGET_NR_readahead:
11926 #if TARGET_ABI_BITS == 32
11927         if (regpairs_aligned(cpu_env, num)) {
11928             arg2 = arg3;
11929             arg3 = arg4;
11930             arg4 = arg5;
11931         }
11932         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11933 #else
11934         ret = get_errno(readahead(arg1, arg2, arg3));
11935 #endif
11936         return ret;
11937 #endif
11938 #ifdef CONFIG_ATTR
11939 #ifdef TARGET_NR_setxattr
11940     case TARGET_NR_listxattr:
11941     case TARGET_NR_llistxattr:
11942     {
11943         void *p, *b = 0;
11944         if (arg2) {
11945             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11946             if (!b) {
11947                 return -TARGET_EFAULT;
11948             }
11949         }
11950         p = lock_user_string(arg1);
11951         if (p) {
11952             if (num == TARGET_NR_listxattr) {
11953                 ret = get_errno(listxattr(p, b, arg3));
11954             } else {
11955                 ret = get_errno(llistxattr(p, b, arg3));
11956             }
11957         } else {
11958             ret = -TARGET_EFAULT;
11959         }
11960         unlock_user(p, arg1, 0);
11961         unlock_user(b, arg2, arg3);
11962         return ret;
11963     }
11964     case TARGET_NR_flistxattr:
11965     {
11966         void *b = 0;
11967         if (arg2) {
11968             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11969             if (!b) {
11970                 return -TARGET_EFAULT;
11971             }
11972         }
11973         ret = get_errno(flistxattr(arg1, b, arg3));
11974         unlock_user(b, arg2, arg3);
11975         return ret;
11976     }
11977     case TARGET_NR_setxattr:
11978     case TARGET_NR_lsetxattr:
11979         {
11980             void *p, *n, *v = 0;
11981             if (arg3) {
11982                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11983                 if (!v) {
11984                     return -TARGET_EFAULT;
11985                 }
11986             }
11987             p = lock_user_string(arg1);
11988             n = lock_user_string(arg2);
11989             if (p && n) {
11990                 if (num == TARGET_NR_setxattr) {
11991                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11992                 } else {
11993                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11994                 }
11995             } else {
11996                 ret = -TARGET_EFAULT;
11997             }
11998             unlock_user(p, arg1, 0);
11999             unlock_user(n, arg2, 0);
12000             unlock_user(v, arg3, 0);
12001         }
12002         return ret;
12003     case TARGET_NR_fsetxattr:
12004         {
12005             void *n, *v = 0;
12006             if (arg3) {
12007                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12008                 if (!v) {
12009                     return -TARGET_EFAULT;
12010                 }
12011             }
12012             n = lock_user_string(arg2);
12013             if (n) {
12014                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12015             } else {
12016                 ret = -TARGET_EFAULT;
12017             }
12018             unlock_user(n, arg2, 0);
12019             unlock_user(v, arg3, 0);
12020         }
12021         return ret;
12022     case TARGET_NR_getxattr:
12023     case TARGET_NR_lgetxattr:
12024         {
12025             void *p, *n, *v = 0;
12026             if (arg3) {
12027                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12028                 if (!v) {
12029                     return -TARGET_EFAULT;
12030                 }
12031             }
12032             p = lock_user_string(arg1);
12033             n = lock_user_string(arg2);
12034             if (p && n) {
12035                 if (num == TARGET_NR_getxattr) {
12036                     ret = get_errno(getxattr(p, n, v, arg4));
12037                 } else {
12038                     ret = get_errno(lgetxattr(p, n, v, arg4));
12039                 }
12040             } else {
12041                 ret = -TARGET_EFAULT;
12042             }
12043             unlock_user(p, arg1, 0);
12044             unlock_user(n, arg2, 0);
12045             unlock_user(v, arg3, arg4);
12046         }
12047         return ret;
12048     case TARGET_NR_fgetxattr:
12049         {
12050             void *n, *v = 0;
12051             if (arg3) {
12052                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12053                 if (!v) {
12054                     return -TARGET_EFAULT;
12055                 }
12056             }
12057             n = lock_user_string(arg2);
12058             if (n) {
12059                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12060             } else {
12061                 ret = -TARGET_EFAULT;
12062             }
12063             unlock_user(n, arg2, 0);
12064             unlock_user(v, arg3, arg4);
12065         }
12066         return ret;
12067     case TARGET_NR_removexattr:
12068     case TARGET_NR_lremovexattr:
12069         {
12070             void *p, *n;
12071             p = lock_user_string(arg1);
12072             n = lock_user_string(arg2);
12073             if (p && n) {
12074                 if (num == TARGET_NR_removexattr) {
12075                     ret = get_errno(removexattr(p, n));
12076                 } else {
12077                     ret = get_errno(lremovexattr(p, n));
12078                 }
12079             } else {
12080                 ret = -TARGET_EFAULT;
12081             }
12082             unlock_user(p, arg1, 0);
12083             unlock_user(n, arg2, 0);
12084         }
12085         return ret;
12086     case TARGET_NR_fremovexattr:
12087         {
12088             void *n;
12089             n = lock_user_string(arg2);
12090             if (n) {
12091                 ret = get_errno(fremovexattr(arg1, n));
12092             } else {
12093                 ret = -TARGET_EFAULT;
12094             }
12095             unlock_user(n, arg2, 0);
12096         }
12097         return ret;
12098 #endif
12099 #endif /* CONFIG_ATTR */
12100 #ifdef TARGET_NR_set_thread_area
12101     case TARGET_NR_set_thread_area:
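        /*
         * Each architecture keeps the TLS pointer in a different place:
         * MIPS in the CP0 UserLocal register, CRIS in the PR_PID special
         * register, 32-bit x86 in a GDT entry via do_set_thread_area(),
         * and m68k in the per-thread TaskState.
         */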
12102 #if defined(TARGET_MIPS)
12103       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12104       return 0;
12105 #elif defined(TARGET_CRIS)
12106       if (arg1 & 0xff) {
12107           ret = -TARGET_EINVAL;
12108       } else {
12109           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12110           ret = 0;
12111       }
12112       return ret;
12113 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12114       return do_set_thread_area(cpu_env, arg1);
12115 #elif defined(TARGET_M68K)
12116       {
12117           TaskState *ts = cpu->opaque;
12118           ts->tp_value = arg1;
12119           return 0;
12120       }
12121 #else
12122       return -TARGET_ENOSYS;
12123 #endif
12124 #endif
12125 #ifdef TARGET_NR_get_thread_area
12126     case TARGET_NR_get_thread_area:
12127 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12128         return do_get_thread_area(cpu_env, arg1);
12129 #elif defined(TARGET_M68K)
12130         {
12131             TaskState *ts = cpu->opaque;
12132             return ts->tp_value;
12133         }
12134 #else
12135         return -TARGET_ENOSYS;
12136 #endif
12137 #endif
12138 #ifdef TARGET_NR_getdomainname
12139     case TARGET_NR_getdomainname:
12140         return -TARGET_ENOSYS;
12141 #endif
12142 
12143 #ifdef TARGET_NR_clock_settime
12144     case TARGET_NR_clock_settime:
12145     {
12146         struct timespec ts;
12147 
12148         ret = target_to_host_timespec(&ts, arg2);
12149         if (!is_error(ret)) {
12150             ret = get_errno(clock_settime(arg1, &ts));
12151         }
12152         return ret;
12153     }
12154 #endif
12155 #ifdef TARGET_NR_clock_settime64
12156     case TARGET_NR_clock_settime64:
12157     {
12158         struct timespec ts;
12159 
12160         ret = target_to_host_timespec64(&ts, arg2);
12161         if (!is_error(ret)) {
12162             ret = get_errno(clock_settime(arg1, &ts));
12163         }
12164         return ret;
12165     }
12166 #endif
12167 #ifdef TARGET_NR_clock_gettime
12168     case TARGET_NR_clock_gettime:
12169     {
12170         struct timespec ts;
12171         ret = get_errno(clock_gettime(arg1, &ts));
12172         if (!is_error(ret)) {
12173             ret = host_to_target_timespec(arg2, &ts);
12174         }
12175         return ret;
12176     }
12177 #endif
12178 #ifdef TARGET_NR_clock_gettime64
12179     case TARGET_NR_clock_gettime64:
12180     {
12181         struct timespec ts;
12182         ret = get_errno(clock_gettime(arg1, &ts));
12183         if (!is_error(ret)) {
12184             ret = host_to_target_timespec64(arg2, &ts);
12185         }
12186         return ret;
12187     }
12188 #endif
12189 #ifdef TARGET_NR_clock_getres
12190     case TARGET_NR_clock_getres:
12191     {
12192         struct timespec ts;
12193         ret = get_errno(clock_getres(arg1, &ts));
12194         if (!is_error(ret)) {
12195             host_to_target_timespec(arg2, &ts);
12196         }
12197         return ret;
12198     }
12199 #endif
12200 #ifdef TARGET_NR_clock_getres_time64
12201     case TARGET_NR_clock_getres_time64:
12202     {
12203         struct timespec ts;
12204         ret = get_errno(clock_getres(arg1, &ts));
12205         if (!is_error(ret)) {
12206             host_to_target_timespec64(arg2, &ts);
12207         }
12208         return ret;
12209     }
12210 #endif
12211 #ifdef TARGET_NR_clock_nanosleep
12212     case TARGET_NR_clock_nanosleep:
12213     {
12214         struct timespec ts;
12215         if (target_to_host_timespec(&ts, arg3)) {
12216             return -TARGET_EFAULT;
12217         }
12218         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12219                                              &ts, arg4 ? &ts : NULL));
12220         /*
12221          * If the call is interrupted by a signal handler, it fails
12222          * with error -TARGET_EINTR; if arg4 is not NULL and arg2 is not
12223          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12224          */
12225         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12226             host_to_target_timespec(arg4, &ts)) {
12227               return -TARGET_EFAULT;
12228         }
12229 
12230         return ret;
12231     }
12232 #endif
12233 #ifdef TARGET_NR_clock_nanosleep_time64
12234     case TARGET_NR_clock_nanosleep_time64:
12235     {
12236         struct timespec ts;
12237 
12238         if (target_to_host_timespec64(&ts, arg3)) {
12239             return -TARGET_EFAULT;
12240         }
12241 
12242         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12243                                              &ts, arg4 ? &ts : NULL));
12244 
12245         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12246             host_to_target_timespec64(arg4, &ts)) {
12247             return -TARGET_EFAULT;
12248         }
12249         return ret;
12250     }
12251 #endif
12252 
12253 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12254     case TARGET_NR_set_tid_address:
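        /*
         * The kernel only records this address (to clear and futex-wake it
         * when the thread exits); since guest memory is directly mapped
         * into the QEMU process, handing it the host address via g2h()
         * is sufficient.
         */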
12255         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12256 #endif
12257 
12258     case TARGET_NR_tkill:
12259         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12260 
12261     case TARGET_NR_tgkill:
12262         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12263                          target_to_host_signal(arg3)));
12264 
12265 #ifdef TARGET_NR_set_robust_list
12266     case TARGET_NR_set_robust_list:
12267     case TARGET_NR_get_robust_list:
12268         /* The ABI for supporting robust futexes has userspace pass
12269          * the kernel a pointer to a linked list which is updated by
12270          * userspace after the syscall; the list is walked by the kernel
12271          * when the thread exits. Since the linked list in QEMU guest
12272          * memory isn't a valid linked list for the host and we have
12273          * no way to reliably intercept the thread-death event, we can't
12274          * support these. Silently return ENOSYS so that guest userspace
12275          * falls back to a non-robust futex implementation (which should
12276          * be OK except in the corner case of the guest crashing while
12277          * holding a mutex that is shared with another process via
12278          * shared memory).
12279          */
12280         return -TARGET_ENOSYS;
12281 #endif
12282 
12283 #if defined(TARGET_NR_utimensat)
12284     case TARGET_NR_utimensat:
12285         {
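            /*
             * A NULL times pointer (arg3 == 0) means "set both timestamps
             * to the current time"; otherwise convert the two guest
             * timespecs into a host array for sys_utimensat().
             */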
12286             struct timespec *tsp, ts[2];
12287             if (!arg3) {
12288                 tsp = NULL;
12289             } else {
12290                 if (target_to_host_timespec(ts, arg3)) {
12291                     return -TARGET_EFAULT;
12292                 }
12293                 if (target_to_host_timespec(ts + 1, arg3 +
12294                                             sizeof(struct target_timespec))) {
12295                     return -TARGET_EFAULT;
12296                 }
12297                 tsp = ts;
12298             }
12299             if (!arg2) {
12300                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12301             } else {
12302                 if (!(p = lock_user_string(arg2))) {
12303                     return -TARGET_EFAULT;
12304                 }
12305                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12306                 unlock_user(p, arg2, 0);
12307             }
12308         }
12309         return ret;
12310 #endif
12311 #ifdef TARGET_NR_utimensat_time64
12312     case TARGET_NR_utimensat_time64:
12313         {
12314             struct timespec *tsp, ts[2];
12315             if (!arg3) {
12316                 tsp = NULL;
12317             } else {
12318                 if (target_to_host_timespec64(ts, arg3)) {
12319                     return -TARGET_EFAULT;
12320                 }
12321                 if (target_to_host_timespec64(ts + 1, arg3 +
12322                                      sizeof(struct target__kernel_timespec))) {
12323                     return -TARGET_EFAULT;
12324                 }
12325                 tsp = ts;
12326             }
12327             if (!arg2) {
12328                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12329             } else {
12330                 p = lock_user_string(arg2);
12331                 if (!p) {
12332                     return -TARGET_EFAULT;
12333                 }
12334                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12335                 unlock_user(p, arg2, 0);
12336             }
12337         }
12338         return ret;
12339 #endif
12340 #ifdef TARGET_NR_futex
12341     case TARGET_NR_futex:
12342         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12343 #endif
12344 #ifdef TARGET_NR_futex_time64
12345     case TARGET_NR_futex_time64:
12346         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12347 #endif
12348 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12349     case TARGET_NR_inotify_init:
12350         ret = get_errno(sys_inotify_init());
12351         if (ret >= 0) {
12352             fd_trans_register(ret, &target_inotify_trans);
12353         }
12354         return ret;
12355 #endif
12356 #ifdef CONFIG_INOTIFY1
12357 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12358     case TARGET_NR_inotify_init1:
12359         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12360                                           fcntl_flags_tbl)));
12361         if (ret >= 0) {
12362             fd_trans_register(ret, &target_inotify_trans);
12363         }
12364         return ret;
12365 #endif
12366 #endif
12367 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12368     case TARGET_NR_inotify_add_watch:
12369         p = lock_user_string(arg2);
12370         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12371         unlock_user(p, arg2, 0);
12372         return ret;
12373 #endif
12374 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12375     case TARGET_NR_inotify_rm_watch:
12376         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12377 #endif
12378 
12379 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12380     case TARGET_NR_mq_open:
12381         {
12382             struct mq_attr posix_mq_attr;
12383             struct mq_attr *pposix_mq_attr;
12384             int host_flags;
12385 
12386             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12387             pposix_mq_attr = NULL;
12388             if (arg4) {
12389                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12390                     return -TARGET_EFAULT;
12391                 }
12392                 pposix_mq_attr = &posix_mq_attr;
12393             }
12394             p = lock_user_string(arg1 - 1);
12395             if (!p) {
12396                 return -TARGET_EFAULT;
12397             }
12398             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12399             unlock_user(p, arg1, 0);
12400         }
12401         return ret;
12402 
12403     case TARGET_NR_mq_unlink:
12404         p = lock_user_string(arg1 - 1);
12405         if (!p) {
12406             return -TARGET_EFAULT;
12407         }
12408         ret = get_errno(mq_unlink(p));
12409         unlock_user(p, arg1, 0);
12410         return ret;
12411 
12412 #ifdef TARGET_NR_mq_timedsend
12413     case TARGET_NR_mq_timedsend:
12414         {
12415             struct timespec ts;
12416 
12417             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12418             if (arg5 != 0) {
12419                 if (target_to_host_timespec(&ts, arg5)) {
12420                     return -TARGET_EFAULT;
12421                 }
12422                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12423                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12424                     return -TARGET_EFAULT;
12425                 }
12426             } else {
12427                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12428             }
12429             unlock_user(p, arg2, arg3);
12430         }
12431         return ret;
12432 #endif
12433 #ifdef TARGET_NR_mq_timedsend_time64
12434     case TARGET_NR_mq_timedsend_time64:
12435         {
12436             struct timespec ts;
12437 
12438             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12439             if (arg5 != 0) {
12440                 if (target_to_host_timespec64(&ts, arg5)) {
12441                     return -TARGET_EFAULT;
12442                 }
12443                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12444                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12445                     return -TARGET_EFAULT;
12446                 }
12447             } else {
12448                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12449             }
12450             unlock_user(p, arg2, arg3);
12451         }
12452         return ret;
12453 #endif
12454 
12455 #ifdef TARGET_NR_mq_timedreceive
12456     case TARGET_NR_mq_timedreceive:
12457         {
12458             struct timespec ts;
12459             unsigned int prio;
12460 
12461             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12462             if (arg5 != 0) {
12463                 if (target_to_host_timespec(&ts, arg5)) {
12464                     return -TARGET_EFAULT;
12465                 }
12466                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12467                                                      &prio, &ts));
12468                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12469                     return -TARGET_EFAULT;
12470                 }
12471             } else {
12472                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12473                                                      &prio, NULL));
12474             }
12475             unlock_user(p, arg2, arg3);
12476             if (arg4 != 0)
12477                 put_user_u32(prio, arg4);
12478         }
12479         return ret;
12480 #endif
12481 #ifdef TARGET_NR_mq_timedreceive_time64
12482     case TARGET_NR_mq_timedreceive_time64:
12483         {
12484             struct timespec ts;
12485             unsigned int prio;
12486 
12487             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12488             if (arg5 != 0) {
12489                 if (target_to_host_timespec64(&ts, arg5)) {
12490                     return -TARGET_EFAULT;
12491                 }
12492                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12493                                                      &prio, &ts));
12494                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12495                     return -TARGET_EFAULT;
12496                 }
12497             } else {
12498                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12499                                                      &prio, NULL));
12500             }
12501             unlock_user(p, arg2, arg3);
12502             if (arg4 != 0) {
12503                 put_user_u32(prio, arg4);
12504             }
12505         }
12506         return ret;
12507 #endif
12508 
12509     /* Not implemented for now... */
12510 /*     case TARGET_NR_mq_notify: */
12511 /*         break; */
12512 
12513     case TARGET_NR_mq_getsetattr:
12514         {
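            /*
             * mq_setattr() already returns the previous attributes, so a
             * separate mq_getattr() call is only needed when the guest did
             * not supply new attributes but still asked for the old ones.
             */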
12515             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12516             ret = 0;
12517             if (arg2 != 0) {
12518                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12519                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12520                                            &posix_mq_attr_out));
12521             } else if (arg3 != 0) {
12522                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12523             }
12524             if (ret == 0 && arg3 != 0) {
12525                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12526             }
12527         }
12528         return ret;
12529 #endif
12530 
12531 #ifdef CONFIG_SPLICE
12532 #ifdef TARGET_NR_tee
12533     case TARGET_NR_tee:
12534         {
12535             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12536         }
12537         return ret;
12538 #endif
12539 #ifdef TARGET_NR_splice
12540     case TARGET_NR_splice:
12541         {
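            /*
             * splice() updates the optional 64-bit offsets it is given, so
             * copy them in from guest memory beforehand and write the
             * updated values back afterwards.
             */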
12542             loff_t loff_in, loff_out;
12543             loff_t *ploff_in = NULL, *ploff_out = NULL;
12544             if (arg2) {
12545                 if (get_user_u64(loff_in, arg2)) {
12546                     return -TARGET_EFAULT;
12547                 }
12548                 ploff_in = &loff_in;
12549             }
12550             if (arg4) {
12551                 if (get_user_u64(loff_out, arg4)) {
12552                     return -TARGET_EFAULT;
12553                 }
12554                 ploff_out = &loff_out;
12555             }
12556             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12557             if (arg2) {
12558                 if (put_user_u64(loff_in, arg2)) {
12559                     return -TARGET_EFAULT;
12560                 }
12561             }
12562             if (arg4) {
12563                 if (put_user_u64(loff_out, arg4)) {
12564                     return -TARGET_EFAULT;
12565                 }
12566             }
12567         }
12568         return ret;
12569 #endif
12570 #ifdef TARGET_NR_vmsplice
12571     case TARGET_NR_vmsplice:
12572         {
12573             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12574             if (vec != NULL) {
12575                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12576                 unlock_iovec(vec, arg2, arg3, 0);
12577             } else {
12578                 ret = -host_to_target_errno(errno);
12579             }
12580         }
12581         return ret;
12582 #endif
12583 #endif /* CONFIG_SPLICE */
12584 #ifdef CONFIG_EVENTFD
12585 #if defined(TARGET_NR_eventfd)
12586     case TARGET_NR_eventfd:
12587         ret = get_errno(eventfd(arg1, 0));
12588         if (ret >= 0) {
12589             fd_trans_register(ret, &target_eventfd_trans);
12590         }
12591         return ret;
12592 #endif
12593 #if defined(TARGET_NR_eventfd2)
12594     case TARGET_NR_eventfd2:
12595     {
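        /*
         * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
         * and O_CLOEXEC on the host, but the guest's O_* encodings may
         * differ, so translate those two bits by hand and pass any other
         * flag bits (e.g. EFD_SEMAPHORE) through unchanged.
         */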
12596         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12597         if (arg2 & TARGET_O_NONBLOCK) {
12598             host_flags |= O_NONBLOCK;
12599         }
12600         if (arg2 & TARGET_O_CLOEXEC) {
12601             host_flags |= O_CLOEXEC;
12602         }
12603         ret = get_errno(eventfd(arg1, host_flags));
12604         if (ret >= 0) {
12605             fd_trans_register(ret, &target_eventfd_trans);
12606         }
12607         return ret;
12608     }
12609 #endif
12610 #endif /* CONFIG_EVENTFD  */
12611 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12612     case TARGET_NR_fallocate:
12613 #if TARGET_ABI_BITS == 32
12614         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12615                                   target_offset64(arg5, arg6)));
12616 #else
12617         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12618 #endif
12619         return ret;
12620 #endif
12621 #if defined(CONFIG_SYNC_FILE_RANGE)
12622 #if defined(TARGET_NR_sync_file_range)
12623     case TARGET_NR_sync_file_range:
12624 #if TARGET_ABI_BITS == 32
12625 #if defined(TARGET_MIPS)
12626         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12627                                         target_offset64(arg5, arg6), arg7));
12628 #else
12629         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12630                                         target_offset64(arg4, arg5), arg6));
12631 #endif /* !TARGET_MIPS */
12632 #else
12633         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12634 #endif
12635         return ret;
12636 #endif
12637 #if defined(TARGET_NR_sync_file_range2) || \
12638     defined(TARGET_NR_arm_sync_file_range)
12639 #if defined(TARGET_NR_sync_file_range2)
12640     case TARGET_NR_sync_file_range2:
12641 #endif
12642 #if defined(TARGET_NR_arm_sync_file_range)
12643     case TARGET_NR_arm_sync_file_range:
12644 #endif
12645         /* This is like sync_file_range but the arguments are reordered */
12646 #if TARGET_ABI_BITS == 32
12647         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12648                                         target_offset64(arg5, arg6), arg2));
12649 #else
12650         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12651 #endif
12652         return ret;
12653 #endif
12654 #endif
12655 #if defined(TARGET_NR_signalfd4)
12656     case TARGET_NR_signalfd4:
12657         return do_signalfd4(arg1, arg2, arg4);
12658 #endif
12659 #if defined(TARGET_NR_signalfd)
12660     case TARGET_NR_signalfd:
12661         return do_signalfd4(arg1, arg2, 0);
12662 #endif
12663 #if defined(CONFIG_EPOLL)
12664 #if defined(TARGET_NR_epoll_create)
12665     case TARGET_NR_epoll_create:
12666         return get_errno(epoll_create(arg1));
12667 #endif
12668 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12669     case TARGET_NR_epoll_create1:
12670         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12671 #endif
12672 #if defined(TARGET_NR_epoll_ctl)
12673     case TARGET_NR_epoll_ctl:
12674     {
12675         struct epoll_event ep;
12676         struct epoll_event *epp = 0;
12677         if (arg4) {
12678             if (arg2 != EPOLL_CTL_DEL) {
12679                 struct target_epoll_event *target_ep;
12680                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12681                     return -TARGET_EFAULT;
12682                 }
12683                 ep.events = tswap32(target_ep->events);
12684                 /*
12685                  * The epoll_data_t union is just opaque data to the kernel,
12686                  * so we transfer all 64 bits across and need not worry what
12687                  * actual data type it is.
12688                  */
12689                 ep.data.u64 = tswap64(target_ep->data.u64);
12690                 unlock_user_struct(target_ep, arg4, 0);
12691             }
12692             /*
12693              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12694              * non-NULL pointer even though the argument is ignored, so if
12695              * the guest passed a pointer we still forward one to the host.
12696              */
12697             epp = &ep;
12698         }
12699         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12700     }
12701 #endif
12702 
12703 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12704 #if defined(TARGET_NR_epoll_wait)
12705     case TARGET_NR_epoll_wait:
12706 #endif
12707 #if defined(TARGET_NR_epoll_pwait)
12708     case TARGET_NR_epoll_pwait:
12709 #endif
12710     {
12711         struct target_epoll_event *target_ep;
12712         struct epoll_event *ep;
12713         int epfd = arg1;
12714         int maxevents = arg3;
12715         int timeout = arg4;
12716 
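        /*
         * Bound maxevents: this rejects nonsensical guest values and caps
         * the size of the temporary host event buffer allocated below.
         */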
12717         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12718             return -TARGET_EINVAL;
12719         }
12720 
12721         target_ep = lock_user(VERIFY_WRITE, arg2,
12722                               maxevents * sizeof(struct target_epoll_event), 1);
12723         if (!target_ep) {
12724             return -TARGET_EFAULT;
12725         }
12726 
12727         ep = g_try_new(struct epoll_event, maxevents);
12728         if (!ep) {
12729             unlock_user(target_ep, arg2, 0);
12730             return -TARGET_ENOMEM;
12731         }
12732 
12733         switch (num) {
12734 #if defined(TARGET_NR_epoll_pwait)
12735         case TARGET_NR_epoll_pwait:
12736         {
12737             target_sigset_t *target_set;
12738             sigset_t _set, *set = &_set;
12739 
12740             if (arg5) {
12741                 if (arg6 != sizeof(target_sigset_t)) {
12742                     ret = -TARGET_EINVAL;
12743                     break;
12744                 }
12745 
12746                 target_set = lock_user(VERIFY_READ, arg5,
12747                                        sizeof(target_sigset_t), 1);
12748                 if (!target_set) {
12749                     ret = -TARGET_EFAULT;
12750                     break;
12751                 }
12752                 target_to_host_sigset(set, target_set);
12753                 unlock_user(target_set, arg5, 0);
12754             } else {
12755                 set = NULL;
12756             }
12757 
12758             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12759                                              set, SIGSET_T_SIZE));
12760             break;
12761         }
12762 #endif
12763 #if defined(TARGET_NR_epoll_wait)
12764         case TARGET_NR_epoll_wait:
12765             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12766                                              NULL, 0));
12767             break;
12768 #endif
12769         default:
12770             ret = -TARGET_ENOSYS;
12771         }
12772         if (!is_error(ret)) {
12773             int i;
12774             for (i = 0; i < ret; i++) {
12775                 target_ep[i].events = tswap32(ep[i].events);
12776                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12777             }
12778             unlock_user(target_ep, arg2,
12779                         ret * sizeof(struct target_epoll_event));
12780         } else {
12781             unlock_user(target_ep, arg2, 0);
12782         }
12783         g_free(ep);
12784         return ret;
12785     }
12786 #endif
12787 #endif
12788 #ifdef TARGET_NR_prlimit64
12789     case TARGET_NR_prlimit64:
12790     {
12791         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12792         struct target_rlimit64 *target_rnew, *target_rold;
12793         struct host_rlimit64 rnew, rold, *rnewp = 0;
12794         int resource = target_to_host_resource(arg2);
12795 
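        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * forwarded to the host, since they would constrain the QEMU
         * process itself rather than just the guest; for those resources
         * only the current values are queried and reported back.
         */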
12796         if (arg3 && (resource != RLIMIT_AS &&
12797                      resource != RLIMIT_DATA &&
12798                      resource != RLIMIT_STACK)) {
12799             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12800                 return -TARGET_EFAULT;
12801             }
12802             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12803             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12804             unlock_user_struct(target_rnew, arg3, 0);
12805             rnewp = &rnew;
12806         }
12807 
12808         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12809         if (!is_error(ret) && arg4) {
12810             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12811                 return -TARGET_EFAULT;
12812             }
12813             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12814             target_rold->rlim_max = tswap64(rold.rlim_max);
12815             unlock_user_struct(target_rold, arg4, 1);
12816         }
12817         return ret;
12818     }
12819 #endif
12820 #ifdef TARGET_NR_gethostname
12821     case TARGET_NR_gethostname:
12822     {
12823         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12824         if (name) {
12825             ret = get_errno(gethostname(name, arg2));
12826             unlock_user(name, arg1, arg2);
12827         } else {
12828             ret = -TARGET_EFAULT;
12829         }
12830         return ret;
12831     }
12832 #endif
12833 #ifdef TARGET_NR_atomic_cmpxchg_32
12834     case TARGET_NR_atomic_cmpxchg_32:
12835     {
12836         /* should use start_exclusive from main.c */
12837         abi_ulong mem_value;
12838         if (get_user_u32(mem_value, arg6)) {
12839             target_siginfo_t info;
12840             info.si_signo = SIGSEGV;
12841             info.si_errno = 0;
12842             info.si_code = TARGET_SEGV_MAPERR;
12843             info._sifields._sigfault._addr = arg6;
12844             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12845                          QEMU_SI_FAULT, &info);
12846             ret = 0xdeadbeef;
12847 
12848         }
12849         if (mem_value == arg2)
12850             put_user_u32(arg1, arg6);
12851         return mem_value;
12852     }
12853 #endif
12854 #ifdef TARGET_NR_atomic_barrier
12855     case TARGET_NR_atomic_barrier:
12856         /* Like the kernel implementation and the
12857            qemu arm barrier, this is a no-op. */
12858         return 0;
12859 #endif
12860 
12861 #ifdef TARGET_NR_timer_create
12862     case TARGET_NR_timer_create:
12863     {
12864         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12865 
12866         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12867 
12868         int clkid = arg1;
12869         int timer_index = next_free_host_timer();
12870 
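        /*
         * Guest timer IDs are indices into the g_posix_timers[] table,
         * tagged with TIMER_MAGIC so that get_timer_id() can later reject
         * values the guest did not obtain from timer_create().
         */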
12871         if (timer_index < 0) {
12872             ret = -TARGET_EAGAIN;
12873         } else {
12874             timer_t *phtimer = g_posix_timers + timer_index;
12875 
12876             if (arg2) {
12877                 phost_sevp = &host_sevp;
12878                 ret = target_to_host_sigevent(phost_sevp, arg2);
12879                 if (ret != 0) {
12880                     return ret;
12881                 }
12882             }
12883 
12884             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12885             if (ret) {
12886                 phtimer = NULL;
12887             } else {
12888                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12889                     return -TARGET_EFAULT;
12890                 }
12891             }
12892         }
12893         return ret;
12894     }
12895 #endif
12896 
12897 #ifdef TARGET_NR_timer_settime
12898     case TARGET_NR_timer_settime:
12899     {
12900         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12901          * struct itimerspec * old_value */
12902         target_timer_t timerid = get_timer_id(arg1);
12903 
12904         if (timerid < 0) {
12905             ret = timerid;
12906         } else if (arg3 == 0) {
12907             ret = -TARGET_EINVAL;
12908         } else {
12909             timer_t htimer = g_posix_timers[timerid];
12910             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12911 
12912             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12913                 return -TARGET_EFAULT;
12914             }
12915             ret = get_errno(
12916                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12917             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12918                 return -TARGET_EFAULT;
12919             }
12920         }
12921         return ret;
12922     }
12923 #endif
12924 
12925 #ifdef TARGET_NR_timer_settime64
12926     case TARGET_NR_timer_settime64:
12927     {
12928         target_timer_t timerid = get_timer_id(arg1);
12929 
12930         if (timerid < 0) {
12931             ret = timerid;
12932         } else if (arg3 == 0) {
12933             ret = -TARGET_EINVAL;
12934         } else {
12935             timer_t htimer = g_posix_timers[timerid];
12936             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12937 
12938             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12939                 return -TARGET_EFAULT;
12940             }
12941             ret = get_errno(
12942                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12943             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12944                 return -TARGET_EFAULT;
12945             }
12946         }
12947         return ret;
12948     }
12949 #endif
12950 
12951 #ifdef TARGET_NR_timer_gettime
12952     case TARGET_NR_timer_gettime:
12953     {
12954         /* args: timer_t timerid, struct itimerspec *curr_value */
12955         target_timer_t timerid = get_timer_id(arg1);
12956 
12957         if (timerid < 0) {
12958             ret = timerid;
12959         } else if (!arg2) {
12960             ret = -TARGET_EFAULT;
12961         } else {
12962             timer_t htimer = g_posix_timers[timerid];
12963             struct itimerspec hspec;
12964             ret = get_errno(timer_gettime(htimer, &hspec));
12965 
12966             if (host_to_target_itimerspec(arg2, &hspec)) {
12967                 ret = -TARGET_EFAULT;
12968             }
12969         }
12970         return ret;
12971     }
12972 #endif
12973 
12974 #ifdef TARGET_NR_timer_gettime64
12975     case TARGET_NR_timer_gettime64:
12976     {
12977         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12978         target_timer_t timerid = get_timer_id(arg1);
12979 
12980         if (timerid < 0) {
12981             ret = timerid;
12982         } else if (!arg2) {
12983             ret = -TARGET_EFAULT;
12984         } else {
12985             timer_t htimer = g_posix_timers[timerid];
12986             struct itimerspec hspec;
12987             ret = get_errno(timer_gettime(htimer, &hspec));
12988 
12989             if (host_to_target_itimerspec64(arg2, &hspec)) {
12990                 ret = -TARGET_EFAULT;
12991             }
12992         }
12993         return ret;
12994     }
12995 #endif
12996 
12997 #ifdef TARGET_NR_timer_getoverrun
12998     case TARGET_NR_timer_getoverrun:
12999     {
13000         /* args: timer_t timerid */
13001         target_timer_t timerid = get_timer_id(arg1);
13002 
13003         if (timerid < 0) {
13004             ret = timerid;
13005         } else {
13006             timer_t htimer = g_posix_timers[timerid];
13007             ret = get_errno(timer_getoverrun(htimer));
13008         }
13009         return ret;
13010     }
13011 #endif
13012 
13013 #ifdef TARGET_NR_timer_delete
13014     case TARGET_NR_timer_delete:
13015     {
13016         /* args: timer_t timerid */
13017         target_timer_t timerid = get_timer_id(arg1);
13018 
13019         if (timerid < 0) {
13020             ret = timerid;
13021         } else {
13022             timer_t htimer = g_posix_timers[timerid];
13023             ret = get_errno(timer_delete(htimer));
13024             g_posix_timers[timerid] = 0;
13025         }
13026         return ret;
13027     }
13028 #endif
13029 
13030 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13031     case TARGET_NR_timerfd_create:
13032         return get_errno(timerfd_create(arg1,
13033                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13034 #endif
13035 
13036 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13037     case TARGET_NR_timerfd_gettime:
13038         {
13039             struct itimerspec its_curr;
13040 
13041             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13042 
13043             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13044                 return -TARGET_EFAULT;
13045             }
13046         }
13047         return ret;
13048 #endif
13049 
13050 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13051     case TARGET_NR_timerfd_gettime64:
13052         {
13053             struct itimerspec its_curr;
13054 
13055             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13056 
13057             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13058                 return -TARGET_EFAULT;
13059             }
13060         }
13061         return ret;
13062 #endif
13063 
13064 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13065     case TARGET_NR_timerfd_settime:
13066         {
13067             struct itimerspec its_new, its_old, *p_new;
13068 
13069             if (arg3) {
13070                 if (target_to_host_itimerspec(&its_new, arg3)) {
13071                     return -TARGET_EFAULT;
13072                 }
13073                 p_new = &its_new;
13074             } else {
13075                 p_new = NULL;
13076             }
13077 
13078             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13079 
13080             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13081                 return -TARGET_EFAULT;
13082             }
13083         }
13084         return ret;
13085 #endif
13086 
13087 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13088     case TARGET_NR_timerfd_settime64:
13089         {
13090             struct itimerspec its_new, its_old, *p_new;
13091 
13092             if (arg3) {
13093                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13094                     return -TARGET_EFAULT;
13095                 }
13096                 p_new = &its_new;
13097             } else {
13098                 p_new = NULL;
13099             }
13100 
13101             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13102 
13103             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13104                 return -TARGET_EFAULT;
13105             }
13106         }
13107         return ret;
13108 #endif
13109 
13110 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13111     case TARGET_NR_ioprio_get:
13112         return get_errno(ioprio_get(arg1, arg2));
13113 #endif
13114 
13115 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13116     case TARGET_NR_ioprio_set:
13117         return get_errno(ioprio_set(arg1, arg2, arg3));
13118 #endif
13119 
13120 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13121     case TARGET_NR_setns:
13122         return get_errno(setns(arg1, arg2));
13123 #endif
13124 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13125     case TARGET_NR_unshare:
13126         return get_errno(unshare(arg1));
13127 #endif
13128 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13129     case TARGET_NR_kcmp:
13130         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13131 #endif
13132 #ifdef TARGET_NR_swapcontext
13133     case TARGET_NR_swapcontext:
13134         /* PowerPC specific.  */
13135         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13136 #endif
13137 #ifdef TARGET_NR_memfd_create
13138     case TARGET_NR_memfd_create:
13139         p = lock_user_string(arg1);
13140         if (!p) {
13141             return -TARGET_EFAULT;
13142         }
13143         ret = get_errno(memfd_create(p, arg2));
13144         fd_trans_unregister(ret);
13145         unlock_user(p, arg1, 0);
13146         return ret;
13147 #endif
13148 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13149     case TARGET_NR_membarrier:
13150         return get_errno(membarrier(arg1, arg2));
13151 #endif
13152 
13153 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13154     case TARGET_NR_copy_file_range:
13155         {
13156             loff_t inoff, outoff;
13157             loff_t *pinoff = NULL, *poutoff = NULL;
13158 
13159             if (arg2) {
13160                 if (get_user_u64(inoff, arg2)) {
13161                     return -TARGET_EFAULT;
13162                 }
13163                 pinoff = &inoff;
13164             }
13165             if (arg4) {
13166                 if (get_user_u64(outoff, arg4)) {
13167                     return -TARGET_EFAULT;
13168                 }
13169                 poutoff = &outoff;
13170             }
13171             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13172                                                  arg5, arg6));
13173             if (!is_error(ret) && ret > 0) {
13174                 if (arg2) {
13175                     if (put_user_u64(inoff, arg2)) {
13176                         return -TARGET_EFAULT;
13177                     }
13178                 }
13179                 if (arg4) {
13180                     if (put_user_u64(outoff, arg4)) {
13181                         return -TARGET_EFAULT;
13182                     }
13183                 }
13184             }
13185         }
13186         return ret;
13187 #endif
13188 
13189     default:
13190         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13191         return -TARGET_ENOSYS;
13192     }
13193     return ret;
13194 }
13195 
13196 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13197                     abi_long arg2, abi_long arg3, abi_long arg4,
13198                     abi_long arg5, abi_long arg6, abi_long arg7,
13199                     abi_long arg8)
13200 {
13201     CPUState *cpu = env_cpu(cpu_env);
13202     abi_long ret;
13203 
13204 #ifdef DEBUG_ERESTARTSYS
13205     /* Debug-only code for exercising the syscall-restart code paths
13206      * in the per-architecture cpu main loops: restart every syscall
13207      * the guest makes once before letting it through.
13208      */
13209     {
13210         static bool flag;
13211         flag = !flag;
13212         if (flag) {
13213             return -TARGET_ERESTARTSYS;
13214         }
13215     }
13216 #endif
13217 
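    /*
     * Report syscall entry to QEMU's syscall tracing/plugin hooks and,
     * when -strace is enabled, to the strace-style logger; the matching
     * exit hooks run after do_syscall1() returns.
     */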
13218     record_syscall_start(cpu, num, arg1,
13219                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13220 
13221     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13222         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13223     }
13224 
13225     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13226                       arg5, arg6, arg7, arg8);
13227 
13228     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13229         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13230                           arg3, arg4, arg5, arg6);
13231     }
13232 
13233     record_syscall_return(cpu, num, ret);
13234     return ret;
13235 }
13236