xref: /openbmc/qemu/linux-user/syscall.c (revision 4df7b7fa)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
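/*
 * Illustrative note: glibc's pthread_create() typically issues clone() with
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM | CLONE_SIGHAND |
 * CLONE_THREAD | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * i.e. all of CLONE_THREAD_FLAGS plus bits covered by
 * CLONE_OPTIONAL_THREAD_FLAGS, so it is accepted by the
 * CLONE_INVALID_THREAD_FLAGS mask above; a plain fork()-style clone()
 * carries only exit-signal bits (CSIGNAL) and is accepted by the
 * CLONE_INVALID_FORK_FLAGS mask.
 */
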
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
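/*
 * Expansion sketch, for illustration: a use such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 * defines a thin host wrapper roughly equivalent to
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * where __NR_sys_getdents is #defined to the host's __NR_getdents below.
 */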
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * In all other cases we implement guest getdents with host getdents, if the host has it.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
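/*
 * Worked example of the problem described above: on a 32-bit host, each
 * host linux_dirent carries 32-bit d_ino/d_off fields, while a 64-bit
 * guest's linux_dirent uses 64-bit ones, so every record grows during
 * conversion and a host buffer that was filled exactly could overflow the
 * guest buffer; host getdents64 records are already at least as wide as
 * the guest's, so that case cannot arise.
 */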
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
368 static bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
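/*
 * How this table is consumed, roughly: each row is
 * { target_mask, target_bits, host_mask, host_bits }.  The
 * target_to_host_bitmask()/host_to_target_bitmask() helpers compare the
 * masked flags against the *_bits value and, on a match, OR in the other
 * side's bits; e.g. the first row maps a guest access mode of
 * TARGET_O_WRONLY to the host's O_WRONLY, and the two TARGET_O_SYNC rows
 * let O_DSYNC-only and full O_SYNC requests translate correctly even
 * though O_DSYNC's bits are a subset of O_SYNC's.
 */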
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
412 static int sys_utimensat(int dirfd, const char *pathname,
413                          const struct timespec times[2], int flags)
414 {
415     errno = ENOSYS;
416     return -1;
417 }
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
427 static int sys_renameat2(int oldfd, const char *old,
428                          int newfd, const char *new, int flags)
429 {
430     if (flags == 0) {
431         return renameat(oldfd, old, newfd, new);
432     }
433     errno = ENOSYS;
434     return -1;
435 }
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
444 {
445   return (inotify_init());
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450 {
451   return (inotify_add_watch(fd, pathname, mask));
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd, int32_t wd)
456 {
457   return (inotify_rm_watch(fd, wd));
458 }
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags)
463 {
464   return (inotify_init1(flags));
465 }
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, };
495 
496 static inline int next_free_host_timer(void)
497 {
498     int k;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
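/*
 * Descriptive note: a slot is claimed by storing the placeholder value
 * (timer_t)1 until timer_create() overwrites it with the real host timer
 * id; as the FIXME above says, the scan itself is not protected by a
 * lock, so concurrent guest timer_create() calls could in principle race
 * for the same slot.
 */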
508 #endif
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 /* target_to_host_errno_table[] is initialized from
513  * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 };
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERFKILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
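/*
 * Usage pattern throughout this file: host calls are wrapped as, for
 * example,
 *     ret = get_errno(safe_openat(dirfd, p, flags, mode));
 * so a failing host call becomes a negative target errno, which callers
 * then test with is_error() before copying results back to the guest.
 */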
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
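/*
 * Contract assumed here (see the per-host safe_syscall() implementation
 * for the authoritative description): the safe_*() wrappers generated
 * below behave roughly like libc syscall wrappers, returning -1 with
 * errno set on failure, but they cooperate with the guest signal
 * handling so that a signal arriving around a blocking syscall
 * interrupts it cleanly and the syscall can be restarted (the
 * ERESTARTSYS handling exercised by DEBUG_ERESTARTSYS above).
 */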
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
727 
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 #endif
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 #endif
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746               struct timespec *, tsp, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 #endif
749 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750               int, maxevents, int, timeout, const sigset_t *, sigmask,
751               size_t, sigsetsize)
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754               const struct timespec *,timeout,int *,uaddr2,int,val3)
755 #endif
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 #endif
760 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761 safe_syscall2(int, kill, pid_t, pid, int, sig)
762 safe_syscall2(int, tkill, int, tid, int, sig)
763 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767               unsigned long, pos_l, unsigned long, pos_h)
768 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769               unsigned long, pos_l, unsigned long, pos_h)
770 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771               socklen_t, addrlen)
772 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778 safe_syscall2(int, flock, int, fd, int, operation)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781               const struct timespec *, uts, size_t, sigsetsize)
782 #endif
783 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784               int, flags)
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep, const struct timespec *, req,
787               struct timespec *, rem)
788 #endif
789 #if defined(TARGET_NR_clock_nanosleep) || \
790     defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792               const struct timespec *, req, struct timespec *, rem)
793 #endif
794 #ifdef __NR_ipc
795 #ifdef __s390x__
796 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797               void *, ptr)
798 #else
799 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800               void *, ptr, long, fifth)
801 #endif
802 #endif
803 #ifdef __NR_msgsnd
804 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805               int, flags)
806 #endif
807 #ifdef __NR_msgrcv
808 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809               long, msgtype, int, flags)
810 #endif
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813               unsigned, nsops, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedsend) || \
816     defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818               size_t, len, unsigned, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_mq_timedreceive) || \
821     defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823               size_t, len, unsigned *, prio, const struct timespec *, timeout)
824 #endif
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827               int, outfd, loff_t *, poutoff, size_t, length,
828               unsigned int, flags)
829 #endif
830 
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832  * "third argument might be integer or pointer or not present" behaviour of
833  * the libc function.
834  */
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838  *  use the flock64 struct rather than unsuffixed flock
839  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840  */
841 #ifdef __NR_fcntl64
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843 #else
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845 #endif
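/*
 * Background: on 32-bit hosts only fcntl64 understands 64-bit file
 * offsets, taking struct flock64 with the F_*LK64 commands, while on
 * 64-bit hosts plain fcntl already does; using the 64-suffixed commands
 * and struct flock64 unconditionally therefore gives a single code path
 * with 64-bit offsets on either kind of host.
 */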
846 
847 static inline int host_to_target_sock_type(int host_type)
848 {
849     int target_type;
850 
851     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852     case SOCK_DGRAM:
853         target_type = TARGET_SOCK_DGRAM;
854         break;
855     case SOCK_STREAM:
856         target_type = TARGET_SOCK_STREAM;
857         break;
858     default:
859         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860         break;
861     }
862 
863 #if defined(SOCK_CLOEXEC)
864     if (host_type & SOCK_CLOEXEC) {
865         target_type |= TARGET_SOCK_CLOEXEC;
866     }
867 #endif
868 
869 #if defined(SOCK_NONBLOCK)
870     if (host_type & SOCK_NONBLOCK) {
871         target_type |= TARGET_SOCK_NONBLOCK;
872     }
873 #endif
874 
875     return target_type;
876 }
877 
878 static abi_ulong target_brk;
879 static abi_ulong target_original_brk;
880 static abi_ulong brk_page;
881 
882 void target_set_brk(abi_ulong new_brk)
883 {
884     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885     brk_page = HOST_PAGE_ALIGN(target_brk);
886 }
887 
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
890 
891 /* do_brk() must return target values and target errnos. */
892 abi_long do_brk(abi_ulong new_brk)
893 {
894     abi_long mapped_addr;
895     abi_ulong new_alloc_size;
896 
897     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
898 
899     if (!new_brk) {
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
901         return target_brk;
902     }
903     if (new_brk < target_original_brk) {
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
905                    target_brk);
906         return target_brk;
907     }
908 
909     /* If the new brk is less than the highest page reserved to the
910      * target heap allocation, set it and we're almost done...  */
911     if (new_brk <= brk_page) {
912         /* Heap contents are initialized to zero, as for anonymous
913          * mapped pages.  */
914         if (new_brk > target_brk) {
915             memset(g2h(target_brk), 0, new_brk - target_brk);
916         }
917         target_brk = new_brk;
918         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
919         return target_brk;
920     }
921 
922     /* We need to allocate more memory after the brk... Note that
923      * we don't use MAP_FIXED because that will map over the top of
924      * any existing mapping (like the one with the host libc or qemu
925      * itself); instead we treat "mapped but at wrong address" as
926      * a failure and unmap again.
927      */
928     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
929     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
930                                         PROT_READ|PROT_WRITE,
931                                         MAP_ANON|MAP_PRIVATE, 0, 0));
932 
933     if (mapped_addr == brk_page) {
934         /* Heap contents are initialized to zero, as for anonymous
935          * mapped pages.  Technically the new pages are already
936          * initialized to zero since they *are* anonymous mapped
937          * pages; however, we have to take care with the contents that
938          * come from the remaining part of the previous page: it may
939          * contain garbage data from a previous heap usage (grown
940          * then shrunk).  */
941         memset(g2h(target_brk), 0, brk_page - target_brk);
942 
943         target_brk = new_brk;
944         brk_page = HOST_PAGE_ALIGN(target_brk);
945         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
946             target_brk);
947         return target_brk;
948     } else if (mapped_addr != -1) {
949         /* Mapped but at wrong address, meaning there wasn't actually
950          * enough space for this brk.
951          */
952         target_munmap(mapped_addr, new_alloc_size);
953         mapped_addr = -1;
954         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
955     }
956     else {
957         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
958     }
959 
960 #if defined(TARGET_ALPHA)
961     /* We (partially) emulate OSF/1 on Alpha, which requires we
962        return a proper errno, not an unchanged brk value.  */
963     return -TARGET_ENOMEM;
964 #endif
965     /* For everything else, return the previous break. */
966     return target_brk;
967 }
968 
969 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
970     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
971 static inline abi_long copy_from_user_fdset(fd_set *fds,
972                                             abi_ulong target_fds_addr,
973                                             int n)
974 {
975     int i, nw, j, k;
976     abi_ulong b, *target_fds;
977 
978     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
979     if (!(target_fds = lock_user(VERIFY_READ,
980                                  target_fds_addr,
981                                  sizeof(abi_ulong) * nw,
982                                  1)))
983         return -TARGET_EFAULT;
984 
985     FD_ZERO(fds);
986     k = 0;
987     for (i = 0; i < nw; i++) {
988         /* grab the abi_ulong */
989         __get_user(b, &target_fds[i]);
990         for (j = 0; j < TARGET_ABI_BITS; j++) {
991             /* check the bit inside the abi_ulong */
992             if ((b >> j) & 1)
993                 FD_SET(k, fds);
994             k++;
995         }
996     }
997 
998     unlock_user(target_fds, target_fds_addr, 0);
999 
1000     return 0;
1001 }
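/*
 * Layout handled above, and mirrored by copy_to_user_fdset() below
 * (worked example): the guest fd_set is an array of abi_ulong words, so
 * with TARGET_ABI_BITS == 32 guest fd 35 lives in word 1, bit 3
 * (35 / 32 == 1, 35 % 32 == 3).
 */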
1002 
1003 static inline abi_long copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1004                                                 abi_ulong target_fds_addr,
1005                                                 int n)
1006 {
1007     if (target_fds_addr) {
1008         if (copy_from_user_fdset(fds, target_fds_addr, n))
1009             return -TARGET_EFAULT;
1010         *fds_ptr = fds;
1011     } else {
1012         *fds_ptr = NULL;
1013     }
1014     return 0;
1015 }
1016 
1017 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1018                                           const fd_set *fds,
1019                                           int n)
1020 {
1021     int i, nw, j, k;
1022     abi_long v;
1023     abi_ulong *target_fds;
1024 
1025     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1026     if (!(target_fds = lock_user(VERIFY_WRITE,
1027                                  target_fds_addr,
1028                                  sizeof(abi_ulong) * nw,
1029                                  0)))
1030         return -TARGET_EFAULT;
1031 
1032     k = 0;
1033     for (i = 0; i < nw; i++) {
1034         v = 0;
1035         for (j = 0; j < TARGET_ABI_BITS; j++) {
1036             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1037             k++;
1038         }
1039         __put_user(v, &target_fds[i]);
1040     }
1041 
1042     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1043 
1044     return 0;
1045 }
1046 #endif
1047 
1048 #if defined(__alpha__)
1049 #define HOST_HZ 1024
1050 #else
1051 #define HOST_HZ 100
1052 #endif
1053 
1054 static inline abi_long host_to_target_clock_t(long ticks)
1055 {
1056 #if HOST_HZ == TARGET_HZ
1057     return ticks;
1058 #else
1059     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1060 #endif
1061 }
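/*
 * Example: an Alpha host (HOST_HZ == 1024) reporting 2048 ticks yields
 * 2048 * 100 / 1024 == 200 ticks for a guest whose TARGET_HZ is 100.
 */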
1062 
1063 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1064                                              const struct rusage *rusage)
1065 {
1066     struct target_rusage *target_rusage;
1067 
1068     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1069         return -TARGET_EFAULT;
1070     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1071     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1072     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1073     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1074     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1075     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1076     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1077     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1078     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1079     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1080     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1081     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1082     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1083     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1084     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1085     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1086     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1087     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1088     unlock_user_struct(target_rusage, target_addr, 1);
1089 
1090     return 0;
1091 }
1092 
1093 #ifdef TARGET_NR_setrlimit
1094 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1095 {
1096     abi_ulong target_rlim_swap;
1097     rlim_t result;
1098 
1099     target_rlim_swap = tswapal(target_rlim);
1100     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1101         return RLIM_INFINITY;
1102 
1103     result = target_rlim_swap;
1104     if (target_rlim_swap != (rlim_t)result)
1105         return RLIM_INFINITY;
1106 
1107     return result;
1108 }
1109 #endif
1110 
1111 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1112 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1113 {
1114     abi_ulong target_rlim_swap;
1115     abi_ulong result;
1116 
1117     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1118         target_rlim_swap = TARGET_RLIM_INFINITY;
1119     else
1120         target_rlim_swap = rlim;
1121     result = tswapal(target_rlim_swap);
1122 
1123     return result;
1124 }
1125 #endif
1126 
1127 static inline int target_to_host_resource(int code)
1128 {
1129     switch (code) {
1130     case TARGET_RLIMIT_AS:
1131         return RLIMIT_AS;
1132     case TARGET_RLIMIT_CORE:
1133         return RLIMIT_CORE;
1134     case TARGET_RLIMIT_CPU:
1135         return RLIMIT_CPU;
1136     case TARGET_RLIMIT_DATA:
1137         return RLIMIT_DATA;
1138     case TARGET_RLIMIT_FSIZE:
1139         return RLIMIT_FSIZE;
1140     case TARGET_RLIMIT_LOCKS:
1141         return RLIMIT_LOCKS;
1142     case TARGET_RLIMIT_MEMLOCK:
1143         return RLIMIT_MEMLOCK;
1144     case TARGET_RLIMIT_MSGQUEUE:
1145         return RLIMIT_MSGQUEUE;
1146     case TARGET_RLIMIT_NICE:
1147         return RLIMIT_NICE;
1148     case TARGET_RLIMIT_NOFILE:
1149         return RLIMIT_NOFILE;
1150     case TARGET_RLIMIT_NPROC:
1151         return RLIMIT_NPROC;
1152     case TARGET_RLIMIT_RSS:
1153         return RLIMIT_RSS;
1154     case TARGET_RLIMIT_RTPRIO:
1155         return RLIMIT_RTPRIO;
1156     case TARGET_RLIMIT_SIGPENDING:
1157         return RLIMIT_SIGPENDING;
1158     case TARGET_RLIMIT_STACK:
1159         return RLIMIT_STACK;
1160     default:
1161         return code;
1162     }
1163 }
1164 
1165 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1166                                               abi_ulong target_tv_addr)
1167 {
1168     struct target_timeval *target_tv;
1169 
1170     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1171         return -TARGET_EFAULT;
1172     }
1173 
1174     __get_user(tv->tv_sec, &target_tv->tv_sec);
1175     __get_user(tv->tv_usec, &target_tv->tv_usec);
1176 
1177     unlock_user_struct(target_tv, target_tv_addr, 0);
1178 
1179     return 0;
1180 }
1181 
1182 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1183                                             const struct timeval *tv)
1184 {
1185     struct target_timeval *target_tv;
1186 
1187     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1188         return -TARGET_EFAULT;
1189     }
1190 
1191     __put_user(tv->tv_sec, &target_tv->tv_sec);
1192     __put_user(tv->tv_usec, &target_tv->tv_usec);
1193 
1194     unlock_user_struct(target_tv, target_tv_addr, 1);
1195 
1196     return 0;
1197 }
1198 
1199 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1200 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1201                                                 abi_ulong target_tv_addr)
1202 {
1203     struct target__kernel_sock_timeval *target_tv;
1204 
1205     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1206         return -TARGET_EFAULT;
1207     }
1208 
1209     __get_user(tv->tv_sec, &target_tv->tv_sec);
1210     __get_user(tv->tv_usec, &target_tv->tv_usec);
1211 
1212     unlock_user_struct(target_tv, target_tv_addr, 0);
1213 
1214     return 0;
1215 }
1216 #endif
1217 
1218 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1219                                               const struct timeval *tv)
1220 {
1221     struct target__kernel_sock_timeval *target_tv;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __put_user(tv->tv_sec, &target_tv->tv_sec);
1228     __put_user(tv->tv_usec, &target_tv->tv_usec);
1229 
1230     unlock_user_struct(target_tv, target_tv_addr, 1);
1231 
1232     return 0;
1233 }
1234 
1235 #if defined(TARGET_NR_futex) || \
1236     defined(TARGET_NR_rt_sigtimedwait) || \
1237     defined(TARGET_NR_pselect6) || \
1238     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1239     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1240     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1241     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1242     defined(TARGET_NR_timer_settime) || \
1243     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1244 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1245                                                abi_ulong target_addr)
1246 {
1247     struct target_timespec *target_ts;
1248 
1249     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1250         return -TARGET_EFAULT;
1251     }
1252     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1253     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1254     unlock_user_struct(target_ts, target_addr, 0);
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1260     defined(TARGET_NR_timer_settime64) || \
1261     defined(TARGET_NR_mq_timedsend_time64) || \
1262     defined(TARGET_NR_mq_timedreceive_time64) || \
1263     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1264     defined(TARGET_NR_clock_nanosleep_time64) || \
1265     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1266     defined(TARGET_NR_utimensat) || \
1267     defined(TARGET_NR_utimensat_time64) || \
1268     defined(TARGET_NR_semtimedop_time64) || \
1269     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1270 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1271                                                  abi_ulong target_addr)
1272 {
1273     struct target__kernel_timespec *target_ts;
1274 
1275     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1276         return -TARGET_EFAULT;
1277     }
1278     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1279     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1280     /* in 32bit mode, this drops the padding */
1281     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1282     unlock_user_struct(target_ts, target_addr, 0);
1283     return 0;
1284 }
1285 #endif
1286 
1287 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1288                                                struct timespec *host_ts)
1289 {
1290     struct target_timespec *target_ts;
1291 
1292     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1293         return -TARGET_EFAULT;
1294     }
1295     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1296     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1297     unlock_user_struct(target_ts, target_addr, 1);
1298     return 0;
1299 }
1300 
1301 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1302                                                  struct timespec *host_ts)
1303 {
1304     struct target__kernel_timespec *target_ts;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1307         return -TARGET_EFAULT;
1308     }
1309     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1310     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1311     unlock_user_struct(target_ts, target_addr, 1);
1312     return 0;
1313 }
1314 
1315 #if defined(TARGET_NR_gettimeofday)
1316 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1317                                              struct timezone *tz)
1318 {
1319     struct target_timezone *target_tz;
1320 
1321     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1322         return -TARGET_EFAULT;
1323     }
1324 
1325     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1327 
1328     unlock_user_struct(target_tz, target_tz_addr, 1);
1329 
1330     return 0;
1331 }
1332 #endif
1333 
1334 #if defined(TARGET_NR_settimeofday)
1335 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1336                                                abi_ulong target_tz_addr)
1337 {
1338     struct target_timezone *target_tz;
1339 
1340     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1341         return -TARGET_EFAULT;
1342     }
1343 
1344     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1345     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1346 
1347     unlock_user_struct(target_tz, target_tz_addr, 0);
1348 
1349     return 0;
1350 }
1351 #endif
1352 
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1354 #include <mqueue.h>
1355 
1356 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1357                                               abi_ulong target_mq_attr_addr)
1358 {
1359     struct target_mq_attr *target_mq_attr;
1360 
1361     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1362                           target_mq_attr_addr, 1))
1363         return -TARGET_EFAULT;
1364 
1365     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1369 
1370     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1371 
1372     return 0;
1373 }
1374 
1375 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1376                                             const struct mq_attr *attr)
1377 {
1378     struct target_mq_attr *target_mq_attr;
1379 
1380     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1381                           target_mq_attr_addr, 0))
1382         return -TARGET_EFAULT;
1383 
1384     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1385     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1386     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1387     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1388 
1389     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1390 
1391     return 0;
1392 }
1393 #endif
1394 
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1396 /* do_select() must return target values and target errnos. */
1397 static abi_long do_select(int n,
1398                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1399                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1400 {
1401     fd_set rfds, wfds, efds;
1402     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1403     struct timeval tv;
1404     struct timespec ts, *ts_ptr;
1405     abi_long ret;
1406 
1407     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1412     if (ret) {
1413         return ret;
1414     }
1415     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1416     if (ret) {
1417         return ret;
1418     }
1419 
1420     if (target_tv_addr) {
1421         if (copy_from_user_timeval(&tv, target_tv_addr))
1422             return -TARGET_EFAULT;
1423         ts.tv_sec = tv.tv_sec;
1424         ts.tv_nsec = tv.tv_usec * 1000;
1425         ts_ptr = &ts;
1426     } else {
1427         ts_ptr = NULL;
1428     }
1429 
1430     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1431                                   ts_ptr, NULL));
1432 
1433     if (!is_error(ret)) {
1434         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1435             return -TARGET_EFAULT;
1436         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1437             return -TARGET_EFAULT;
1438         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1439             return -TARGET_EFAULT;
1440 
1441         if (target_tv_addr) {
1442             tv.tv_sec = ts.tv_sec;
1443             tv.tv_usec = ts.tv_nsec / 1000;
1444             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1445                 return -TARGET_EFAULT;
1446             }
1447         }
1448     }
1449 
1450     return ret;
1451 }
1452 
1453 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1454 static abi_long do_old_select(abi_ulong arg1)
1455 {
1456     struct target_sel_arg_struct *sel;
1457     abi_ulong inp, outp, exp, tvp;
1458     long nsel;
1459 
1460     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1461         return -TARGET_EFAULT;
1462     }
1463 
1464     nsel = tswapal(sel->n);
1465     inp = tswapal(sel->inp);
1466     outp = tswapal(sel->outp);
1467     exp = tswapal(sel->exp);
1468     tvp = tswapal(sel->tvp);
1469 
1470     unlock_user_struct(sel, arg1, 0);
1471 
1472     return do_select(nsel, inp, outp, exp, tvp);
1473 }
1474 #endif
1475 #endif
1476 
1477 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1478 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1479                             abi_long arg4, abi_long arg5, abi_long arg6,
1480                             bool time64)
1481 {
1482     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1483     fd_set rfds, wfds, efds;
1484     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1485     struct timespec ts, *ts_ptr;
1486     abi_long ret;
1487 
1488     /*
1489      * The 6th arg is actually two args smashed together,
1490      * so we cannot use the C library.
1491      */
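    /*
     * Concretely, arg6 points at two ABI words:
     * { pointer to the guest sigset, size of that sigset }, the same
     * layout the kernel's pselect6 expects; they are unpacked into
     * arg_sigset and arg_sigsize further down.
     */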
1492     sigset_t set;
1493     struct {
1494         sigset_t *set;
1495         size_t size;
1496     } sig, *sig_ptr;
1497 
1498     abi_ulong arg_sigset, arg_sigsize, *arg7;
1499     target_sigset_t *target_sigset;
1500 
1501     n = arg1;
1502     rfd_addr = arg2;
1503     wfd_addr = arg3;
1504     efd_addr = arg4;
1505     ts_addr = arg5;
1506 
1507     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1508     if (ret) {
1509         return ret;
1510     }
1511     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1512     if (ret) {
1513         return ret;
1514     }
1515     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1516     if (ret) {
1517         return ret;
1518     }
1519 
1520     /*
1521      * This takes a timespec, and not a timeval, so we cannot
1522      * use the do_select() helper ...
1523      */
1524     if (ts_addr) {
1525         if (time64) {
1526             if (target_to_host_timespec64(&ts, ts_addr)) {
1527                 return -TARGET_EFAULT;
1528             }
1529         } else {
1530             if (target_to_host_timespec(&ts, ts_addr)) {
1531                 return -TARGET_EFAULT;
1532             }
1533         }
1534         ts_ptr = &ts;
1535     } else {
1536         ts_ptr = NULL;
1537     }
1538 
1539     /* Extract the two packed args for the sigset */
1540     if (arg6) {
1541         sig_ptr = &sig;
1542         sig.size = SIGSET_T_SIZE;
1543 
1544         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1545         if (!arg7) {
1546             return -TARGET_EFAULT;
1547         }
1548         arg_sigset = tswapal(arg7[0]);
1549         arg_sigsize = tswapal(arg7[1]);
1550         unlock_user(arg7, arg6, 0);
1551 
1552         if (arg_sigset) {
1553             sig.set = &set;
1554             if (arg_sigsize != sizeof(*target_sigset)) {
1555                 /* Like the kernel, we enforce correct size sigsets */
1556                 return -TARGET_EINVAL;
1557             }
1558             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1559                                       sizeof(*target_sigset), 1);
1560             if (!target_sigset) {
1561                 return -TARGET_EFAULT;
1562             }
1563             target_to_host_sigset(&set, target_sigset);
1564             unlock_user(target_sigset, arg_sigset, 0);
1565         } else {
1566             sig.set = NULL;
1567         }
1568     } else {
1569         sig_ptr = NULL;
1570     }
1571 
1572     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1573                                   ts_ptr, sig_ptr));
1574 
1575     if (!is_error(ret)) {
1576         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1577             return -TARGET_EFAULT;
1578         }
1579         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1580             return -TARGET_EFAULT;
1581         }
1582         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1583             return -TARGET_EFAULT;
1584         }
1585         if (time64) {
1586             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1587                 return -TARGET_EFAULT;
1588             }
1589         } else {
1590             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1591                 return -TARGET_EFAULT;
1592             }
1593         }
1594     }
1595     return ret;
1596 }
1597 #endif
1598 
1599 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1600     defined(TARGET_NR_ppoll_time64)
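/*
 * Common implementation of poll(), ppoll() and ppoll_time64(). For plain
 * poll() the millisecond timeout in arg3 is converted to a timespec so
 * that all three variants can be funnelled through safe_ppoll().
 */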
1601 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1602                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1603 {
1604     struct target_pollfd *target_pfd;
1605     unsigned int nfds = arg2;
1606     struct pollfd *pfd;
1607     unsigned int i;
1608     abi_long ret;
1609 
1610     pfd = NULL;
1611     target_pfd = NULL;
1612     if (nfds) {
1613         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1614             return -TARGET_EINVAL;
1615         }
1616         target_pfd = lock_user(VERIFY_WRITE, arg1,
1617                                sizeof(struct target_pollfd) * nfds, 1);
1618         if (!target_pfd) {
1619             return -TARGET_EFAULT;
1620         }
1621 
1622         pfd = alloca(sizeof(struct pollfd) * nfds);
1623         for (i = 0; i < nfds; i++) {
1624             pfd[i].fd = tswap32(target_pfd[i].fd);
1625             pfd[i].events = tswap16(target_pfd[i].events);
1626         }
1627     }
1628     if (ppoll) {
1629         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1630         target_sigset_t *target_set;
1631         sigset_t _set, *set = &_set;
1632 
1633         if (arg3) {
1634             if (time64) {
1635                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1636                     unlock_user(target_pfd, arg1, 0);
1637                     return -TARGET_EFAULT;
1638                 }
1639             } else {
1640                 if (target_to_host_timespec(timeout_ts, arg3)) {
1641                     unlock_user(target_pfd, arg1, 0);
1642                     return -TARGET_EFAULT;
1643                 }
1644             }
1645         } else {
1646             timeout_ts = NULL;
1647         }
1648 
1649         if (arg4) {
1650             if (arg5 != sizeof(target_sigset_t)) {
1651                 unlock_user(target_pfd, arg1, 0);
1652                 return -TARGET_EINVAL;
1653             }
1654 
1655             target_set = lock_user(VERIFY_READ, arg4,
1656                                    sizeof(target_sigset_t), 1);
1657             if (!target_set) {
1658                 unlock_user(target_pfd, arg1, 0);
1659                 return -TARGET_EFAULT;
1660             }
1661             target_to_host_sigset(set, target_set);
1662         } else {
1663             set = NULL;
1664         }
1665 
1666         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1667                                    set, SIGSET_T_SIZE));
1668 
1669         if (!is_error(ret) && arg3) {
1670             if (time64) {
1671                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1672                     return -TARGET_EFAULT;
1673                 }
1674             } else {
1675                 if (host_to_target_timespec(arg3, timeout_ts)) {
1676                     return -TARGET_EFAULT;
1677                 }
1678             }
1679         }
1680         if (arg4) {
1681             unlock_user(target_set, arg4, 0);
1682         }
1683     } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1696     }
1697 
1698     if (!is_error(ret)) {
1699         for (i = 0; i < nfds; i++) {
1700             target_pfd[i].revents = tswap16(pfd[i].revents);
1701         }
1702     }
1703     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1704     return ret;
1705 }
1706 #endif
1707 
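/* Thin wrapper so the caller gets ENOSYS when the host lacks pipe2(). */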
1708 static abi_long do_pipe2(int host_pipe[], int flags)
1709 {
1710 #ifdef CONFIG_PIPE2
1711     return pipe2(host_pipe, flags);
1712 #else
1713     return -ENOSYS;
1714 #endif
1715 }
1716 
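/*
 * Create a host pipe and hand the two descriptors back to the guest,
 * honouring the per-target calling convention of the original pipe
 * syscall (see below).
 */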
1717 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1718                         int flags, int is_pipe2)
1719 {
1720     int host_pipe[2];
1721     abi_long ret;
1722     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1723 
1724     if (is_error(ret))
1725         return get_errno(ret);
1726 
1727     /* Several targets have special calling conventions for the original
1728        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1729     if (!is_pipe2) {
1730 #if defined(TARGET_ALPHA)
1731         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1732         return host_pipe[0];
1733 #elif defined(TARGET_MIPS)
1734         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1735         return host_pipe[0];
1736 #elif defined(TARGET_SH4)
1737         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1738         return host_pipe[0];
1739 #elif defined(TARGET_SPARC)
1740         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1741         return host_pipe[0];
1742 #endif
1743     }
1744 
1745     if (put_user_s32(host_pipe[0], pipedes)
1746         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1747         return -TARGET_EFAULT;
1748     return get_errno(ret);
1749 }
1750 
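/*
 * Convert a target struct ip_mreq/ip_mreqn (as used by IP_ADD_MEMBERSHIP
 * and friends) into a host struct ip_mreqn. The addresses are already in
 * network byte order; only the interface index needs swapping, and only
 * when the longer ip_mreqn form was supplied.
 */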
1751 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1752                                               abi_ulong target_addr,
1753                                               socklen_t len)
1754 {
1755     struct target_ip_mreqn *target_smreqn;
1756 
1757     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1758     if (!target_smreqn)
1759         return -TARGET_EFAULT;
1760     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1761     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1762     if (len == sizeof(struct target_ip_mreqn))
1763         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1764     unlock_user(target_smreqn, target_addr, 0);
1765 
1766     return 0;
1767 }
1768 
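/*
 * Copy a sockaddr from guest memory into a host buffer, swapping the
 * family and the family-specific fields (netlink pid/groups, packet
 * ifindex/hatype) and fixing up unterminated AF_UNIX sun_path lengths.
 */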
1769 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1770                                                abi_ulong target_addr,
1771                                                socklen_t len)
1772 {
1773     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1774     sa_family_t sa_family;
1775     struct target_sockaddr *target_saddr;
1776 
1777     if (fd_trans_target_to_host_addr(fd)) {
1778         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1779     }
1780 
1781     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1782     if (!target_saddr)
1783         return -TARGET_EFAULT;
1784 
1785     sa_family = tswap16(target_saddr->sa_family);
1786 
    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */
1794 
1795     if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char *)target_saddr;

            if (cp[len - 1] && !cp[len]) {
                len++;
            }
        }
        if (len > unix_maxlen) {
            len = unix_maxlen;
        }
1804     }
1805 
1806     memcpy(addr, target_saddr, len);
1807     addr->sa_family = sa_family;
1808     if (sa_family == AF_NETLINK) {
1809         struct sockaddr_nl *nladdr;
1810 
1811         nladdr = (struct sockaddr_nl *)addr;
1812         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1813         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1814     } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1820     }
1821     unlock_user(target_saddr, target_addr, 0);
1822 
1823     return 0;
1824 }
1825 
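/*
 * Copy a host sockaddr back to guest memory, swapping the family and the
 * family-specific fields for AF_NETLINK, AF_PACKET and AF_INET6.
 */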
1826 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1827                                                struct sockaddr *addr,
1828                                                socklen_t len)
1829 {
1830     struct target_sockaddr *target_saddr;
1831 
1832     if (len == 0) {
1833         return 0;
1834     }
1835     assert(addr);
1836 
1837     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1838     if (!target_saddr)
1839         return -TARGET_EFAULT;
1840     memcpy(target_saddr, addr, len);
1841     if (len >= offsetof(struct target_sockaddr, sa_family) +
1842         sizeof(target_saddr->sa_family)) {
1843         target_saddr->sa_family = tswap16(addr->sa_family);
1844     }
1845     if (addr->sa_family == AF_NETLINK &&
1846         len >= sizeof(struct target_sockaddr_nl)) {
1847         struct target_sockaddr_nl *target_nl =
1848                (struct target_sockaddr_nl *)target_saddr;
1849         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1850         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1851     } else if (addr->sa_family == AF_PACKET) {
1852         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1853         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1854         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1855     } else if (addr->sa_family == AF_INET6 &&
1856                len >= sizeof(struct target_sockaddr_in6)) {
1857         struct target_sockaddr_in6 *target_in6 =
1858                (struct target_sockaddr_in6 *)target_saddr;
1859         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1860     }
1861     unlock_user(target_saddr, target_addr, len);
1862 
1863     return 0;
1864 }
1865 
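/*
 * Convert the ancillary data (control messages) attached to a guest
 * msghdr into host format. Only SCM_RIGHTS and SCM_CREDENTIALS are
 * converted field by field; other payloads are passed through unchanged
 * with a LOG_UNIMP warning.
 */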
1866 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1867                                            struct target_msghdr *target_msgh)
1868 {
1869     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1870     abi_long msg_controllen;
1871     abi_ulong target_cmsg_addr;
1872     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1873     socklen_t space = 0;
1874 
1875     msg_controllen = tswapal(target_msgh->msg_controllen);
1876     if (msg_controllen < sizeof (struct target_cmsghdr))
1877         goto the_end;
1878     target_cmsg_addr = tswapal(target_msgh->msg_control);
1879     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1880     target_cmsg_start = target_cmsg;
1881     if (!target_cmsg)
1882         return -TARGET_EFAULT;
1883 
1884     while (cmsg && target_cmsg) {
1885         void *data = CMSG_DATA(cmsg);
1886         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1887 
1888         int len = tswapal(target_cmsg->cmsg_len)
1889             - sizeof(struct target_cmsghdr);
1890 
1891         space += CMSG_SPACE(len);
1892         if (space > msgh->msg_controllen) {
1893             space -= CMSG_SPACE(len);
1894             /* This is a QEMU bug, since we allocated the payload
1895              * area ourselves (unlike overflow in host-to-target
1896              * conversion, which is just the guest giving us a buffer
1897              * that's too small). It can't happen for the payload types
1898              * we currently support; if it becomes an issue in future
1899              * we would need to improve our allocation strategy to
1900              * something more intelligent than "twice the size of the
1901              * target buffer we're reading from".
1902              */
1903             qemu_log_mask(LOG_UNIMP,
1904                           ("Unsupported ancillary data %d/%d: "
1905                            "unhandled msg size\n"),
1906                           tswap32(target_cmsg->cmsg_level),
1907                           tswap32(target_cmsg->cmsg_type));
1908             break;
1909         }
1910 
1911         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1912             cmsg->cmsg_level = SOL_SOCKET;
1913         } else {
1914             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1915         }
1916         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1917         cmsg->cmsg_len = CMSG_LEN(len);
1918 
1919         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1920             int *fd = (int *)data;
1921             int *target_fd = (int *)target_data;
1922             int i, numfds = len / sizeof(int);
1923 
1924             for (i = 0; i < numfds; i++) {
1925                 __get_user(fd[i], target_fd + i);
1926             }
1927         } else if (cmsg->cmsg_level == SOL_SOCKET
1928                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1929             struct ucred *cred = (struct ucred *)data;
1930             struct target_ucred *target_cred =
1931                 (struct target_ucred *)target_data;
1932 
1933             __get_user(cred->pid, &target_cred->pid);
1934             __get_user(cred->uid, &target_cred->uid);
1935             __get_user(cred->gid, &target_cred->gid);
1936         } else {
1937             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1938                           cmsg->cmsg_level, cmsg->cmsg_type);
1939             memcpy(data, target_data, len);
1940         }
1941 
1942         cmsg = CMSG_NXTHDR(msgh, cmsg);
1943         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1944                                          target_cmsg_start);
1945     }
1946     unlock_user(target_cmsg, target_cmsg_addr, 0);
1947  the_end:
1948     msgh->msg_controllen = space;
1949     return 0;
1950 }
1951 
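/*
 * Convert host ancillary data back into the guest msghdr, truncating the
 * payload (and reporting MSG_CTRUNC) when the guest control buffer is
 * too small.
 */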
1952 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1953                                            struct msghdr *msgh)
1954 {
1955     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1956     abi_long msg_controllen;
1957     abi_ulong target_cmsg_addr;
1958     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1959     socklen_t space = 0;
1960 
1961     msg_controllen = tswapal(target_msgh->msg_controllen);
1962     if (msg_controllen < sizeof (struct target_cmsghdr))
1963         goto the_end;
1964     target_cmsg_addr = tswapal(target_msgh->msg_control);
1965     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1966     target_cmsg_start = target_cmsg;
1967     if (!target_cmsg)
1968         return -TARGET_EFAULT;
1969 
1970     while (cmsg && target_cmsg) {
1971         void *data = CMSG_DATA(cmsg);
1972         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1973 
1974         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1975         int tgt_len, tgt_space;
1976 
1977         /* We never copy a half-header but may copy half-data;
1978          * this is Linux's behaviour in put_cmsg(). Note that
1979          * truncation here is a guest problem (which we report
1980          * to the guest via the CTRUNC bit), unlike truncation
1981          * in target_to_host_cmsg, which is a QEMU bug.
1982          */
1983         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1984             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1985             break;
1986         }
1987 
1988         if (cmsg->cmsg_level == SOL_SOCKET) {
1989             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1990         } else {
1991             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1992         }
1993         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1994 
1995         /* Payload types which need a different size of payload on
1996          * the target must adjust tgt_len here.
1997          */
1998         tgt_len = len;
1999         switch (cmsg->cmsg_level) {
2000         case SOL_SOCKET:
2001             switch (cmsg->cmsg_type) {
2002             case SO_TIMESTAMP:
2003                 tgt_len = sizeof(struct target_timeval);
2004                 break;
2005             default:
2006                 break;
2007             }
2008             break;
2009         default:
2010             break;
2011         }
2012 
2013         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2014             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2015             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2016         }
2017 
2018         /* We must now copy-and-convert len bytes of payload
2019          * into tgt_len bytes of destination space. Bear in mind
2020          * that in both source and destination we may be dealing
2021          * with a truncated value!
2022          */
2023         switch (cmsg->cmsg_level) {
2024         case SOL_SOCKET:
2025             switch (cmsg->cmsg_type) {
2026             case SCM_RIGHTS:
2027             {
2028                 int *fd = (int *)data;
2029                 int *target_fd = (int *)target_data;
2030                 int i, numfds = tgt_len / sizeof(int);
2031 
2032                 for (i = 0; i < numfds; i++) {
2033                     __put_user(fd[i], target_fd + i);
2034                 }
2035                 break;
2036             }
2037             case SO_TIMESTAMP:
2038             {
2039                 struct timeval *tv = (struct timeval *)data;
2040                 struct target_timeval *target_tv =
2041                     (struct target_timeval *)target_data;
2042 
2043                 if (len != sizeof(struct timeval) ||
2044                     tgt_len != sizeof(struct target_timeval)) {
2045                     goto unimplemented;
2046                 }
2047 
2048                 /* copy struct timeval to target */
2049                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2050                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2051                 break;
2052             }
2053             case SCM_CREDENTIALS:
2054             {
2055                 struct ucred *cred = (struct ucred *)data;
2056                 struct target_ucred *target_cred =
2057                     (struct target_ucred *)target_data;
2058 
2059                 __put_user(cred->pid, &target_cred->pid);
2060                 __put_user(cred->uid, &target_cred->uid);
2061                 __put_user(cred->gid, &target_cred->gid);
2062                 break;
2063             }
2064             default:
2065                 goto unimplemented;
2066             }
2067             break;
2068 
2069         case SOL_IP:
2070             switch (cmsg->cmsg_type) {
2071             case IP_TTL:
2072             {
2073                 uint32_t *v = (uint32_t *)data;
2074                 uint32_t *t_int = (uint32_t *)target_data;
2075 
2076                 if (len != sizeof(uint32_t) ||
2077                     tgt_len != sizeof(uint32_t)) {
2078                     goto unimplemented;
2079                 }
2080                 __put_user(*v, t_int);
2081                 break;
2082             }
2083             case IP_RECVERR:
2084             {
2085                 struct errhdr_t {
2086                    struct sock_extended_err ee;
2087                    struct sockaddr_in offender;
2088                 };
2089                 struct errhdr_t *errh = (struct errhdr_t *)data;
2090                 struct errhdr_t *target_errh =
2091                     (struct errhdr_t *)target_data;
2092 
2093                 if (len != sizeof(struct errhdr_t) ||
2094                     tgt_len != sizeof(struct errhdr_t)) {
2095                     goto unimplemented;
2096                 }
2097                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2098                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2099                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2100                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2101                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2102                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2103                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2104                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2105                     (void *) &errh->offender, sizeof(errh->offender));
2106                 break;
2107             }
2108             default:
2109                 goto unimplemented;
2110             }
2111             break;
2112 
2113         case SOL_IPV6:
2114             switch (cmsg->cmsg_type) {
2115             case IPV6_HOPLIMIT:
2116             {
2117                 uint32_t *v = (uint32_t *)data;
2118                 uint32_t *t_int = (uint32_t *)target_data;
2119 
2120                 if (len != sizeof(uint32_t) ||
2121                     tgt_len != sizeof(uint32_t)) {
2122                     goto unimplemented;
2123                 }
2124                 __put_user(*v, t_int);
2125                 break;
2126             }
2127             case IPV6_RECVERR:
2128             {
2129                 struct errhdr6_t {
2130                    struct sock_extended_err ee;
2131                    struct sockaddr_in6 offender;
2132                 };
2133                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2134                 struct errhdr6_t *target_errh =
2135                     (struct errhdr6_t *)target_data;
2136 
2137                 if (len != sizeof(struct errhdr6_t) ||
2138                     tgt_len != sizeof(struct errhdr6_t)) {
2139                     goto unimplemented;
2140                 }
2141                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2142                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2143                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2144                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2145                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2146                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2147                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2148                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2149                     (void *) &errh->offender, sizeof(errh->offender));
2150                 break;
2151             }
2152             default:
2153                 goto unimplemented;
2154             }
2155             break;
2156 
2157         default:
2158         unimplemented:
2159             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2160                           cmsg->cmsg_level, cmsg->cmsg_type);
2161             memcpy(target_data, data, MIN(len, tgt_len));
2162             if (tgt_len > len) {
2163                 memset(target_data + len, 0, tgt_len - len);
2164             }
2165         }
2166 
2167         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2168         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2169         if (msg_controllen < tgt_space) {
2170             tgt_space = msg_controllen;
2171         }
2172         msg_controllen -= tgt_space;
2173         space += tgt_space;
2174         cmsg = CMSG_NXTHDR(msgh, cmsg);
2175         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2176                                          target_cmsg_start);
2177     }
2178     unlock_user(target_cmsg, target_cmsg_addr, space);
2179  the_end:
2180     target_msgh->msg_controllen = tswapal(space);
2181     return 0;
2182 }
2183 
2184 /* do_setsockopt() Must return target values and target errnos. */
2185 static abi_long do_setsockopt(int sockfd, int level, int optname,
2186                               abi_ulong optval_addr, socklen_t optlen)
2187 {
2188     abi_long ret;
2189     int val;
2190     struct ip_mreqn *ip_mreq;
2191     struct ip_mreq_source *ip_mreq_source;
2192 
2193     switch(level) {
2194     case SOL_TCP:
2195     case SOL_UDP:
2196         /* TCP and UDP options all take an 'int' value.  */
2197         if (optlen < sizeof(uint32_t))
2198             return -TARGET_EINVAL;
2199 
2200         if (get_user_u32(val, optval_addr))
2201             return -TARGET_EFAULT;
2202         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2203         break;
2204     case SOL_IP:
2205         switch(optname) {
2206         case IP_TOS:
2207         case IP_TTL:
2208         case IP_HDRINCL:
2209         case IP_ROUTER_ALERT:
2210         case IP_RECVOPTS:
2211         case IP_RETOPTS:
2212         case IP_PKTINFO:
2213         case IP_MTU_DISCOVER:
2214         case IP_RECVERR:
2215         case IP_RECVTTL:
2216         case IP_RECVTOS:
2217 #ifdef IP_FREEBIND
2218         case IP_FREEBIND:
2219 #endif
2220         case IP_MULTICAST_TTL:
2221         case IP_MULTICAST_LOOP:
2222             val = 0;
2223             if (optlen >= sizeof(uint32_t)) {
2224                 if (get_user_u32(val, optval_addr))
2225                     return -TARGET_EFAULT;
2226             } else if (optlen >= 1) {
2227                 if (get_user_u8(val, optval_addr))
2228                     return -TARGET_EFAULT;
2229             }
2230             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2231             break;
2232         case IP_ADD_MEMBERSHIP:
2233         case IP_DROP_MEMBERSHIP:
2234             if (optlen < sizeof (struct target_ip_mreq) ||
2235                 optlen > sizeof (struct target_ip_mreqn))
2236                 return -TARGET_EINVAL;
2237 
2238             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2239             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2240             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2241             break;
2242 
2243         case IP_BLOCK_SOURCE:
2244         case IP_UNBLOCK_SOURCE:
2245         case IP_ADD_SOURCE_MEMBERSHIP:
2246         case IP_DROP_SOURCE_MEMBERSHIP:
2247             if (optlen != sizeof (struct target_ip_mreq_source))
2248                 return -TARGET_EINVAL;
2249 
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
2253             break;
2254 
2255         default:
2256             goto unimplemented;
2257         }
2258         break;
2259     case SOL_IPV6:
2260         switch (optname) {
2261         case IPV6_MTU_DISCOVER:
2262         case IPV6_MTU:
2263         case IPV6_V6ONLY:
2264         case IPV6_RECVPKTINFO:
2265         case IPV6_UNICAST_HOPS:
2266         case IPV6_MULTICAST_HOPS:
2267         case IPV6_MULTICAST_LOOP:
2268         case IPV6_RECVERR:
2269         case IPV6_RECVHOPLIMIT:
2270         case IPV6_2292HOPLIMIT:
2271         case IPV6_CHECKSUM:
2272         case IPV6_ADDRFORM:
2273         case IPV6_2292PKTINFO:
2274         case IPV6_RECVTCLASS:
2275         case IPV6_RECVRTHDR:
2276         case IPV6_2292RTHDR:
2277         case IPV6_RECVHOPOPTS:
2278         case IPV6_2292HOPOPTS:
2279         case IPV6_RECVDSTOPTS:
2280         case IPV6_2292DSTOPTS:
2281         case IPV6_TCLASS:
2282         case IPV6_ADDR_PREFERENCES:
2283 #ifdef IPV6_RECVPATHMTU
2284         case IPV6_RECVPATHMTU:
2285 #endif
2286 #ifdef IPV6_TRANSPARENT
2287         case IPV6_TRANSPARENT:
2288 #endif
2289 #ifdef IPV6_FREEBIND
2290         case IPV6_FREEBIND:
2291 #endif
2292 #ifdef IPV6_RECVORIGDSTADDR
2293         case IPV6_RECVORIGDSTADDR:
2294 #endif
2295             val = 0;
2296             if (optlen < sizeof(uint32_t)) {
2297                 return -TARGET_EINVAL;
2298             }
2299             if (get_user_u32(val, optval_addr)) {
2300                 return -TARGET_EFAULT;
2301             }
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &val, sizeof(val)));
2304             break;
2305         case IPV6_PKTINFO:
2306         {
2307             struct in6_pktinfo pki;
2308 
2309             if (optlen < sizeof(pki)) {
2310                 return -TARGET_EINVAL;
2311             }
2312 
2313             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2314                 return -TARGET_EFAULT;
2315             }
2316 
2317             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2318 
2319             ret = get_errno(setsockopt(sockfd, level, optname,
2320                                        &pki, sizeof(pki)));
2321             break;
2322         }
2323         case IPV6_ADD_MEMBERSHIP:
2324         case IPV6_DROP_MEMBERSHIP:
2325         {
2326             struct ipv6_mreq ipv6mreq;
2327 
2328             if (optlen < sizeof(ipv6mreq)) {
2329                 return -TARGET_EINVAL;
2330             }
2331 
2332             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2333                 return -TARGET_EFAULT;
2334             }
2335 
2336             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2337 
2338             ret = get_errno(setsockopt(sockfd, level, optname,
2339                                        &ipv6mreq, sizeof(ipv6mreq)));
2340             break;
2341         }
2342         default:
2343             goto unimplemented;
2344         }
2345         break;
2346     case SOL_ICMPV6:
2347         switch (optname) {
2348         case ICMPV6_FILTER:
2349         {
2350             struct icmp6_filter icmp6f;
2351 
2352             if (optlen > sizeof(icmp6f)) {
2353                 optlen = sizeof(icmp6f);
2354             }
2355 
2356             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2357                 return -TARGET_EFAULT;
2358             }
2359 
2360             for (val = 0; val < 8; val++) {
2361                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2362             }
2363 
2364             ret = get_errno(setsockopt(sockfd, level, optname,
2365                                        &icmp6f, optlen));
2366             break;
2367         }
2368         default:
2369             goto unimplemented;
2370         }
2371         break;
2372     case SOL_RAW:
2373         switch (optname) {
2374         case ICMP_FILTER:
2375         case IPV6_CHECKSUM:
            /* these take a u32 value */
2377             if (optlen < sizeof(uint32_t)) {
2378                 return -TARGET_EINVAL;
2379             }
2380 
2381             if (get_user_u32(val, optval_addr)) {
2382                 return -TARGET_EFAULT;
2383             }
2384             ret = get_errno(setsockopt(sockfd, level, optname,
2385                                        &val, sizeof(val)));
2386             break;
2387 
2388         default:
2389             goto unimplemented;
2390         }
2391         break;
2392 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2393     case SOL_ALG:
2394         switch (optname) {
2395         case ALG_SET_KEY:
2396         {
            char *alg_key = g_try_malloc(optlen);
2398 
2399             if (!alg_key) {
2400                 return -TARGET_ENOMEM;
2401             }
2402             if (copy_from_user(alg_key, optval_addr, optlen)) {
2403                 g_free(alg_key);
2404                 return -TARGET_EFAULT;
2405             }
2406             ret = get_errno(setsockopt(sockfd, level, optname,
2407                                        alg_key, optlen));
2408             g_free(alg_key);
2409             break;
2410         }
2411         case ALG_SET_AEAD_AUTHSIZE:
2412         {
2413             ret = get_errno(setsockopt(sockfd, level, optname,
2414                                        NULL, optlen));
2415             break;
2416         }
2417         default:
2418             goto unimplemented;
2419         }
2420         break;
2421 #endif
2422     case TARGET_SOL_SOCKET:
2423         switch (optname) {
2424         case TARGET_SO_RCVTIMEO:
2425         {
2426                 struct timeval tv;
2427 
2428                 optname = SO_RCVTIMEO;
2429 
2430 set_timeout:
2431                 if (optlen != sizeof(struct target_timeval)) {
2432                     return -TARGET_EINVAL;
2433                 }
2434 
2435                 if (copy_from_user_timeval(&tv, optval_addr)) {
2436                     return -TARGET_EFAULT;
2437                 }
2438 
2439                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2440                                 &tv, sizeof(tv)));
2441                 return ret;
2442         }
2443         case TARGET_SO_SNDTIMEO:
2444                 optname = SO_SNDTIMEO;
2445                 goto set_timeout;
2446         case TARGET_SO_ATTACH_FILTER:
2447         {
2448                 struct target_sock_fprog *tfprog;
2449                 struct target_sock_filter *tfilter;
2450                 struct sock_fprog fprog;
2451                 struct sock_filter *filter;
2452                 int i;
2453 
2454                 if (optlen != sizeof(*tfprog)) {
2455                     return -TARGET_EINVAL;
2456                 }
2457                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2458                     return -TARGET_EFAULT;
2459                 }
2460                 if (!lock_user_struct(VERIFY_READ, tfilter,
2461                                       tswapal(tfprog->filter), 0)) {
2462                     unlock_user_struct(tfprog, optval_addr, 1);
2463                     return -TARGET_EFAULT;
2464                 }
2465 
2466                 fprog.len = tswap16(tfprog->len);
2467                 filter = g_try_new(struct sock_filter, fprog.len);
2468                 if (filter == NULL) {
2469                     unlock_user_struct(tfilter, tfprog->filter, 1);
2470                     unlock_user_struct(tfprog, optval_addr, 1);
2471                     return -TARGET_ENOMEM;
2472                 }
2473                 for (i = 0; i < fprog.len; i++) {
2474                     filter[i].code = tswap16(tfilter[i].code);
2475                     filter[i].jt = tfilter[i].jt;
2476                     filter[i].jf = tfilter[i].jf;
2477                     filter[i].k = tswap32(tfilter[i].k);
2478                 }
2479                 fprog.filter = filter;
2480 
2481                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2482                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2483                 g_free(filter);
2484 
2485                 unlock_user_struct(tfilter, tfprog->filter, 1);
2486                 unlock_user_struct(tfprog, optval_addr, 1);
2487                 return ret;
2488         }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user(dev_ifname, optval_addr, 0);
                return ret;
        }
2509         case TARGET_SO_LINGER:
2510         {
2511                 struct linger lg;
2512                 struct target_linger *tlg;
2513 
2514                 if (optlen != sizeof(struct target_linger)) {
2515                     return -TARGET_EINVAL;
2516                 }
2517                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2518                     return -TARGET_EFAULT;
2519                 }
2520                 __get_user(lg.l_onoff, &tlg->l_onoff);
2521                 __get_user(lg.l_linger, &tlg->l_linger);
2522                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2523                                 &lg, sizeof(lg)));
2524                 unlock_user_struct(tlg, optval_addr, 0);
2525                 return ret;
2526         }
2527             /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
2592         default:
2593             goto unimplemented;
2594         }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                   &val, sizeof(val)));
2601         break;
2602 #ifdef SOL_NETLINK
2603     case SOL_NETLINK:
2604         switch (optname) {
2605         case NETLINK_PKTINFO:
2606         case NETLINK_ADD_MEMBERSHIP:
2607         case NETLINK_DROP_MEMBERSHIP:
2608         case NETLINK_BROADCAST_ERROR:
2609         case NETLINK_NO_ENOBUFS:
2610 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2611         case NETLINK_LISTEN_ALL_NSID:
2612         case NETLINK_CAP_ACK:
2613 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2614 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2615         case NETLINK_EXT_ACK:
2616 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2618         case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2620             break;
2621         default:
2622             goto unimplemented;
2623         }
2624         val = 0;
2625         if (optlen < sizeof(uint32_t)) {
2626             return -TARGET_EINVAL;
2627         }
2628         if (get_user_u32(val, optval_addr)) {
2629             return -TARGET_EFAULT;
2630         }
2631         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2632                                    sizeof(val)));
2633         break;
2634 #endif /* SOL_NETLINK */
2635     default:
2636     unimplemented:
2637         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2638                       level, optname);
2639         ret = -TARGET_ENOPROTOOPT;
2640     }
2641     return ret;
2642 }
2643 
2644 /* do_getsockopt() Must return target values and target errnos. */
2645 static abi_long do_getsockopt(int sockfd, int level, int optname,
2646                               abi_ulong optval_addr, abi_ulong optlen)
2647 {
2648     abi_long ret;
2649     int len, val;
2650     socklen_t lv;
2651 
2652     switch(level) {
2653     case TARGET_SOL_SOCKET:
2654         level = SOL_SOCKET;
2655         switch (optname) {
2656         /* These don't just return a single integer */
2657         case TARGET_SO_PEERNAME:
2658             goto unimplemented;
2659         case TARGET_SO_RCVTIMEO: {
2660             struct timeval tv;
2661             socklen_t tvlen;
2662 
2663             optname = SO_RCVTIMEO;
2664 
2665 get_timeout:
2666             if (get_user_u32(len, optlen)) {
2667                 return -TARGET_EFAULT;
2668             }
2669             if (len < 0) {
2670                 return -TARGET_EINVAL;
2671             }
2672 
2673             tvlen = sizeof(tv);
2674             ret = get_errno(getsockopt(sockfd, level, optname,
2675                                        &tv, &tvlen));
2676             if (ret < 0) {
2677                 return ret;
2678             }
2679             if (len > sizeof(struct target_timeval)) {
2680                 len = sizeof(struct target_timeval);
2681             }
2682             if (copy_to_user_timeval(optval_addr, &tv)) {
2683                 return -TARGET_EFAULT;
2684             }
2685             if (put_user_u32(len, optlen)) {
2686                 return -TARGET_EFAULT;
2687             }
2688             break;
2689         }
2690         case TARGET_SO_SNDTIMEO:
2691             optname = SO_SNDTIMEO;
2692             goto get_timeout;
2693         case TARGET_SO_PEERCRED: {
2694             struct ucred cr;
2695             socklen_t crlen;
2696             struct target_ucred *tcr;
2697 
2698             if (get_user_u32(len, optlen)) {
2699                 return -TARGET_EFAULT;
2700             }
2701             if (len < 0) {
2702                 return -TARGET_EINVAL;
2703             }
2704 
2705             crlen = sizeof(cr);
2706             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2707                                        &cr, &crlen));
2708             if (ret < 0) {
2709                 return ret;
2710             }
2711             if (len > crlen) {
2712                 len = crlen;
2713             }
2714             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2715                 return -TARGET_EFAULT;
2716             }
2717             __put_user(cr.pid, &tcr->pid);
2718             __put_user(cr.uid, &tcr->uid);
2719             __put_user(cr.gid, &tcr->gid);
2720             unlock_user_struct(tcr, optval_addr, 1);
2721             if (put_user_u32(len, optlen)) {
2722                 return -TARGET_EFAULT;
2723             }
2724             break;
2725         }
2726         case TARGET_SO_PEERSEC: {
2727             char *name;
2728 
2729             if (get_user_u32(len, optlen)) {
2730                 return -TARGET_EFAULT;
2731             }
2732             if (len < 0) {
2733                 return -TARGET_EINVAL;
2734             }
2735             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2736             if (!name) {
2737                 return -TARGET_EFAULT;
2738             }
2739             lv = len;
2740             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2741                                        name, &lv));
2742             if (put_user_u32(lv, optlen)) {
2743                 ret = -TARGET_EFAULT;
2744             }
2745             unlock_user(name, optval_addr, lv);
2746             break;
2747         }
2748         case TARGET_SO_LINGER:
2749         {
2750             struct linger lg;
2751             socklen_t lglen;
2752             struct target_linger *tlg;
2753 
2754             if (get_user_u32(len, optlen)) {
2755                 return -TARGET_EFAULT;
2756             }
2757             if (len < 0) {
2758                 return -TARGET_EINVAL;
2759             }
2760 
2761             lglen = sizeof(lg);
2762             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2763                                        &lg, &lglen));
2764             if (ret < 0) {
2765                 return ret;
2766             }
2767             if (len > lglen) {
2768                 len = lglen;
2769             }
2770             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2771                 return -TARGET_EFAULT;
2772             }
2773             __put_user(lg.l_onoff, &tlg->l_onoff);
2774             __put_user(lg.l_linger, &tlg->l_linger);
2775             unlock_user_struct(tlg, optval_addr, 1);
2776             if (put_user_u32(len, optlen)) {
2777                 return -TARGET_EFAULT;
2778             }
2779             break;
2780         }
2781         /* Options with 'int' argument.  */
2782         case TARGET_SO_DEBUG:
2783             optname = SO_DEBUG;
2784             goto int_case;
2785         case TARGET_SO_REUSEADDR:
2786             optname = SO_REUSEADDR;
2787             goto int_case;
2788 #ifdef SO_REUSEPORT
2789         case TARGET_SO_REUSEPORT:
2790             optname = SO_REUSEPORT;
2791             goto int_case;
2792 #endif
2793         case TARGET_SO_TYPE:
2794             optname = SO_TYPE;
2795             goto int_case;
2796         case TARGET_SO_ERROR:
2797             optname = SO_ERROR;
2798             goto int_case;
2799         case TARGET_SO_DONTROUTE:
2800             optname = SO_DONTROUTE;
2801             goto int_case;
2802         case TARGET_SO_BROADCAST:
2803             optname = SO_BROADCAST;
2804             goto int_case;
2805         case TARGET_SO_SNDBUF:
2806             optname = SO_SNDBUF;
2807             goto int_case;
2808         case TARGET_SO_RCVBUF:
2809             optname = SO_RCVBUF;
2810             goto int_case;
2811         case TARGET_SO_KEEPALIVE:
2812             optname = SO_KEEPALIVE;
2813             goto int_case;
2814         case TARGET_SO_OOBINLINE:
2815             optname = SO_OOBINLINE;
2816             goto int_case;
2817         case TARGET_SO_NO_CHECK:
2818             optname = SO_NO_CHECK;
2819             goto int_case;
2820         case TARGET_SO_PRIORITY:
2821             optname = SO_PRIORITY;
2822             goto int_case;
2823 #ifdef SO_BSDCOMPAT
2824         case TARGET_SO_BSDCOMPAT:
2825             optname = SO_BSDCOMPAT;
2826             goto int_case;
2827 #endif
2828         case TARGET_SO_PASSCRED:
2829             optname = SO_PASSCRED;
2830             goto int_case;
2831         case TARGET_SO_TIMESTAMP:
2832             optname = SO_TIMESTAMP;
2833             goto int_case;
2834         case TARGET_SO_RCVLOWAT:
2835             optname = SO_RCVLOWAT;
2836             goto int_case;
2837         case TARGET_SO_ACCEPTCONN:
2838             optname = SO_ACCEPTCONN;
2839             goto int_case;
2840         default:
2841             goto int_case;
2842         }
2843         break;
2844     case SOL_TCP:
2845     case SOL_UDP:
2846         /* TCP and UDP options all take an 'int' value.  */
2847     int_case:
2848         if (get_user_u32(len, optlen))
2849             return -TARGET_EFAULT;
2850         if (len < 0)
2851             return -TARGET_EINVAL;
2852         lv = sizeof(lv);
2853         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2854         if (ret < 0)
2855             return ret;
2856         if (optname == SO_TYPE) {
2857             val = host_to_target_sock_type(val);
2858         }
2859         if (len > lv)
2860             len = lv;
2861         if (len == 4) {
2862             if (put_user_u32(val, optval_addr))
2863                 return -TARGET_EFAULT;
2864         } else {
2865             if (put_user_u8(val, optval_addr))
2866                 return -TARGET_EFAULT;
2867         }
2868         if (put_user_u32(len, optlen))
2869             return -TARGET_EFAULT;
2870         break;
2871     case SOL_IP:
2872         switch(optname) {
2873         case IP_TOS:
2874         case IP_TTL:
2875         case IP_HDRINCL:
2876         case IP_ROUTER_ALERT:
2877         case IP_RECVOPTS:
2878         case IP_RETOPTS:
2879         case IP_PKTINFO:
2880         case IP_MTU_DISCOVER:
2881         case IP_RECVERR:
2882         case IP_RECVTOS:
2883 #ifdef IP_FREEBIND
2884         case IP_FREEBIND:
2885 #endif
2886         case IP_MULTICAST_TTL:
2887         case IP_MULTICAST_LOOP:
2888             if (get_user_u32(len, optlen))
2889                 return -TARGET_EFAULT;
2890             if (len < 0)
2891                 return -TARGET_EINVAL;
2892             lv = sizeof(lv);
2893             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2894             if (ret < 0)
2895                 return ret;
2896             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2897                 len = 1;
2898                 if (put_user_u32(len, optlen)
2899                     || put_user_u8(val, optval_addr))
2900                     return -TARGET_EFAULT;
2901             } else {
2902                 if (len > sizeof(int))
2903                     len = sizeof(int);
2904                 if (put_user_u32(len, optlen)
2905                     || put_user_u32(val, optval_addr))
2906                     return -TARGET_EFAULT;
2907             }
2908             break;
2909         default:
2910             ret = -TARGET_ENOPROTOOPT;
2911             break;
2912         }
2913         break;
2914     case SOL_IPV6:
2915         switch (optname) {
2916         case IPV6_MTU_DISCOVER:
2917         case IPV6_MTU:
2918         case IPV6_V6ONLY:
2919         case IPV6_RECVPKTINFO:
2920         case IPV6_UNICAST_HOPS:
2921         case IPV6_MULTICAST_HOPS:
2922         case IPV6_MULTICAST_LOOP:
2923         case IPV6_RECVERR:
2924         case IPV6_RECVHOPLIMIT:
2925         case IPV6_2292HOPLIMIT:
2926         case IPV6_CHECKSUM:
2927         case IPV6_ADDRFORM:
2928         case IPV6_2292PKTINFO:
2929         case IPV6_RECVTCLASS:
2930         case IPV6_RECVRTHDR:
2931         case IPV6_2292RTHDR:
2932         case IPV6_RECVHOPOPTS:
2933         case IPV6_2292HOPOPTS:
2934         case IPV6_RECVDSTOPTS:
2935         case IPV6_2292DSTOPTS:
2936         case IPV6_TCLASS:
2937         case IPV6_ADDR_PREFERENCES:
2938 #ifdef IPV6_RECVPATHMTU
2939         case IPV6_RECVPATHMTU:
2940 #endif
2941 #ifdef IPV6_TRANSPARENT
2942         case IPV6_TRANSPARENT:
2943 #endif
2944 #ifdef IPV6_FREEBIND
2945         case IPV6_FREEBIND:
2946 #endif
2947 #ifdef IPV6_RECVORIGDSTADDR
2948         case IPV6_RECVORIGDSTADDR:
2949 #endif
2950             if (get_user_u32(len, optlen))
2951                 return -TARGET_EFAULT;
2952             if (len < 0)
2953                 return -TARGET_EINVAL;
2954             lv = sizeof(lv);
2955             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2956             if (ret < 0)
2957                 return ret;
2958             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2959                 len = 1;
2960                 if (put_user_u32(len, optlen)
2961                     || put_user_u8(val, optval_addr))
2962                     return -TARGET_EFAULT;
2963             } else {
2964                 if (len > sizeof(int))
2965                     len = sizeof(int);
2966                 if (put_user_u32(len, optlen)
2967                     || put_user_u32(val, optval_addr))
2968                     return -TARGET_EFAULT;
2969             }
2970             break;
2971         default:
2972             ret = -TARGET_ENOPROTOOPT;
2973             break;
2974         }
2975         break;
2976 #ifdef SOL_NETLINK
2977     case SOL_NETLINK:
2978         switch (optname) {
2979         case NETLINK_PKTINFO:
2980         case NETLINK_BROADCAST_ERROR:
2981         case NETLINK_NO_ENOBUFS:
2982 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2983         case NETLINK_LISTEN_ALL_NSID:
2984         case NETLINK_CAP_ACK:
2985 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2986 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2987         case NETLINK_EXT_ACK:
2988 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2989 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2990         case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2992             if (get_user_u32(len, optlen)) {
2993                 return -TARGET_EFAULT;
2994             }
2995             if (len != sizeof(val)) {
2996                 return -TARGET_EINVAL;
2997             }
2998             lv = len;
2999             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3000             if (ret < 0) {
3001                 return ret;
3002             }
3003             if (put_user_u32(lv, optlen)
3004                 || put_user_u32(val, optval_addr)) {
3005                 return -TARGET_EFAULT;
3006             }
3007             break;
3008 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3009         case NETLINK_LIST_MEMBERSHIPS:
3010         {
3011             uint32_t *results;
3012             int i;
3013             if (get_user_u32(len, optlen)) {
3014                 return -TARGET_EFAULT;
3015             }
3016             if (len < 0) {
3017                 return -TARGET_EINVAL;
3018             }
3019             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3020             if (!results) {
3021                 return -TARGET_EFAULT;
3022             }
3023             lv = len;
3024             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3025             if (ret < 0) {
3026                 unlock_user(results, optval_addr, 0);
3027                 return ret;
3028             }
            /* swap host endianness to target endianness. */
3030             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3031                 results[i] = tswap32(results[i]);
3032             }
3033             if (put_user_u32(lv, optlen)) {
3034                 return -TARGET_EFAULT;
3035             }
3036             unlock_user(results, optval_addr, 0);
3037             break;
3038         }
3039 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3040         default:
3041             goto unimplemented;
3042         }
3043         break;
3044 #endif /* SOL_NETLINK */
3045     default:
3046     unimplemented:
3047         qemu_log_mask(LOG_UNIMP,
3048                       "getsockopt level=%d optname=%d not yet supported\n",
3049                       level, optname);
3050         ret = -TARGET_EOPNOTSUPP;
3051         break;
3052     }
3053     return ret;
3054 }
3055 
3056 /* Convert target low/high pair representing file offset into the host
3057  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3058  * as the kernel doesn't handle them either.
3059  */
3060 static void target_to_host_low_high(abi_ulong tlow,
3061                                     abi_ulong thigh,
3062                                     unsigned long *hlow,
3063                                     unsigned long *hhigh)
3064 {
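    /*
     * The shift by TARGET_LONG_BITS (and by HOST_LONG_BITS below) is split
     * into two half-width shifts so that no single shift count ever equals
     * the operand width, which would be undefined behaviour in C.
     */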
3065     uint64_t off = tlow |
3066         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3067         TARGET_LONG_BITS / 2;
3068 
3069     *hlow = off;
3070     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3071 }
3072 
3073 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3074                                 abi_ulong count, int copy)
3075 {
3076     struct target_iovec *target_vec;
3077     struct iovec *vec;
3078     abi_ulong total_len, max_len;
3079     int i;
3080     int err = 0;
3081     bool bad_address = false;
3082 
3083     if (count == 0) {
3084         errno = 0;
3085         return NULL;
3086     }
3087     if (count > IOV_MAX) {
3088         errno = EINVAL;
3089         return NULL;
3090     }
3091 
3092     vec = g_try_new0(struct iovec, count);
3093     if (vec == NULL) {
3094         errno = ENOMEM;
3095         return NULL;
3096     }
3097 
3098     target_vec = lock_user(VERIFY_READ, target_addr,
3099                            count * sizeof(struct target_iovec), 1);
3100     if (target_vec == NULL) {
3101         err = EFAULT;
3102         goto fail2;
3103     }
3104 
3105     /* ??? If host page size > target page size, this will result in a
3106        value larger than what we can actually support.  */
3107     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3108     total_len = 0;
3109 
3110     for (i = 0; i < count; i++) {
3111         abi_ulong base = tswapal(target_vec[i].iov_base);
3112         abi_long len = tswapal(target_vec[i].iov_len);
3113 
3114         if (len < 0) {
3115             err = EINVAL;
3116             goto fail;
3117         } else if (len == 0) {
3118             /* Zero length pointer is ignored.  */
3119             vec[i].iov_base = 0;
3120         } else {
3121             vec[i].iov_base = lock_user(type, base, len, copy);
3122             /* If the first buffer pointer is bad, this is a fault.  But
3123              * subsequent bad buffers will result in a partial write; this
3124              * is realized by filling the vector with null pointers and
3125              * zero lengths. */
3126             if (!vec[i].iov_base) {
3127                 if (i == 0) {
3128                     err = EFAULT;
3129                     goto fail;
3130                 } else {
3131                     bad_address = true;
3132                 }
3133             }
3134             if (bad_address) {
3135                 len = 0;
3136             }
3137             if (len > max_len - total_len) {
3138                 len = max_len - total_len;
3139             }
3140         }
3141         vec[i].iov_len = len;
3142         total_len += len;
3143     }
3144 
3145     unlock_user(target_vec, target_addr, 0);
3146     return vec;
3147 
3148  fail:
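    /* Unwind: unlock only the iov buffers that were locked before the
     * failing entry; zero-length entries were never locked. */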
3149     while (--i >= 0) {
3150         if (tswapal(target_vec[i].iov_len) > 0) {
3151             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3152         }
3153     }
3154     unlock_user(target_vec, target_addr, 0);
3155  fail2:
3156     g_free(vec);
3157     errno = err;
3158     return NULL;
3159 }
3160 
3161 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3162                          abi_ulong count, int copy)
3163 {
3164     struct target_iovec *target_vec;
3165     int i;
3166 
3167     target_vec = lock_user(VERIFY_READ, target_addr,
3168                            count * sizeof(struct target_iovec), 1);
3169     if (target_vec) {
3170         for (i = 0; i < count; i++) {
3171             abi_ulong base = tswapal(target_vec[i].iov_base);
3172             abi_long len = tswapal(target_vec[i].iov_len);
3173             if (len < 0) {
3174                 break;
3175             }
3176             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3177         }
3178         unlock_user(target_vec, target_addr, 0);
3179     }
3180 
3181     g_free(vec);
3182 }
3183 
3184 static inline int target_to_host_sock_type(int *type)
3185 {
3186     int host_type = 0;
3187     int target_type = *type;
3188 
3189     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3190     case TARGET_SOCK_DGRAM:
3191         host_type = SOCK_DGRAM;
3192         break;
3193     case TARGET_SOCK_STREAM:
3194         host_type = SOCK_STREAM;
3195         break;
3196     default:
3197         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3198         break;
3199     }
3200     if (target_type & TARGET_SOCK_CLOEXEC) {
3201 #if defined(SOCK_CLOEXEC)
3202         host_type |= SOCK_CLOEXEC;
3203 #else
3204         return -TARGET_EINVAL;
3205 #endif
3206     }
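    /*
     * Prefer the host's SOCK_NONBLOCK.  If the host lacks it but does have
     * O_NONBLOCK, the flag is emulated after socket creation in
     * sock_flags_fixup(); only fail when neither is available.
     */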
3207     if (target_type & TARGET_SOCK_NONBLOCK) {
3208 #if defined(SOCK_NONBLOCK)
3209         host_type |= SOCK_NONBLOCK;
3210 #elif !defined(O_NONBLOCK)
3211         return -TARGET_EINVAL;
3212 #endif
3213     }
3214     *type = host_type;
3215     return 0;
3216 }
3217 
3218 /* Try to emulate socket type flags after socket creation.  */
3219 static int sock_flags_fixup(int fd, int target_type)
3220 {
3221 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3222     if (target_type & TARGET_SOCK_NONBLOCK) {
3223         int flags = fcntl(fd, F_GETFL);
3224         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3225             close(fd);
3226             return -TARGET_EINVAL;
3227         }
3228     }
3229 #endif
3230     return fd;
3231 }
3232 
3233 /* do_socket() Must return target values and target errnos. */
3234 static abi_long do_socket(int domain, int type, int protocol)
3235 {
3236     int target_type = type;
3237     int ret;
3238 
3239     ret = target_to_host_sock_type(&type);
3240     if (ret) {
3241         return ret;
3242     }
3243 
3244     if (domain == PF_NETLINK && !(
3245 #ifdef CONFIG_RTNETLINK
3246          protocol == NETLINK_ROUTE ||
3247 #endif
3248          protocol == NETLINK_KOBJECT_UEVENT ||
3249          protocol == NETLINK_AUDIT)) {
3250         return -TARGET_EPROTONOSUPPORT;
3251     }
3252 
3253     if (domain == AF_PACKET ||
3254         (domain == AF_INET && type == SOCK_PACKET)) {
3255         protocol = tswap16(protocol);
3256     }
3257 
3258     ret = get_errno(socket(domain, type, protocol));
3259     if (ret >= 0) {
3260         ret = sock_flags_fixup(ret, target_type);
3261         if (type == SOCK_PACKET) {
3262             /* Handle an obsolete case:
3263              * if the socket type is SOCK_PACKET, bind by name.
3264              */
3265             fd_trans_register(ret, &target_packet_trans);
3266         } else if (domain == PF_NETLINK) {
3267             switch (protocol) {
3268 #ifdef CONFIG_RTNETLINK
3269             case NETLINK_ROUTE:
3270                 fd_trans_register(ret, &target_netlink_route_trans);
3271                 break;
3272 #endif
3273             case NETLINK_KOBJECT_UEVENT:
3274                 /* nothing to do: messages are strings */
3275                 break;
3276             case NETLINK_AUDIT:
3277                 fd_trans_register(ret, &target_netlink_audit_trans);
3278                 break;
3279             default:
3280                 g_assert_not_reached();
3281             }
3282         }
3283     }
3284     return ret;
3285 }
3286 
3287 /* do_bind() Must return target values and target errnos. */
3288 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3289                         socklen_t addrlen)
3290 {
3291     void *addr;
3292     abi_long ret;
3293 
3294     if ((int)addrlen < 0) {
3295         return -TARGET_EINVAL;
3296     }
3297 
3298     addr = alloca(addrlen+1);
3299 
3300     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3301     if (ret)
3302         return ret;
3303 
3304     return get_errno(bind(sockfd, addr, addrlen));
3305 }
3306 
3307 /* do_connect() Must return target values and target errnos. */
3308 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3309                            socklen_t addrlen)
3310 {
3311     void *addr;
3312     abi_long ret;
3313 
3314     if ((int)addrlen < 0) {
3315         return -TARGET_EINVAL;
3316     }
3317 
3318     addr = alloca(addrlen+1);
3319 
3320     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3321     if (ret)
3322         return ret;
3323 
3324     return get_errno(safe_connect(sockfd, addr, addrlen));
3325 }
3326 
3327 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3328 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3329                                       int flags, int send)
3330 {
3331     abi_long ret, len;
3332     struct msghdr msg;
3333     abi_ulong count;
3334     struct iovec *vec;
3335     abi_ulong target_vec;
3336 
3337     if (msgp->msg_name) {
3338         msg.msg_namelen = tswap32(msgp->msg_namelen);
3339         msg.msg_name = alloca(msg.msg_namelen+1);
3340         ret = target_to_host_sockaddr(fd, msg.msg_name,
3341                                       tswapal(msgp->msg_name),
3342                                       msg.msg_namelen);
3343         if (ret == -TARGET_EFAULT) {
3344             /* For connected sockets msg_name and msg_namelen must
3345              * be ignored, so returning EFAULT immediately is wrong.
3346              * Instead, pass a bad msg_name to the host kernel, and
3347              * let it decide whether to return EFAULT or not.
3348              */
3349             msg.msg_name = (void *)-1;
3350         } else if (ret) {
3351             goto out2;
3352         }
3353     } else {
3354         msg.msg_name = NULL;
3355         msg.msg_namelen = 0;
3356     }
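    /*
     * Over-allocate the control buffer: host cmsg headers and alignment may
     * be larger than the target's (e.g. a 32-bit guest on a 64-bit host), so
     * the converted control data can need more room than the guest's
     * msg_controllen suggests.
     */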
3357     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3358     msg.msg_control = alloca(msg.msg_controllen);
3359     memset(msg.msg_control, 0, msg.msg_controllen);
3360 
3361     msg.msg_flags = tswap32(msgp->msg_flags);
3362 
3363     count = tswapal(msgp->msg_iovlen);
3364     target_vec = tswapal(msgp->msg_iov);
3365 
3366     if (count > IOV_MAX) {
3367         /* sendmsg/recvmsg return a different errno for this condition than
3368          * readv/writev, so we must catch it here before lock_iovec() does.
3369          */
3370         ret = -TARGET_EMSGSIZE;
3371         goto out2;
3372     }
3373 
3374     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3375                      target_vec, count, send);
3376     if (vec == NULL) {
3377         ret = -host_to_target_errno(errno);
3378         goto out2;
3379     }
3380     msg.msg_iovlen = count;
3381     msg.msg_iov = vec;
3382 
3383     if (send) {
3384         if (fd_trans_target_to_host_data(fd)) {
3385             void *host_msg;
3386 
3387             host_msg = g_malloc(msg.msg_iov->iov_len);
3388             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3389             ret = fd_trans_target_to_host_data(fd)(host_msg,
3390                                                    msg.msg_iov->iov_len);
3391             if (ret >= 0) {
3392                 msg.msg_iov->iov_base = host_msg;
3393                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3394             }
3395             g_free(host_msg);
3396         } else {
3397             ret = target_to_host_cmsg(&msg, msgp);
3398             if (ret == 0) {
3399                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3400             }
3401         }
3402     } else {
3403         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3404         if (!is_error(ret)) {
3405             len = ret;
3406             if (fd_trans_host_to_target_data(fd)) {
3407                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3408                                                MIN(msg.msg_iov->iov_len, len));
3409             } else {
3410                 ret = host_to_target_cmsg(msgp, &msg);
3411             }
3412             if (!is_error(ret)) {
3413                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3414                 msgp->msg_flags = tswap32(msg.msg_flags);
3415                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3416                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3417                                     msg.msg_name, msg.msg_namelen);
3418                     if (ret) {
3419                         goto out;
3420                     }
3421                 }
3422 
3423                 ret = len;
3424             }
3425         }
3426     }
3427 
3428 out:
3429     unlock_iovec(vec, target_vec, count, !send);
3430 out2:
3431     return ret;
3432 }
3433 
3434 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3435                                int flags, int send)
3436 {
3437     abi_long ret;
3438     struct target_msghdr *msgp;
3439 
3440     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3441                           msgp,
3442                           target_msg,
3443                           send ? 1 : 0)) {
3444         return -TARGET_EFAULT;
3445     }
3446     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3447     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3448     return ret;
3449 }
3450 
3451 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3452  * so it might not have this *mmsg-specific flag either.
3453  */
3454 #ifndef MSG_WAITFORONE
3455 #define MSG_WAITFORONE 0x10000
3456 #endif
3457 
3458 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3459                                 unsigned int vlen, unsigned int flags,
3460                                 int send)
3461 {
3462     struct target_mmsghdr *mmsgp;
3463     abi_long ret = 0;
3464     int i;
3465 
3466     if (vlen > UIO_MAXIOV) {
3467         vlen = UIO_MAXIOV;
3468     }
3469 
3470     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3471     if (!mmsgp) {
3472         return -TARGET_EFAULT;
3473     }
3474 
3475     for (i = 0; i < vlen; i++) {
3476         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3477         if (is_error(ret)) {
3478             break;
3479         }
3480         mmsgp[i].msg_len = tswap32(ret);
3481         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3482         if (flags & MSG_WAITFORONE) {
3483             flags |= MSG_DONTWAIT;
3484         }
3485     }
3486 
3487     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3488 
3489     /* Return number of datagrams sent if we sent any at all;
3490      * otherwise return the error.
3491      */
3492     if (i) {
3493         return i;
3494     }
3495     return ret;
3496 }
3497 
3498 /* do_accept4() Must return target values and target errnos. */
3499 static abi_long do_accept4(int fd, abi_ulong target_addr,
3500                            abi_ulong target_addrlen_addr, int flags)
3501 {
3502     socklen_t addrlen, ret_addrlen;
3503     void *addr;
3504     abi_long ret;
3505     int host_flags;
3506 
3507     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3508 
3509     if (target_addr == 0) {
3510         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3511     }
3512 
3513     /* Linux returns EFAULT if the addrlen pointer is invalid */
3514     if (get_user_u32(addrlen, target_addrlen_addr))
3515         return -TARGET_EFAULT;
3516 
3517     if ((int)addrlen < 0) {
3518         return -TARGET_EINVAL;
3519     }
3520 
3521     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3522         return -TARGET_EFAULT;
3523 
3524     addr = alloca(addrlen);
3525 
3526     ret_addrlen = addrlen;
3527     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3528     if (!is_error(ret)) {
3529         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3530         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3531             ret = -TARGET_EFAULT;
3532         }
3533     }
3534     return ret;
3535 }
3536 
3537 /* do_getpeername() Must return target values and target errnos. */
3538 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3539                                abi_ulong target_addrlen_addr)
3540 {
3541     socklen_t addrlen, ret_addrlen;
3542     void *addr;
3543     abi_long ret;
3544 
3545     if (get_user_u32(addrlen, target_addrlen_addr))
3546         return -TARGET_EFAULT;
3547 
3548     if ((int)addrlen < 0) {
3549         return -TARGET_EINVAL;
3550     }
3551 
3552     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3553         return -TARGET_EFAULT;
3554 
3555     addr = alloca(addrlen);
3556 
3557     ret_addrlen = addrlen;
3558     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3559     if (!is_error(ret)) {
3560         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3561         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3562             ret = -TARGET_EFAULT;
3563         }
3564     }
3565     return ret;
3566 }
3567 
3568 /* do_getsockname() Must return target values and target errnos. */
3569 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3570                                abi_ulong target_addrlen_addr)
3571 {
3572     socklen_t addrlen, ret_addrlen;
3573     void *addr;
3574     abi_long ret;
3575 
3576     if (get_user_u32(addrlen, target_addrlen_addr))
3577         return -TARGET_EFAULT;
3578 
3579     if ((int)addrlen < 0) {
3580         return -TARGET_EINVAL;
3581     }
3582 
3583     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3584         return -TARGET_EFAULT;
3585 
3586     addr = alloca(addrlen);
3587 
3588     ret_addrlen = addrlen;
3589     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3590     if (!is_error(ret)) {
3591         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3592         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3593             ret = -TARGET_EFAULT;
3594         }
3595     }
3596     return ret;
3597 }
3598 
3599 /* do_socketpair() Must return target values and target errnos. */
3600 static abi_long do_socketpair(int domain, int type, int protocol,
3601                               abi_ulong target_tab_addr)
3602 {
3603     int tab[2];
3604     abi_long ret;
3605 
3606     target_to_host_sock_type(&type);
3607 
3608     ret = get_errno(socketpair(domain, type, protocol, tab));
3609     if (!is_error(ret)) {
3610         if (put_user_s32(tab[0], target_tab_addr)
3611             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3612             ret = -TARGET_EFAULT;
3613     }
3614     return ret;
3615 }
3616 
3617 /* do_sendto() Must return target values and target errnos. */
3618 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3619                           abi_ulong target_addr, socklen_t addrlen)
3620 {
3621     void *addr;
3622     void *host_msg;
3623     void *copy_msg = NULL;
3624     abi_long ret;
3625 
3626     if ((int)addrlen < 0) {
3627         return -TARGET_EINVAL;
3628     }
3629 
3630     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3631     if (!host_msg)
3632         return -TARGET_EFAULT;
3633     if (fd_trans_target_to_host_data(fd)) {
3634         copy_msg = host_msg;
3635         host_msg = g_malloc(len);
3636         memcpy(host_msg, copy_msg, len);
3637         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3638         if (ret < 0) {
3639             goto fail;
3640         }
3641     }
3642     if (target_addr) {
3643         addr = alloca(addrlen+1);
3644         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3645         if (ret) {
3646             goto fail;
3647         }
3648         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3649     } else {
3650         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3651     }
3652 fail:
3653     if (copy_msg) {
3654         g_free(host_msg);
3655         host_msg = copy_msg;
3656     }
3657     unlock_user(host_msg, msg, 0);
3658     return ret;
3659 }
3660 
3661 /* do_recvfrom() Must return target values and target errnos. */
3662 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3663                             abi_ulong target_addr,
3664                             abi_ulong target_addrlen)
3665 {
3666     socklen_t addrlen, ret_addrlen;
3667     void *addr;
3668     void *host_msg;
3669     abi_long ret;
3670 
3671     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3672     if (!host_msg)
3673         return -TARGET_EFAULT;
3674     if (target_addr) {
3675         if (get_user_u32(addrlen, target_addrlen)) {
3676             ret = -TARGET_EFAULT;
3677             goto fail;
3678         }
3679         if ((int)addrlen < 0) {
3680             ret = -TARGET_EINVAL;
3681             goto fail;
3682         }
3683         addr = alloca(addrlen);
3684         ret_addrlen = addrlen;
3685         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3686                                       addr, &ret_addrlen));
3687     } else {
3688         addr = NULL; /* To keep compiler quiet.  */
3689         addrlen = 0; /* To keep compiler quiet.  */
3690         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3691     }
3692     if (!is_error(ret)) {
3693         if (fd_trans_host_to_target_data(fd)) {
3694             abi_long trans;
3695             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3696             if (is_error(trans)) {
3697                 ret = trans;
3698                 goto fail;
3699             }
3700         }
3701         if (target_addr) {
3702             host_to_target_sockaddr(target_addr, addr,
3703                                     MIN(addrlen, ret_addrlen));
3704             if (put_user_u32(ret_addrlen, target_addrlen)) {
3705                 ret = -TARGET_EFAULT;
3706                 goto fail;
3707             }
3708         }
3709         unlock_user(host_msg, msg, len);
3710     } else {
3711 fail:
3712         unlock_user(host_msg, msg, 0);
3713     }
3714     return ret;
3715 }
3716 
3717 #ifdef TARGET_NR_socketcall
3718 /* do_socketcall() must return target values and target errnos. */
3719 static abi_long do_socketcall(int num, abi_ulong vptr)
3720 {
3721     static const unsigned nargs[] = { /* number of arguments per operation */
3722         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3723         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3724         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3725         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3726         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3727         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3728         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3729         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3730         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3731         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3732         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3733         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3734         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3735         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3736         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3737         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3738         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3739         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3740         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3741         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3742     };
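    /*
     * Illustration: a guest socketcall(TARGET_SYS_CONNECT, vptr) reads
     * nargs[TARGET_SYS_CONNECT] == 3 argument words from vptr and is
     * dispatched to do_connect(fd, addr, addrlen) below.
     */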
3743     abi_long a[6]; /* max 6 args */
3744     unsigned i;
3745 
3746     /* check the range of the first argument num */
3747     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3748     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3749         return -TARGET_EINVAL;
3750     }
3751     /* ensure we have space for args */
3752     if (nargs[num] > ARRAY_SIZE(a)) {
3753         return -TARGET_EINVAL;
3754     }
3755     /* collect the arguments in a[] according to nargs[] */
3756     for (i = 0; i < nargs[num]; ++i) {
3757         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3758             return -TARGET_EFAULT;
3759         }
3760     }
3761     /* now that we have the args, invoke the appropriate underlying function */
3762     switch (num) {
3763     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3764         return do_socket(a[0], a[1], a[2]);
3765     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3766         return do_bind(a[0], a[1], a[2]);
3767     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3768         return do_connect(a[0], a[1], a[2]);
3769     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3770         return get_errno(listen(a[0], a[1]));
3771     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3772         return do_accept4(a[0], a[1], a[2], 0);
3773     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3774         return do_getsockname(a[0], a[1], a[2]);
3775     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3776         return do_getpeername(a[0], a[1], a[2]);
3777     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3778         return do_socketpair(a[0], a[1], a[2], a[3]);
3779     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3780         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3781     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3782         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3783     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3784         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3785     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3786         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3787     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3788         return get_errno(shutdown(a[0], a[1]));
3789     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3790         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3791     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3792         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3793     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3794         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3795     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3796         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3797     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3798         return do_accept4(a[0], a[1], a[2], a[3]);
3799     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3800         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3801     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3802         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3803     default:
3804         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3805         return -TARGET_EINVAL;
3806     }
3807 }
3808 #endif
3809 
3810 #define N_SHM_REGIONS	32
3811 
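/*
 * Track the guest address and size of each segment attached with shmat() so
 * that do_shmdt() can clear the corresponding page flags when the segment is
 * detached.
 */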
3812 static struct shm_region {
3813     abi_ulong start;
3814     abi_ulong size;
3815     bool in_use;
3816 } shm_regions[N_SHM_REGIONS];
3817 
3818 #ifndef TARGET_SEMID64_DS
3819 /* asm-generic version of this struct */
3820 struct target_semid64_ds
3821 {
3822   struct target_ipc_perm sem_perm;
3823   abi_ulong sem_otime;
3824 #if TARGET_ABI_BITS == 32
3825   abi_ulong __unused1;
3826 #endif
3827   abi_ulong sem_ctime;
3828 #if TARGET_ABI_BITS == 32
3829   abi_ulong __unused2;
3830 #endif
3831   abi_ulong sem_nsems;
3832   abi_ulong __unused3;
3833   abi_ulong __unused4;
3834 };
3835 #endif
3836 
3837 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3838                                                abi_ulong target_addr)
3839 {
3840     struct target_ipc_perm *target_ip;
3841     struct target_semid64_ds *target_sd;
3842 
3843     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3844         return -TARGET_EFAULT;
3845     target_ip = &(target_sd->sem_perm);
3846     host_ip->__key = tswap32(target_ip->__key);
3847     host_ip->uid = tswap32(target_ip->uid);
3848     host_ip->gid = tswap32(target_ip->gid);
3849     host_ip->cuid = tswap32(target_ip->cuid);
3850     host_ip->cgid = tswap32(target_ip->cgid);
3851 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3852     host_ip->mode = tswap32(target_ip->mode);
3853 #else
3854     host_ip->mode = tswap16(target_ip->mode);
3855 #endif
3856 #if defined(TARGET_PPC)
3857     host_ip->__seq = tswap32(target_ip->__seq);
3858 #else
3859     host_ip->__seq = tswap16(target_ip->__seq);
3860 #endif
3861     unlock_user_struct(target_sd, target_addr, 0);
3862     return 0;
3863 }
3864 
3865 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3866                                                struct ipc_perm *host_ip)
3867 {
3868     struct target_ipc_perm *target_ip;
3869     struct target_semid64_ds *target_sd;
3870 
3871     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3872         return -TARGET_EFAULT;
3873     target_ip = &(target_sd->sem_perm);
3874     target_ip->__key = tswap32(host_ip->__key);
3875     target_ip->uid = tswap32(host_ip->uid);
3876     target_ip->gid = tswap32(host_ip->gid);
3877     target_ip->cuid = tswap32(host_ip->cuid);
3878     target_ip->cgid = tswap32(host_ip->cgid);
3879 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3880     target_ip->mode = tswap32(host_ip->mode);
3881 #else
3882     target_ip->mode = tswap16(host_ip->mode);
3883 #endif
3884 #if defined(TARGET_PPC)
3885     target_ip->__seq = tswap32(host_ip->__seq);
3886 #else
3887     target_ip->__seq = tswap16(host_ip->__seq);
3888 #endif
3889     unlock_user_struct(target_sd, target_addr, 1);
3890     return 0;
3891 }
3892 
3893 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3894                                                abi_ulong target_addr)
3895 {
3896     struct target_semid64_ds *target_sd;
3897 
3898     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3899         return -TARGET_EFAULT;
3900     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3901         return -TARGET_EFAULT;
3902     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3903     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3904     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3905     unlock_user_struct(target_sd, target_addr, 0);
3906     return 0;
3907 }
3908 
3909 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3910                                                struct semid_ds *host_sd)
3911 {
3912     struct target_semid64_ds *target_sd;
3913 
3914     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3915         return -TARGET_EFAULT;
3916     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3917         return -TARGET_EFAULT;
3918     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3919     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3920     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3921     unlock_user_struct(target_sd, target_addr, 1);
3922     return 0;
3923 }
3924 
3925 struct target_seminfo {
3926     int semmap;
3927     int semmni;
3928     int semmns;
3929     int semmnu;
3930     int semmsl;
3931     int semopm;
3932     int semume;
3933     int semusz;
3934     int semvmx;
3935     int semaem;
3936 };
3937 
3938 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3939                                               struct seminfo *host_seminfo)
3940 {
3941     struct target_seminfo *target_seminfo;
3942     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3943         return -TARGET_EFAULT;
3944     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3945     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3946     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3947     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3948     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3949     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3950     __put_user(host_seminfo->semume, &target_seminfo->semume);
3951     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3952     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3953     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3954     unlock_user_struct(target_seminfo, target_addr, 1);
3955     return 0;
3956 }
3957 
3958 union semun {
3959 	int val;
3960 	struct semid_ds *buf;
3961 	unsigned short *array;
3962 	struct seminfo *__buf;
3963 };
3964 
3965 union target_semun {
3966 	int val;
3967 	abi_ulong buf;
3968 	abi_ulong array;
3969 	abi_ulong __buf;
3970 };
3971 
3972 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3973                                                abi_ulong target_addr)
3974 {
3975     int nsems;
3976     unsigned short *array;
3977     union semun semun;
3978     struct semid_ds semid_ds;
3979     int i, ret;
3980 
3981     semun.buf = &semid_ds;
3982 
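    /* Query the set with IPC_STAT first to learn how many semaphores it
     * holds, and hence how large the GETALL/SETALL value array must be. */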
3983     ret = semctl(semid, 0, IPC_STAT, semun);
3984     if (ret == -1)
3985         return get_errno(ret);
3986 
3987     nsems = semid_ds.sem_nsems;
3988 
3989     *host_array = g_try_new(unsigned short, nsems);
3990     if (!*host_array) {
3991         return -TARGET_ENOMEM;
3992     }
3993     array = lock_user(VERIFY_READ, target_addr,
3994                       nsems*sizeof(unsigned short), 1);
3995     if (!array) {
3996         g_free(*host_array);
3997         return -TARGET_EFAULT;
3998     }
3999 
4000     for(i=0; i<nsems; i++) {
4001         __get_user((*host_array)[i], &array[i]);
4002     }
4003     unlock_user(array, target_addr, 0);
4004 
4005     return 0;
4006 }
4007 
4008 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4009                                                unsigned short **host_array)
4010 {
4011     int nsems;
4012     unsigned short *array;
4013     union semun semun;
4014     struct semid_ds semid_ds;
4015     int i, ret;
4016 
4017     semun.buf = &semid_ds;
4018 
4019     ret = semctl(semid, 0, IPC_STAT, semun);
4020     if (ret == -1)
4021         return get_errno(ret);
4022 
4023     nsems = semid_ds.sem_nsems;
4024 
4025     array = lock_user(VERIFY_WRITE, target_addr,
4026                       nsems*sizeof(unsigned short), 0);
4027     if (!array)
4028         return -TARGET_EFAULT;
4029 
4030     for(i=0; i<nsems; i++) {
4031         __put_user((*host_array)[i], &array[i]);
4032     }
4033     g_free(*host_array);
4034     unlock_user(array, target_addr, 1);
4035 
4036     return 0;
4037 }
4038 
4039 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4040                                  abi_ulong target_arg)
4041 {
4042     union target_semun target_su = { .buf = target_arg };
4043     union semun arg;
4044     struct semid_ds dsarg;
4045     unsigned short *array = NULL;
4046     struct seminfo seminfo;
4047     abi_long ret = -TARGET_EINVAL;
4048     abi_long err;
4049     cmd &= 0xff;
4050 
4051     switch( cmd ) {
4052 	case GETVAL:
4053 	case SETVAL:
4054             /* In 64 bit cross-endian situations, we will erroneously pick up
4055              * the wrong half of the union for the "val" element.  To rectify
4056              * this, the entire 8-byte structure is byteswapped, followed by
4057 	     * a swap of the 4 byte val field. In other cases, the data is
4058 	     * already in proper host byte order. */
4059 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4060 		target_su.buf = tswapal(target_su.buf);
4061 		arg.val = tswap32(target_su.val);
4062 	    } else {
4063 		arg.val = target_su.val;
4064 	    }
4065             ret = get_errno(semctl(semid, semnum, cmd, arg));
4066             break;
4067 	case GETALL:
4068 	case SETALL:
4069             err = target_to_host_semarray(semid, &array, target_su.array);
4070             if (err)
4071                 return err;
4072             arg.array = array;
4073             ret = get_errno(semctl(semid, semnum, cmd, arg));
4074             err = host_to_target_semarray(semid, target_su.array, &array);
4075             if (err)
4076                 return err;
4077             break;
4078 	case IPC_STAT:
4079 	case IPC_SET:
4080 	case SEM_STAT:
4081             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4082             if (err)
4083                 return err;
4084             arg.buf = &dsarg;
4085             ret = get_errno(semctl(semid, semnum, cmd, arg));
4086             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4087             if (err)
4088                 return err;
4089             break;
4090 	case IPC_INFO:
4091 	case SEM_INFO:
4092             arg.__buf = &seminfo;
4093             ret = get_errno(semctl(semid, semnum, cmd, arg));
4094             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4095             if (err)
4096                 return err;
4097             break;
4098 	case IPC_RMID:
4099 	case GETPID:
4100 	case GETNCNT:
4101 	case GETZCNT:
4102             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4103             break;
4104     }
4105 
4106     return ret;
4107 }
4108 
4109 struct target_sembuf {
4110     unsigned short sem_num;
4111     short sem_op;
4112     short sem_flg;
4113 };
4114 
4115 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4116                                              abi_ulong target_addr,
4117                                              unsigned nsops)
4118 {
4119     struct target_sembuf *target_sembuf;
4120     int i;
4121 
4122     target_sembuf = lock_user(VERIFY_READ, target_addr,
4123                               nsops*sizeof(struct target_sembuf), 1);
4124     if (!target_sembuf)
4125         return -TARGET_EFAULT;
4126 
4127     for(i=0; i<nsops; i++) {
4128         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4129         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4130         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4131     }
4132 
4133     unlock_user(target_sembuf, target_addr, 0);
4134 
4135     return 0;
4136 }
4137 
4138 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4139     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4140 
4141 /*
4142  * This macro is required to handle the s390 variant, which passes the
4143  * arguments in a different order than the default.
4144  */
4145 #ifdef __s390x__
4146 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4147   (__nsops), (__timeout), (__sops)
4148 #else
4149 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4150   (__nsops), 0, (__sops), (__timeout)
4151 #endif
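/*
 * Illustration: at the call site in do_semtimedop(),
 *   safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))
 * expands to
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)   on the generic path
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)      on s390x
 */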
4152 
4153 static inline abi_long do_semtimedop(int semid,
4154                                      abi_long ptr,
4155                                      unsigned nsops,
4156                                      abi_long timeout, bool time64)
4157 {
4158     struct sembuf *sops;
4159     struct timespec ts, *pts = NULL;
4160     abi_long ret;
4161 
4162     if (timeout) {
4163         pts = &ts;
4164         if (time64) {
4165             if (target_to_host_timespec64(pts, timeout)) {
4166                 return -TARGET_EFAULT;
4167             }
4168         } else {
4169             if (target_to_host_timespec(pts, timeout)) {
4170                 return -TARGET_EFAULT;
4171             }
4172         }
4173     }
4174 
4175     if (nsops > TARGET_SEMOPM) {
4176         return -TARGET_E2BIG;
4177     }
4178 
4179     sops = g_new(struct sembuf, nsops);
4180 
4181     if (target_to_host_sembuf(sops, ptr, nsops)) {
4182         g_free(sops);
4183         return -TARGET_EFAULT;
4184     }
4185 
4186     ret = -TARGET_ENOSYS;
4187 #ifdef __NR_semtimedop
4188     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4189 #endif
4190 #ifdef __NR_ipc
4191     if (ret == -TARGET_ENOSYS) {
4192         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4193                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4194     }
4195 #endif
4196     g_free(sops);
4197     return ret;
4198 }
4199 #endif
4200 
4201 struct target_msqid_ds
4202 {
4203     struct target_ipc_perm msg_perm;
4204     abi_ulong msg_stime;
4205 #if TARGET_ABI_BITS == 32
4206     abi_ulong __unused1;
4207 #endif
4208     abi_ulong msg_rtime;
4209 #if TARGET_ABI_BITS == 32
4210     abi_ulong __unused2;
4211 #endif
4212     abi_ulong msg_ctime;
4213 #if TARGET_ABI_BITS == 32
4214     abi_ulong __unused3;
4215 #endif
4216     abi_ulong __msg_cbytes;
4217     abi_ulong msg_qnum;
4218     abi_ulong msg_qbytes;
4219     abi_ulong msg_lspid;
4220     abi_ulong msg_lrpid;
4221     abi_ulong __unused4;
4222     abi_ulong __unused5;
4223 };
4224 
4225 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4226                                                abi_ulong target_addr)
4227 {
4228     struct target_msqid_ds *target_md;
4229 
4230     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4231         return -TARGET_EFAULT;
4232     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4233         return -TARGET_EFAULT;
4234     host_md->msg_stime = tswapal(target_md->msg_stime);
4235     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4236     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4237     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4238     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4239     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4240     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4241     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4242     unlock_user_struct(target_md, target_addr, 0);
4243     return 0;
4244 }
4245 
4246 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4247                                                struct msqid_ds *host_md)
4248 {
4249     struct target_msqid_ds *target_md;
4250 
4251     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4252         return -TARGET_EFAULT;
4253     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4254         return -TARGET_EFAULT;
4255     target_md->msg_stime = tswapal(host_md->msg_stime);
4256     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4257     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4258     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4259     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4260     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4261     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4262     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4263     unlock_user_struct(target_md, target_addr, 1);
4264     return 0;
4265 }
4266 
4267 struct target_msginfo {
4268     int msgpool;
4269     int msgmap;
4270     int msgmax;
4271     int msgmnb;
4272     int msgmni;
4273     int msgssz;
4274     int msgtql;
4275     unsigned short int msgseg;
4276 };
4277 
4278 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4279                                               struct msginfo *host_msginfo)
4280 {
4281     struct target_msginfo *target_msginfo;
4282     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4283         return -TARGET_EFAULT;
4284     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4285     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4286     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4287     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4288     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4289     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4290     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4291     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4292     unlock_user_struct(target_msginfo, target_addr, 1);
4293     return 0;
4294 }
4295 
4296 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4297 {
4298     struct msqid_ds dsarg;
4299     struct msginfo msginfo;
4300     abi_long ret = -TARGET_EINVAL;
4301 
4302     cmd &= 0xff;
4303 
4304     switch (cmd) {
4305     case IPC_STAT:
4306     case IPC_SET:
4307     case MSG_STAT:
4308         if (target_to_host_msqid_ds(&dsarg,ptr))
4309             return -TARGET_EFAULT;
4310         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4311         if (host_to_target_msqid_ds(ptr,&dsarg))
4312             return -TARGET_EFAULT;
4313         break;
4314     case IPC_RMID:
4315         ret = get_errno(msgctl(msgid, cmd, NULL));
4316         break;
4317     case IPC_INFO:
4318     case MSG_INFO:
4319         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4320         if (host_to_target_msginfo(ptr, &msginfo))
4321             return -TARGET_EFAULT;
4322         break;
4323     }
4324 
4325     return ret;
4326 }
4327 
4328 struct target_msgbuf {
4329     abi_long mtype;
4330     char	mtext[1];
4331 };
4332 
4333 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4334                                  ssize_t msgsz, int msgflg)
4335 {
4336     struct target_msgbuf *target_mb;
4337     struct msgbuf *host_mb;
4338     abi_long ret = 0;
4339 
4340     if (msgsz < 0) {
4341         return -TARGET_EINVAL;
4342     }
4343 
4344     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4345         return -TARGET_EFAULT;
4346     host_mb = g_try_malloc(msgsz + sizeof(long));
4347     if (!host_mb) {
4348         unlock_user_struct(target_mb, msgp, 0);
4349         return -TARGET_ENOMEM;
4350     }
4351     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4352     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4353     ret = -TARGET_ENOSYS;
4354 #ifdef __NR_msgsnd
4355     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4356 #endif
4357 #ifdef __NR_ipc
4358     if (ret == -TARGET_ENOSYS) {
4359 #ifdef __s390x__
4360         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4361                                  host_mb));
4362 #else
4363         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4364                                  host_mb, 0));
4365 #endif
4366     }
4367 #endif
4368     g_free(host_mb);
4369     unlock_user_struct(target_mb, msgp, 0);
4370 
4371     return ret;
4372 }
4373 
4374 #ifdef __NR_ipc
4375 #if defined(__sparc__)
4376 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4377 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4378 #elif defined(__s390x__)
4379 /* The s390 sys_ipc variant has only five parameters.  */
4380 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4381     ((long int[]){(long int)__msgp, __msgtyp})
4382 #else
4383 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4384     ((long int[]){(long int)__msgp, __msgtyp}), 0
4385 #endif
4386 #endif
4387 
4388 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4389                                  ssize_t msgsz, abi_long msgtyp,
4390                                  int msgflg)
4391 {
4392     struct target_msgbuf *target_mb;
4393     char *target_mtext;
4394     struct msgbuf *host_mb;
4395     abi_long ret = 0;
4396 
4397     if (msgsz < 0) {
4398         return -TARGET_EINVAL;
4399     }
4400 
4401     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4402         return -TARGET_EFAULT;
4403 
4404     host_mb = g_try_malloc(msgsz + sizeof(long));
4405     if (!host_mb) {
4406         ret = -TARGET_ENOMEM;
4407         goto end;
4408     }
4409     ret = -TARGET_ENOSYS;
4410 #ifdef __NR_msgrcv
4411     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4412 #endif
4413 #ifdef __NR_ipc
4414     if (ret == -TARGET_ENOSYS) {
4415         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4416                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4417     }
4418 #endif
4419 
4420     if (ret > 0) {
4421         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4422         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4423         if (!target_mtext) {
4424             ret = -TARGET_EFAULT;
4425             goto end;
4426         }
4427         memcpy(target_mb->mtext, host_mb->mtext, ret);
4428         unlock_user(target_mtext, target_mtext_addr, ret);
4429     }
4430 
4431     target_mb->mtype = tswapal(host_mb->mtype);
4432 
4433 end:
4434     if (target_mb)
4435         unlock_user_struct(target_mb, msgp, 1);
4436     g_free(host_mb);
4437     return ret;
4438 }
4439 
4440 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4441                                                abi_ulong target_addr)
4442 {
4443     struct target_shmid_ds *target_sd;
4444 
4445     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4446         return -TARGET_EFAULT;
4447     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4448         return -TARGET_EFAULT;
4449     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4450     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4451     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4452     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4453     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4454     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4455     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4456     unlock_user_struct(target_sd, target_addr, 0);
4457     return 0;
4458 }
4459 
4460 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4461                                                struct shmid_ds *host_sd)
4462 {
4463     struct target_shmid_ds *target_sd;
4464 
4465     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4466         return -TARGET_EFAULT;
4467     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4468         return -TARGET_EFAULT;
4469     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4470     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4471     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4472     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4473     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4474     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4475     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4476     unlock_user_struct(target_sd, target_addr, 1);
4477     return 0;
4478 }
4479 
4480 struct  target_shminfo {
4481     abi_ulong shmmax;
4482     abi_ulong shmmin;
4483     abi_ulong shmmni;
4484     abi_ulong shmseg;
4485     abi_ulong shmall;
4486 };
4487 
4488 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4489                                               struct shminfo *host_shminfo)
4490 {
4491     struct target_shminfo *target_shminfo;
4492     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4493         return -TARGET_EFAULT;
4494     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4495     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4496     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4497     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4498     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4499     unlock_user_struct(target_shminfo, target_addr, 1);
4500     return 0;
4501 }
4502 
4503 struct target_shm_info {
4504     int used_ids;
4505     abi_ulong shm_tot;
4506     abi_ulong shm_rss;
4507     abi_ulong shm_swp;
4508     abi_ulong swap_attempts;
4509     abi_ulong swap_successes;
4510 };
4511 
4512 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4513                                                struct shm_info *host_shm_info)
4514 {
4515     struct target_shm_info *target_shm_info;
4516     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4517         return -TARGET_EFAULT;
4518     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4519     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4520     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4521     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4522     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4523     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4524     unlock_user_struct(target_shm_info, target_addr, 1);
4525     return 0;
4526 }
4527 
4528 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4529 {
4530     struct shmid_ds dsarg;
4531     struct shminfo shminfo;
4532     struct shm_info shm_info;
4533     abi_long ret = -TARGET_EINVAL;
4534 
4535     cmd &= 0xff;
4536 
4537     switch(cmd) {
4538     case IPC_STAT:
4539     case IPC_SET:
4540     case SHM_STAT:
4541         if (target_to_host_shmid_ds(&dsarg, buf))
4542             return -TARGET_EFAULT;
4543         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4544         if (host_to_target_shmid_ds(buf, &dsarg))
4545             return -TARGET_EFAULT;
4546         break;
4547     case IPC_INFO:
4548         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4549         if (host_to_target_shminfo(buf, &shminfo))
4550             return -TARGET_EFAULT;
4551         break;
4552     case SHM_INFO:
4553         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4554         if (host_to_target_shm_info(buf, &shm_info))
4555             return -TARGET_EFAULT;
4556         break;
4557     case IPC_RMID:
4558     case SHM_LOCK:
4559     case SHM_UNLOCK:
4560         ret = get_errno(shmctl(shmid, cmd, NULL));
4561         break;
4562     }
4563 
4564     return ret;
4565 }
4566 
4567 #ifndef TARGET_FORCE_SHMLBA
4568 /* For most architectures, SHMLBA is the same as the page size;
4569  * some architectures have larger values, in which case they should
4570  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4571  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4572  * and defining its own value for SHMLBA.
4573  *
4574  * The kernel also permits SHMLBA to be set by the architecture to a
4575  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4576  * this means that addresses are rounded to the large size if
4577  * SHM_RND is set but addresses not aligned to that size are not rejected
4578  * as long as they are at least page-aligned. Since the only architecture
4579  * which uses this is ia64, this code doesn't provide for that oddity.
4580  */
4581 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4582 {
4583     return TARGET_PAGE_SIZE;
4584 }
4585 #endif
4586 
4587 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4588                                  int shmid, abi_ulong shmaddr, int shmflg)
4589 {
4590     abi_long raddr;
4591     void *host_raddr;
4592     struct shmid_ds shm_info;
4593     int i,ret;
4594     abi_ulong shmlba;
4595 
4596     /* find out the length of the shared memory segment */
4597     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4598     if (is_error(ret)) {
4599         /* can't get length, bail out */
4600         return ret;
4601     }
4602 
4603     shmlba = target_shmlba(cpu_env);
4604 
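    /* An attach address that is not aligned to SHMLBA is rounded down when
     * SHM_RND is set, and otherwise rejected with EINVAL, matching the
     * kernel's behaviour. */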
4605     if (shmaddr & (shmlba - 1)) {
4606         if (shmflg & SHM_RND) {
4607             shmaddr &= ~(shmlba - 1);
4608         } else {
4609             return -TARGET_EINVAL;
4610         }
4611     }
4612     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4613         return -TARGET_EINVAL;
4614     }
4615 
4616     mmap_lock();
4617 
4618     if (shmaddr)
4619         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4620     else {
4621         abi_ulong mmap_start;
4622 
4623         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4624         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4625 
4626         if (mmap_start == -1) {
4627             errno = ENOMEM;
4628             host_raddr = (void *)-1;
4629         } else
4630             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4631     }
4632 
4633     if (host_raddr == (void *)-1) {
4634         mmap_unlock();
4635         return get_errno((long)host_raddr);
4636     }
4637     raddr = h2g((unsigned long)host_raddr);
4638 
4639     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4640                    PAGE_VALID | PAGE_READ |
4641                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4642 
4643     for (i = 0; i < N_SHM_REGIONS; i++) {
4644         if (!shm_regions[i].in_use) {
4645             shm_regions[i].in_use = true;
4646             shm_regions[i].start = raddr;
4647             shm_regions[i].size = shm_info.shm_segsz;
4648             break;
4649         }
4650     }
4651 
4652     mmap_unlock();
4653     return raddr;
4654 
4655 }
4656 
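/*
 * Detach the segment previously attached at shmaddr and clear the page
 * flags that do_shmat() set for the region.
 */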
4657 static inline abi_long do_shmdt(abi_ulong shmaddr)
4658 {
4659     int i;
4660     abi_long rv;
4661 
4662     mmap_lock();
4663 
4664     for (i = 0; i < N_SHM_REGIONS; ++i) {
4665         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4666             shm_regions[i].in_use = false;
4667             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4668             break;
4669         }
4670     }
4671     rv = get_errno(shmdt(g2h(shmaddr)));
4672 
4673     mmap_unlock();
4674 
4675     return rv;
4676 }
4677 
4678 #ifdef TARGET_NR_ipc
4679 /* ??? This only works with linear mappings.  */
4680 /* do_ipc() must return target values and target errnos. */
4681 static abi_long do_ipc(CPUArchState *cpu_env,
4682                        unsigned int call, abi_long first,
4683                        abi_long second, abi_long third,
4684                        abi_long ptr, abi_long fifth)
4685 {
4686     int version;
4687     abi_long ret = 0;
4688 
4689     version = call >> 16;
4690     call &= 0xffff;
4691 
4692     switch (call) {
4693     case IPCOP_semop:
4694         ret = do_semtimedop(first, ptr, second, 0, false);
4695         break;
4696     case IPCOP_semtimedop:
4697     /*
4698      * The s390 sys_ipc variant has only five parameters instead of six
4699      * (as in the default variant). The only difference is the handling of
4700      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4701      * to a struct timespec while the generic variant uses the fifth parameter.
4702      */
4703 #if defined(TARGET_S390X)
4704         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4705 #else
4706         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4707 #endif
4708         break;
4709 
4710     case IPCOP_semget:
4711         ret = get_errno(semget(first, second, third));
4712         break;
4713 
4714     case IPCOP_semctl: {
4715         /* The semun argument to semctl is passed by value, so dereference the
4716          * ptr argument. */
4717         abi_ulong atptr;
4718         get_user_ual(atptr, ptr);
4719         ret = do_semctl(first, second, third, atptr);
4720         break;
4721     }
4722 
4723     case IPCOP_msgget:
4724         ret = get_errno(msgget(first, second));
4725         break;
4726 
4727     case IPCOP_msgsnd:
4728         ret = do_msgsnd(first, ptr, second, third);
4729         break;
4730 
4731     case IPCOP_msgctl:
4732         ret = do_msgctl(first, second, ptr);
4733         break;
4734 
4735     case IPCOP_msgrcv:
4736         switch (version) {
4737         case 0:
4738             {
4739                 struct target_ipc_kludge {
4740                     abi_long msgp;
4741                     abi_long msgtyp;
4742                 } *tmp;
4743 
4744                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4745                     ret = -TARGET_EFAULT;
4746                     break;
4747                 }
4748 
4749                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4750 
4751                 unlock_user_struct(tmp, ptr, 0);
4752                 break;
4753             }
4754         default:
4755             ret = do_msgrcv(first, ptr, second, fifth, third);
4756         }
4757         break;
4758 
4759     case IPCOP_shmat:
4760         switch (version) {
4761         default:
4762         {
4763             abi_ulong raddr;
4764             raddr = do_shmat(cpu_env, first, ptr, second);
4765             if (is_error(raddr))
4766                 return get_errno(raddr);
4767             if (put_user_ual(raddr, third))
4768                 return -TARGET_EFAULT;
4769             break;
4770         }
4771         case 1:
4772             ret = -TARGET_EINVAL;
4773             break;
4774         }
4775         break;
4776     case IPCOP_shmdt:
4777         ret = do_shmdt(ptr);
4778         break;
4779 
4780     case IPCOP_shmget:
4781         /* IPC_* flag values are the same on all linux platforms */
4782         ret = get_errno(shmget(first, second, third));
4783         break;
4784 
4785     /* IPC_* and SHM_* command values are the same on all linux platforms */
4786     case IPCOP_shmctl:
4787         ret = do_shmctl(first, second, ptr);
4788         break;
4789     default:
4790         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4791                       call, version);
4792         ret = -TARGET_ENOSYS;
4793         break;
4794     }
4795     return ret;
4796 }
4797 #endif
4798 
4799 /* kernel structure types definitions */
4800 
4801 #define STRUCT(name, ...) STRUCT_ ## name,
4802 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4803 enum {
4804 #include "syscall_types.h"
4805 STRUCT_MAX
4806 };
4807 #undef STRUCT
4808 #undef STRUCT_SPECIAL
4809 
4810 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4811 #define STRUCT_SPECIAL(name)
4812 #include "syscall_types.h"
4813 #undef STRUCT
4814 #undef STRUCT_SPECIAL
4815 
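/*
 * Size of the conversion buffer (buf_temp[]) used by do_ioctl(); ioctls
 * whose payload can exceed this are handled by the special do_ioctl_*()
 * helpers below, which allocate larger buffers of their own.
 */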
4816 #define MAX_STRUCT_SIZE 4096
4817 
4818 #ifdef CONFIG_FIEMAP
4819 /* So fiemap access checks don't overflow on 32 bit systems.
4820  * This is very slightly smaller than the limit imposed by
4821  * the underlying kernel.
4822  */
4823 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4824                             / sizeof(struct fiemap_extent))
4825 
4826 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4827                                        int fd, int cmd, abi_long arg)
4828 {
4829     /* The parameter for this ioctl is a struct fiemap followed
4830      * by an array of struct fiemap_extent whose size is set
4831      * in fiemap->fm_extent_count. The array is filled in by the
4832      * ioctl.
4833      */
4834     int target_size_in, target_size_out;
4835     struct fiemap *fm;
4836     const argtype *arg_type = ie->arg_type;
4837     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4838     void *argptr, *p;
4839     abi_long ret;
4840     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4841     uint32_t outbufsz;
4842     int free_fm = 0;
4843 
4844     assert(arg_type[0] == TYPE_PTR);
4845     assert(ie->access == IOC_RW);
4846     arg_type++;
4847     target_size_in = thunk_type_size(arg_type, 0);
4848     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4849     if (!argptr) {
4850         return -TARGET_EFAULT;
4851     }
4852     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4853     unlock_user(argptr, arg, 0);
4854     fm = (struct fiemap *)buf_temp;
4855     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4856         return -TARGET_EINVAL;
4857     }
4858 
4859     outbufsz = sizeof (*fm) +
4860         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4861 
4862     if (outbufsz > MAX_STRUCT_SIZE) {
4863         /* We can't fit all the extents into the fixed size buffer.
4864          * Allocate one that is large enough and use it instead.
4865          */
4866         fm = g_try_malloc(outbufsz);
4867         if (!fm) {
4868             return -TARGET_ENOMEM;
4869         }
4870         memcpy(fm, buf_temp, sizeof(struct fiemap));
4871         free_fm = 1;
4872     }
4873     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4874     if (!is_error(ret)) {
4875         target_size_out = target_size_in;
4876         /* An extent_count of 0 means we were only counting the extents
4877          * so there are no structs to copy
4878          */
4879         if (fm->fm_extent_count != 0) {
4880             target_size_out += fm->fm_mapped_extents * extent_size;
4881         }
4882         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4883         if (!argptr) {
4884             ret = -TARGET_EFAULT;
4885         } else {
4886             /* Convert the struct fiemap */
4887             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4888             if (fm->fm_extent_count != 0) {
4889                 p = argptr + target_size_in;
4890                 /* ...and then all the struct fiemap_extents */
4891                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4892                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4893                                   THUNK_TARGET);
4894                     p += extent_size;
4895                 }
4896             }
4897             unlock_user(argptr, arg, target_size_out);
4898         }
4899     }
4900     if (free_fm) {
4901         g_free(fm);
4902     }
4903     return ret;
4904 }
4905 #endif
4906 
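/*
 * Handle ioctls that take a struct ifconf (SIOCGIFCONF): the ifc_buf and
 * ifc_len fields describe a separate array of struct ifreq, so convert the
 * outer struct, size a host buffer for the ifreq array, and convert each
 * entry back to the target afterwards.
 */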
4907 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4908                                 int fd, int cmd, abi_long arg)
4909 {
4910     const argtype *arg_type = ie->arg_type;
4911     int target_size;
4912     void *argptr;
4913     int ret;
4914     struct ifconf *host_ifconf;
4915     uint32_t outbufsz;
4916     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4917     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4918     int target_ifreq_size;
4919     int nb_ifreq;
4920     int free_buf = 0;
4921     int i;
4922     int target_ifc_len;
4923     abi_long target_ifc_buf;
4924     int host_ifc_len;
4925     char *host_ifc_buf;
4926 
4927     assert(arg_type[0] == TYPE_PTR);
4928     assert(ie->access == IOC_RW);
4929 
4930     arg_type++;
4931     target_size = thunk_type_size(arg_type, 0);
4932 
4933     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4934     if (!argptr)
4935         return -TARGET_EFAULT;
4936     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4937     unlock_user(argptr, arg, 0);
4938 
4939     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4940     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4941     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4942 
4943     if (target_ifc_buf != 0) {
4944         target_ifc_len = host_ifconf->ifc_len;
4945         nb_ifreq = target_ifc_len / target_ifreq_size;
4946         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4947 
4948         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4949         if (outbufsz > MAX_STRUCT_SIZE) {
4950             /*
4951              * We can't fit all the ifreq entries into the fixed-size buffer.
4952              * Allocate one that is large enough and use it instead.
4953              */
4954             host_ifconf = g_try_malloc(outbufsz);
4955             if (!host_ifconf) {
4956                 return -TARGET_ENOMEM;
4957             }
4958             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4959             free_buf = 1;
4960         }
4961         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4962 
4963         host_ifconf->ifc_len = host_ifc_len;
4964     } else {
4965         host_ifc_buf = NULL;
4966     }
4967     host_ifconf->ifc_buf = host_ifc_buf;
4968 
4969     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4970     if (!is_error(ret)) {
4971         /* convert host ifc_len to target ifc_len */
4972 
4973         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4974         target_ifc_len = nb_ifreq * target_ifreq_size;
4975         host_ifconf->ifc_len = target_ifc_len;
4976 
4977         /* restore target ifc_buf */
4978 
4979         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4980 
4981         /* copy struct ifconf to target user */
4982 
4983         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4984         if (!argptr)
4985             return -TARGET_EFAULT;
4986         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4987         unlock_user(argptr, arg, target_size);
4988 
4989         if (target_ifc_buf != 0) {
4990             /* copy ifreq[] to target user */
4991             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4992             for (i = 0; i < nb_ifreq ; i++) {
4993                 thunk_convert(argptr + i * target_ifreq_size,
4994                               host_ifc_buf + i * sizeof(struct ifreq),
4995                               ifreq_arg_type, THUNK_TARGET);
4996             }
4997             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4998         }
4999     }
5000 
5001     if (free_buf) {
5002         g_free(host_ifconf);
5003     }
5004 
5005     return ret;
5006 }
5007 
5008 #if defined(CONFIG_USBFS)
5009 #if HOST_LONG_BITS > 64
5010 #error USBDEVFS thunks do not support >64 bit hosts yet.
5011 #endif
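/*
 * Bookkeeping for an asynchronous USB request: pairs the guest's URB and
 * data buffer addresses with the host copy of the URB handed to the kernel,
 * so that REAPURB and DISCARDURB can find their way back to the guest.
 */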
5012 struct live_urb {
5013     uint64_t target_urb_adr;
5014     uint64_t target_buf_adr;
5015     char *target_buf_ptr;
5016     struct usbdevfs_urb host_urb;
5017 };
5018 
5019 static GHashTable *usbdevfs_urb_hashtable(void)
5020 {
5021     static GHashTable *urb_hashtable;
5022 
5023     if (!urb_hashtable) {
5024         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5025     }
5026     return urb_hashtable;
5027 }
5028 
5029 static void urb_hashtable_insert(struct live_urb *urb)
5030 {
5031     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5032     g_hash_table_insert(urb_hashtable, urb, urb);
5033 }
5034 
5035 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5036 {
5037     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5038     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5039 }
5040 
5041 static void urb_hashtable_remove(struct live_urb *urb)
5042 {
5043     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5044     g_hash_table_remove(urb_hashtable, urb);
5045 }
5046 
5047 static abi_long
5048 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5049                           int fd, int cmd, abi_long arg)
5050 {
5051     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5052     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5053     struct live_urb *lurb;
5054     void *argptr;
5055     uint64_t hurb;
5056     int target_size;
5057     uintptr_t target_urb_adr;
5058     abi_long ret;
5059 
5060     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5061 
5062     memset(buf_temp, 0, sizeof(uint64_t));
5063     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5064     if (is_error(ret)) {
5065         return ret;
5066     }
5067 
5068     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5069     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5070     if (!lurb->target_urb_adr) {
5071         return -TARGET_EFAULT;
5072     }
5073     urb_hashtable_remove(lurb);
5074     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5075         lurb->host_urb.buffer_length);
5076     lurb->target_buf_ptr = NULL;
5077 
5078     /* restore the guest buffer pointer */
5079     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5080 
5081     /* update the guest urb struct */
5082     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5083     if (!argptr) {
5084         g_free(lurb);
5085         return -TARGET_EFAULT;
5086     }
5087     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5088     unlock_user(argptr, lurb->target_urb_adr, target_size);
5089 
5090     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5091     /* write back the urb handle */
5092     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5093     if (!argptr) {
5094         g_free(lurb);
5095         return -TARGET_EFAULT;
5096     }
5097 
5098     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5099     target_urb_adr = lurb->target_urb_adr;
5100     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5101     unlock_user(argptr, arg, target_size);
5102 
5103     g_free(lurb);
5104     return ret;
5105 }
5106 
5107 static abi_long
5108 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5109                              uint8_t *buf_temp __attribute__((unused)),
5110                              int fd, int cmd, abi_long arg)
5111 {
5112     struct live_urb *lurb;
5113 
5114     /* map target address back to host URB with metadata. */
5115     lurb = urb_hashtable_lookup(arg);
5116     if (!lurb) {
5117         return -TARGET_EFAULT;
5118     }
5119     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5120 }
5121 
5122 static abi_long
5123 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5124                             int fd, int cmd, abi_long arg)
5125 {
5126     const argtype *arg_type = ie->arg_type;
5127     int target_size;
5128     abi_long ret;
5129     void *argptr;
5130     int rw_dir;
5131     struct live_urb *lurb;
5132 
5133     /*
5134      * Each submitted URB needs to map to a unique ID for the
5135      * kernel, and that unique ID needs to be a pointer to
5136      * host memory.  Hence, we need to malloc for each URB.
5137      * Isochronous transfers have a variable-length struct.
5138      */
5139     arg_type++;
5140     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5141 
5142     /* construct host copy of urb and metadata */
5143     lurb = g_try_malloc0(sizeof(struct live_urb));
5144     if (!lurb) {
5145         return -TARGET_ENOMEM;
5146     }
5147 
5148     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5149     if (!argptr) {
5150         g_free(lurb);
5151         return -TARGET_EFAULT;
5152     }
5153     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5154     unlock_user(argptr, arg, 0);
5155 
5156     lurb->target_urb_adr = arg;
5157     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5158 
5159     /* buffer space used depends on endpoint type so lock the entire buffer */
5160     /* control type urbs should check the buffer contents for true direction */
5161     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5162     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5163         lurb->host_urb.buffer_length, 1);
5164     if (lurb->target_buf_ptr == NULL) {
5165         g_free(lurb);
5166         return -TARGET_EFAULT;
5167     }
5168 
5169     /* update buffer pointer in host copy */
5170     lurb->host_urb.buffer = lurb->target_buf_ptr;
5171 
5172     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5173     if (is_error(ret)) {
5174         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5175         g_free(lurb);
5176     } else {
5177         urb_hashtable_insert(lurb);
5178     }
5179 
5180     return ret;
5181 }
5182 #endif /* CONFIG_USBFS */
5183 
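/*
 * Device-mapper ioctls use a single struct dm_ioctl header followed by a
 * command-specific, variable-sized payload, so both directions have to be
 * converted by hand here rather than through the generic thunk code.
 */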
5184 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5185                             int cmd, abi_long arg)
5186 {
5187     void *argptr;
5188     struct dm_ioctl *host_dm;
5189     abi_long guest_data;
5190     uint32_t guest_data_size;
5191     int target_size;
5192     const argtype *arg_type = ie->arg_type;
5193     abi_long ret;
5194     void *big_buf = NULL;
5195     char *host_data;
5196 
5197     arg_type++;
5198     target_size = thunk_type_size(arg_type, 0);
5199     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5200     if (!argptr) {
5201         ret = -TARGET_EFAULT;
5202         goto out;
5203     }
5204     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5205     unlock_user(argptr, arg, 0);
5206 
5207     /* buf_temp is too small, so fetch things into a bigger buffer */
5208     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5209     memcpy(big_buf, buf_temp, target_size);
5210     buf_temp = big_buf;
5211     host_dm = big_buf;
5212 
5213     guest_data = arg + host_dm->data_start;
5214     if ((guest_data - arg) < 0) {
5215         ret = -TARGET_EINVAL;
5216         goto out;
5217     }
5218     guest_data_size = host_dm->data_size - host_dm->data_start;
5219     host_data = (char*)host_dm + host_dm->data_start;
5220 
5221     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5222     if (!argptr) {
5223         ret = -TARGET_EFAULT;
5224         goto out;
5225     }
5226 
5227     switch (ie->host_cmd) {
5228     case DM_REMOVE_ALL:
5229     case DM_LIST_DEVICES:
5230     case DM_DEV_CREATE:
5231     case DM_DEV_REMOVE:
5232     case DM_DEV_SUSPEND:
5233     case DM_DEV_STATUS:
5234     case DM_DEV_WAIT:
5235     case DM_TABLE_STATUS:
5236     case DM_TABLE_CLEAR:
5237     case DM_TABLE_DEPS:
5238     case DM_LIST_VERSIONS:
5239         /* no input data */
5240         break;
5241     case DM_DEV_RENAME:
5242     case DM_DEV_SET_GEOMETRY:
5243         /* data contains only strings */
5244         memcpy(host_data, argptr, guest_data_size);
5245         break;
5246     case DM_TARGET_MSG:
5247         memcpy(host_data, argptr, guest_data_size);
5248         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5249         break;
5250     case DM_TABLE_LOAD:
5251     {
5252         void *gspec = argptr;
5253         void *cur_data = host_data;
5254         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5255         int spec_size = thunk_type_size(arg_type, 0);
5256         int i;
5257 
5258         for (i = 0; i < host_dm->target_count; i++) {
5259             struct dm_target_spec *spec = cur_data;
5260             uint32_t next;
5261             int slen;
5262 
5263             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5264             slen = strlen((char*)gspec + spec_size) + 1;
5265             next = spec->next;
5266             spec->next = sizeof(*spec) + slen;
5267             strcpy((char*)&spec[1], gspec + spec_size);
5268             gspec += next;
5269             cur_data += spec->next;
5270         }
5271         break;
5272     }
5273     default:
5274         ret = -TARGET_EINVAL;
5275         unlock_user(argptr, guest_data, 0);
5276         goto out;
5277     }
5278     unlock_user(argptr, guest_data, 0);
5279 
5280     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5281     if (!is_error(ret)) {
5282         guest_data = arg + host_dm->data_start;
5283         guest_data_size = host_dm->data_size - host_dm->data_start;
5284         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5285         switch (ie->host_cmd) {
5286         case DM_REMOVE_ALL:
5287         case DM_DEV_CREATE:
5288         case DM_DEV_REMOVE:
5289         case DM_DEV_RENAME:
5290         case DM_DEV_SUSPEND:
5291         case DM_DEV_STATUS:
5292         case DM_TABLE_LOAD:
5293         case DM_TABLE_CLEAR:
5294         case DM_TARGET_MSG:
5295         case DM_DEV_SET_GEOMETRY:
5296             /* no return data */
5297             break;
5298         case DM_LIST_DEVICES:
5299         {
5300             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5301             uint32_t remaining_data = guest_data_size;
5302             void *cur_data = argptr;
5303             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5304             int nl_size = 12; /* can't use thunk_size due to alignment */
5305 
5306             while (1) {
5307                 uint32_t next = nl->next;
5308                 if (next) {
5309                     nl->next = nl_size + (strlen(nl->name) + 1);
5310                 }
5311                 if (remaining_data < nl->next) {
5312                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5313                     break;
5314                 }
5315                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5316                 strcpy(cur_data + nl_size, nl->name);
5317                 cur_data += nl->next;
5318                 remaining_data -= nl->next;
5319                 if (!next) {
5320                     break;
5321                 }
5322                 nl = (void*)nl + next;
5323             }
5324             break;
5325         }
5326         case DM_DEV_WAIT:
5327         case DM_TABLE_STATUS:
5328         {
5329             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5330             void *cur_data = argptr;
5331             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5332             int spec_size = thunk_type_size(arg_type, 0);
5333             int i;
5334 
5335             for (i = 0; i < host_dm->target_count; i++) {
5336                 uint32_t next = spec->next;
5337                 int slen = strlen((char*)&spec[1]) + 1;
5338                 spec->next = (cur_data - argptr) + spec_size + slen;
5339                 if (guest_data_size < spec->next) {
5340                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5341                     break;
5342                 }
5343                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5344                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5345                 cur_data = argptr + spec->next;
5346                 spec = (void*)host_dm + host_dm->data_start + next;
5347             }
5348             break;
5349         }
5350         case DM_TABLE_DEPS:
5351         {
5352             void *hdata = (void*)host_dm + host_dm->data_start;
5353             int count = *(uint32_t*)hdata;
5354             uint64_t *hdev = hdata + 8;
5355             uint64_t *gdev = argptr + 8;
5356             int i;
5357 
5358             *(uint32_t*)argptr = tswap32(count);
5359             for (i = 0; i < count; i++) {
5360                 *gdev = tswap64(*hdev);
5361                 gdev++;
5362                 hdev++;
5363             }
5364             break;
5365         }
5366         case DM_LIST_VERSIONS:
5367         {
5368             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5369             uint32_t remaining_data = guest_data_size;
5370             void *cur_data = argptr;
5371             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5372             int vers_size = thunk_type_size(arg_type, 0);
5373 
5374             while (1) {
5375                 uint32_t next = vers->next;
5376                 if (next) {
5377                     vers->next = vers_size + (strlen(vers->name) + 1);
5378                 }
5379                 if (remaining_data < vers->next) {
5380                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5381                     break;
5382                 }
5383                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5384                 strcpy(cur_data + vers_size, vers->name);
5385                 cur_data += vers->next;
5386                 remaining_data -= vers->next;
5387                 if (!next) {
5388                     break;
5389                 }
5390                 vers = (void*)vers + next;
5391             }
5392             break;
5393         }
5394         default:
5395             unlock_user(argptr, guest_data, 0);
5396             ret = -TARGET_EINVAL;
5397             goto out;
5398         }
5399         unlock_user(argptr, guest_data, guest_data_size);
5400 
5401         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5402         if (!argptr) {
5403             ret = -TARGET_EFAULT;
5404             goto out;
5405         }
5406         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5407         unlock_user(argptr, arg, target_size);
5408     }
5409 out:
5410     g_free(big_buf);
5411     return ret;
5412 }
5413 
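/*
 * BLKPG: struct blkpg_ioctl_arg contains a pointer to a separate
 * struct blkpg_partition, which must be fetched and converted as well.
 */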
5414 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5415                                int cmd, abi_long arg)
5416 {
5417     void *argptr;
5418     int target_size;
5419     const argtype *arg_type = ie->arg_type;
5420     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5421     abi_long ret;
5422 
5423     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5424     struct blkpg_partition host_part;
5425 
5426     /* Read and convert blkpg */
5427     arg_type++;
5428     target_size = thunk_type_size(arg_type, 0);
5429     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5430     if (!argptr) {
5431         ret = -TARGET_EFAULT;
5432         goto out;
5433     }
5434     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5435     unlock_user(argptr, arg, 0);
5436 
5437     switch (host_blkpg->op) {
5438     case BLKPG_ADD_PARTITION:
5439     case BLKPG_DEL_PARTITION:
5440         /* payload is struct blkpg_partition */
5441         break;
5442     default:
5443         /* Unknown opcode */
5444         ret = -TARGET_EINVAL;
5445         goto out;
5446     }
5447 
5448     /* Read and convert blkpg->data */
5449     arg = (abi_long)(uintptr_t)host_blkpg->data;
5450     target_size = thunk_type_size(part_arg_type, 0);
5451     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5452     if (!argptr) {
5453         ret = -TARGET_EFAULT;
5454         goto out;
5455     }
5456     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5457     unlock_user(argptr, arg, 0);
5458 
5459     /* Swizzle the data pointer to our local copy and call! */
5460     host_blkpg->data = &host_part;
5461     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5462 
5463 out:
5464     return ret;
5465 }
5466 
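/*
 * Route ioctls (SIOCADDRT/SIOCDELRT) take a struct rtentry whose rt_dev
 * field is an optional pointer to a device name string, so the struct is
 * converted field by field in order to lock the string from guest memory
 * and pass a host pointer to the kernel.
 */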
5467 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5468                                 int fd, int cmd, abi_long arg)
5469 {
5470     const argtype *arg_type = ie->arg_type;
5471     const StructEntry *se;
5472     const argtype *field_types;
5473     const int *dst_offsets, *src_offsets;
5474     int target_size;
5475     void *argptr;
5476     abi_ulong *target_rt_dev_ptr = NULL;
5477     unsigned long *host_rt_dev_ptr = NULL;
5478     abi_long ret;
5479     int i;
5480 
5481     assert(ie->access == IOC_W);
5482     assert(*arg_type == TYPE_PTR);
5483     arg_type++;
5484     assert(*arg_type == TYPE_STRUCT);
5485     target_size = thunk_type_size(arg_type, 0);
5486     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5487     if (!argptr) {
5488         return -TARGET_EFAULT;
5489     }
5490     arg_type++;
5491     assert(*arg_type == (int)STRUCT_rtentry);
5492     se = struct_entries + *arg_type++;
5493     assert(se->convert[0] == NULL);
5494     /* convert struct here to be able to catch rt_dev string */
5495     field_types = se->field_types;
5496     dst_offsets = se->field_offsets[THUNK_HOST];
5497     src_offsets = se->field_offsets[THUNK_TARGET];
5498     for (i = 0; i < se->nb_fields; i++) {
5499         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5500             assert(*field_types == TYPE_PTRVOID);
5501             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5502             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5503             if (*target_rt_dev_ptr != 0) {
5504                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5505                                                   tswapal(*target_rt_dev_ptr));
5506                 if (!*host_rt_dev_ptr) {
5507                     unlock_user(argptr, arg, 0);
5508                     return -TARGET_EFAULT;
5509                 }
5510             } else {
5511                 *host_rt_dev_ptr = 0;
5512             }
5513             field_types++;
5514             continue;
5515         }
5516         field_types = thunk_convert(buf_temp + dst_offsets[i],
5517                                     argptr + src_offsets[i],
5518                                     field_types, THUNK_HOST);
5519     }
5520     unlock_user(argptr, arg, 0);
5521 
5522     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5523 
5524     assert(host_rt_dev_ptr != NULL);
5525     assert(target_rt_dev_ptr != NULL);
5526     if (*host_rt_dev_ptr != 0) {
5527         unlock_user((void *)*host_rt_dev_ptr,
5528                     *target_rt_dev_ptr, 0);
5529     }
5530     return ret;
5531 }
5532 
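/*
 * KDSIGACCEPT takes a signal number, which must be translated from the
 * target's numbering to the host's before being passed to the kernel.
 */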
5533 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5534                                      int fd, int cmd, abi_long arg)
5535 {
5536     int sig = target_to_host_signal(arg);
5537     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5538 }
5539 
5540 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5541                                     int fd, int cmd, abi_long arg)
5542 {
5543     struct timeval tv;
5544     abi_long ret;
5545 
5546     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5547     if (is_error(ret)) {
5548         return ret;
5549     }
5550 
5551     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5552         if (copy_to_user_timeval(arg, &tv)) {
5553             return -TARGET_EFAULT;
5554         }
5555     } else {
5556         if (copy_to_user_timeval64(arg, &tv)) {
5557             return -TARGET_EFAULT;
5558         }
5559     }
5560 
5561     return ret;
5562 }
5563 
5564 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5565                                       int fd, int cmd, abi_long arg)
5566 {
5567     struct timespec ts;
5568     abi_long ret;
5569 
5570     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5571     if (is_error(ret)) {
5572         return ret;
5573     }
5574 
5575     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5576         if (host_to_target_timespec(arg, &ts)) {
5577             return -TARGET_EFAULT;
5578         }
5579     } else {
5580         if (host_to_target_timespec64(arg, &ts)) {
5581             return -TARGET_EFAULT;
5582         }
5583     }
5584 
5585     return ret;
5586 }
5587 
5588 #ifdef TIOCGPTPEER
5589 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5590                                      int fd, int cmd, abi_long arg)
5591 {
5592     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5593     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5594 }
5595 #endif
5596 
5597 #ifdef HAVE_DRM_H
5598 
5599 static void unlock_drm_version(struct drm_version *host_ver,
5600                                struct target_drm_version *target_ver,
5601                                bool copy)
5602 {
5603     unlock_user(host_ver->name, target_ver->name,
5604                                 copy ? host_ver->name_len : 0);
5605     unlock_user(host_ver->date, target_ver->date,
5606                                 copy ? host_ver->date_len : 0);
5607     unlock_user(host_ver->desc, target_ver->desc,
5608                                 copy ? host_ver->desc_len : 0);
5609 }
5610 
5611 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5612                                           struct target_drm_version *target_ver)
5613 {
5614     memset(host_ver, 0, sizeof(*host_ver));
5615 
5616     __get_user(host_ver->name_len, &target_ver->name_len);
5617     if (host_ver->name_len) {
5618         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5619                                    target_ver->name_len, 0);
5620         if (!host_ver->name) {
5621             return -EFAULT;
5622         }
5623     }
5624 
5625     __get_user(host_ver->date_len, &target_ver->date_len);
5626     if (host_ver->date_len) {
5627         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5628                                    target_ver->date_len, 0);
5629         if (!host_ver->date) {
5630             goto err;
5631         }
5632     }
5633 
5634     __get_user(host_ver->desc_len, &target_ver->desc_len);
5635     if (host_ver->desc_len) {
5636         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5637                                    target_ver->desc_len, 0);
5638         if (!host_ver->desc) {
5639             goto err;
5640         }
5641     }
5642 
5643     return 0;
5644 err:
5645     unlock_drm_version(host_ver, target_ver, false);
5646     return -EFAULT;
5647 }
5648 
5649 static inline void host_to_target_drmversion(
5650                                           struct target_drm_version *target_ver,
5651                                           struct drm_version *host_ver)
5652 {
5653     __put_user(host_ver->version_major, &target_ver->version_major);
5654     __put_user(host_ver->version_minor, &target_ver->version_minor);
5655     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5656     __put_user(host_ver->name_len, &target_ver->name_len);
5657     __put_user(host_ver->date_len, &target_ver->date_len);
5658     __put_user(host_ver->desc_len, &target_ver->desc_len);
5659     unlock_drm_version(host_ver, target_ver, true);
5660 }
5661 
5662 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5663                              int fd, int cmd, abi_long arg)
5664 {
5665     struct drm_version *ver;
5666     struct target_drm_version *target_ver;
5667     abi_long ret;
5668 
5669     switch (ie->host_cmd) {
5670     case DRM_IOCTL_VERSION:
5671         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5672             return -TARGET_EFAULT;
5673         }
5674         ver = (struct drm_version *)buf_temp;
5675         ret = target_to_host_drmversion(ver, target_ver);
5676         if (!is_error(ret)) {
5677             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5678             if (is_error(ret)) {
5679                 unlock_drm_version(ver, target_ver, false);
5680             } else {
5681                 host_to_target_drmversion(target_ver, ver);
5682             }
5683         }
5684         unlock_user_struct(target_ver, arg, 0);
5685         return ret;
5686     }
5687     return -TARGET_ENOSYS;
5688 }
5689 
5690 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5691                                            struct drm_i915_getparam *gparam,
5692                                            int fd, abi_long arg)
5693 {
5694     abi_long ret;
5695     int value;
5696     struct target_drm_i915_getparam *target_gparam;
5697 
5698     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5699         return -TARGET_EFAULT;
5700     }
5701 
5702     __get_user(gparam->param, &target_gparam->param);
5703     gparam->value = &value;
5704     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5705     put_user_s32(value, target_gparam->value);
5706 
5707     unlock_user_struct(target_gparam, arg, 0);
5708     return ret;
5709 }
5710 
5711 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5712                                   int fd, int cmd, abi_long arg)
5713 {
5714     switch (ie->host_cmd) {
5715     case DRM_IOCTL_I915_GETPARAM:
5716         return do_ioctl_drm_i915_getparam(ie,
5717                                           (struct drm_i915_getparam *)buf_temp,
5718                                           fd, arg);
5719     default:
5720         return -TARGET_ENOSYS;
5721     }
5722 }
5723 
5724 #endif
5725 
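/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable-length array
 * of MAC addresses (count * ETH_ALEN bytes), which is copied separately.
 */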
5726 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5727                                         int fd, int cmd, abi_long arg)
5728 {
5729     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5730     struct tun_filter *target_filter;
5731     char *target_addr;
5732 
5733     assert(ie->access == IOC_W);
5734 
5735     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5736     if (!target_filter) {
5737         return -TARGET_EFAULT;
5738     }
5739     filter->flags = tswap16(target_filter->flags);
5740     filter->count = tswap16(target_filter->count);
5741     unlock_user(target_filter, arg, 0);
5742 
5743     if (filter->count) {
5744         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5745             MAX_STRUCT_SIZE) {
5746             return -TARGET_EFAULT;
5747         }
5748 
5749         target_addr = lock_user(VERIFY_READ,
5750                                 arg + offsetof(struct tun_filter, addr),
5751                                 filter->count * ETH_ALEN, 1);
5752         if (!target_addr) {
5753             return -TARGET_EFAULT;
5754         }
5755         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5756         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5757     }
5758 
5759     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5760 }
5761 
5762 IOCTLEntry ioctl_entries[] = {
5763 #define IOCTL(cmd, access, ...) \
5764     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5765 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5766     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5767 #define IOCTL_IGNORE(cmd) \
5768     { TARGET_ ## cmd, 0, #cmd },
5769 #include "ioctls.h"
5770     { 0, 0, },
5771 };
5772 
5773 /* ??? Implement proper locking for ioctls.  */
5774 /* do_ioctl() must return target values and target errnos. */
5775 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5776 {
5777     const IOCTLEntry *ie;
5778     const argtype *arg_type;
5779     abi_long ret;
5780     uint8_t buf_temp[MAX_STRUCT_SIZE];
5781     int target_size;
5782     void *argptr;
5783 
5784     ie = ioctl_entries;
5785     for(;;) {
5786         if (ie->target_cmd == 0) {
5787             qemu_log_mask(
5788                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5789             return -TARGET_ENOSYS;
5790         }
5791         if (ie->target_cmd == cmd)
5792             break;
5793         ie++;
5794     }
5795     arg_type = ie->arg_type;
5796     if (ie->do_ioctl) {
5797         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5798     } else if (!ie->host_cmd) {
5799         /* Some architectures define BSD ioctls in their headers
5800            that are not implemented in Linux.  */
5801         return -TARGET_ENOSYS;
5802     }
5803 
5804     switch(arg_type[0]) {
5805     case TYPE_NULL:
5806         /* no argument */
5807         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5808         break;
5809     case TYPE_PTRVOID:
5810     case TYPE_INT:
5811     case TYPE_LONG:
5812     case TYPE_ULONG:
5813         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5814         break;
5815     case TYPE_PTR:
5816         arg_type++;
5817         target_size = thunk_type_size(arg_type, 0);
5818         switch(ie->access) {
5819         case IOC_R:
5820             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5821             if (!is_error(ret)) {
5822                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5823                 if (!argptr)
5824                     return -TARGET_EFAULT;
5825                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5826                 unlock_user(argptr, arg, target_size);
5827             }
5828             break;
5829         case IOC_W:
5830             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5831             if (!argptr)
5832                 return -TARGET_EFAULT;
5833             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5834             unlock_user(argptr, arg, 0);
5835             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5836             break;
5837         default:
5838         case IOC_RW:
5839             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5840             if (!argptr)
5841                 return -TARGET_EFAULT;
5842             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5843             unlock_user(argptr, arg, 0);
5844             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5845             if (!is_error(ret)) {
5846                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5847                 if (!argptr)
5848                     return -TARGET_EFAULT;
5849                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5850                 unlock_user(argptr, arg, target_size);
5851             }
5852             break;
5853         }
5854         break;
5855     default:
5856         qemu_log_mask(LOG_UNIMP,
5857                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5858                       (long)cmd, arg_type[0]);
5859         ret = -TARGET_ENOSYS;
5860         break;
5861     }
5862     return ret;
5863 }
5864 
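/*
 * Translation tables for the termios flag words, used by
 * target_to_host_termios() and host_to_target_termios() below.
 */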
5865 static const bitmask_transtbl iflag_tbl[] = {
5866         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5867         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5868         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5869         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5870         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5871         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5872         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5873         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5874         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5875         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5876         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5877         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5878         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5879         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5880         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5881         { 0, 0, 0, 0 }
5882 };
5883 
5884 static const bitmask_transtbl oflag_tbl[] = {
5885 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5886 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5887 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5888 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5889 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5890 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5891 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5892 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5893 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5894 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5895 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5896 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5897 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5898 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5899 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5900 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5901 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5902 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5903 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5904 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5905 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5906 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5907 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5908 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5909 	{ 0, 0, 0, 0 }
5910 };
5911 
5912 static const bitmask_transtbl cflag_tbl[] = {
5913 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5914 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5915 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5916 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5917 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5918 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5919 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5920 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5921 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5922 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5923 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5924 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5925 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5926 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5927 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5928 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5929 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5930 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5931 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5932 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5933 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5934 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5935 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5936 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5937 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5938 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5939 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5940 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5941 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5942 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5943 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5944 	{ 0, 0, 0, 0 }
5945 };
5946 
5947 static const bitmask_transtbl lflag_tbl[] = {
5948   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5949   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5950   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5951   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5952   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5953   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5954   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5955   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5956   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5957   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5958   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5959   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5960   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5961   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5962   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5963   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5964   { 0, 0, 0, 0 }
5965 };
5966 
5967 static void target_to_host_termios (void *dst, const void *src)
5968 {
5969     struct host_termios *host = dst;
5970     const struct target_termios *target = src;
5971 
5972     host->c_iflag =
5973         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5974     host->c_oflag =
5975         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5976     host->c_cflag =
5977         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5978     host->c_lflag =
5979         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5980     host->c_line = target->c_line;
5981 
5982     memset(host->c_cc, 0, sizeof(host->c_cc));
5983     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5984     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5985     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5986     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5987     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5988     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5989     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5990     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5991     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5992     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5993     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5994     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5995     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5996     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5997     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5998     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5999     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6000 }
6001 
6002 static void host_to_target_termios (void *dst, const void *src)
6003 {
6004     struct target_termios *target = dst;
6005     const struct host_termios *host = src;
6006 
6007     target->c_iflag =
6008         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6009     target->c_oflag =
6010         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6011     target->c_cflag =
6012         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6013     target->c_lflag =
6014         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6015     target->c_line = host->c_line;
6016 
6017     memset(target->c_cc, 0, sizeof(target->c_cc));
6018     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6019     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6020     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6021     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6022     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6023     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6024     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6025     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6026     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6027     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6028     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6029     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6030     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6031     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6032     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6033     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6034     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6035 }
6036 
6037 static const StructEntry struct_termios_def = {
6038     .convert = { host_to_target_termios, target_to_host_termios },
6039     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6040     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6041     .print = print_termios,
6042 };
6043 
6044 static const bitmask_transtbl mmap_flags_tbl[] = {
6045     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6046     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6047     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6048     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6049       MAP_ANONYMOUS, MAP_ANONYMOUS },
6050     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6051       MAP_GROWSDOWN, MAP_GROWSDOWN },
6052     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6053       MAP_DENYWRITE, MAP_DENYWRITE },
6054     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6055       MAP_EXECUTABLE, MAP_EXECUTABLE },
6056     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6057     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6058       MAP_NORESERVE, MAP_NORESERVE },
6059     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6060     /* MAP_STACK has been ignored by the kernel for quite some time.
6061        Recognize it for the target insofar as we do not want to pass
6062        it through to the host.  */
6063     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6064     { 0, 0, 0, 0 }
6065 };
6066 
6067 /*
6068  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6069  *       TARGET_I386 is also defined when TARGET_X86_64 is defined.
6070  */
6071 #if defined(TARGET_I386)
6072 
6073 /* NOTE: there is really one LDT for all the threads */
6074 static uint8_t *ldt_table;
6075 
6076 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6077 {
6078     int size;
6079     void *p;
6080 
6081     if (!ldt_table)
6082         return 0;
6083     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6084     if (size > bytecount)
6085         size = bytecount;
6086     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6087     if (!p)
6088         return -TARGET_EFAULT;
6089     /* ??? Should this be byteswapped?  */
6090     memcpy(p, ldt_table, size);
6091     unlock_user(p, ptr, size);
6092     return size;
6093 }
6094 
6095 /* XXX: add locking support */
6096 static abi_long write_ldt(CPUX86State *env,
6097                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6098 {
6099     struct target_modify_ldt_ldt_s ldt_info;
6100     struct target_modify_ldt_ldt_s *target_ldt_info;
6101     int seg_32bit, contents, read_exec_only, limit_in_pages;
6102     int seg_not_present, useable, lm;
6103     uint32_t *lp, entry_1, entry_2;
6104 
6105     if (bytecount != sizeof(ldt_info))
6106         return -TARGET_EINVAL;
6107     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6108         return -TARGET_EFAULT;
6109     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6110     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6111     ldt_info.limit = tswap32(target_ldt_info->limit);
6112     ldt_info.flags = tswap32(target_ldt_info->flags);
6113     unlock_user_struct(target_ldt_info, ptr, 0);
6114 
6115     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6116         return -TARGET_EINVAL;
6117     seg_32bit = ldt_info.flags & 1;
6118     contents = (ldt_info.flags >> 1) & 3;
6119     read_exec_only = (ldt_info.flags >> 3) & 1;
6120     limit_in_pages = (ldt_info.flags >> 4) & 1;
6121     seg_not_present = (ldt_info.flags >> 5) & 1;
6122     useable = (ldt_info.flags >> 6) & 1;
6123 #ifdef TARGET_ABI32
6124     lm = 0;
6125 #else
6126     lm = (ldt_info.flags >> 7) & 1;
6127 #endif
6128     if (contents == 3) {
6129         if (oldmode)
6130             return -TARGET_EINVAL;
6131         if (seg_not_present == 0)
6132             return -TARGET_EINVAL;
6133     }
6134     /* allocate the LDT */
6135     if (!ldt_table) {
6136         env->ldt.base = target_mmap(0,
6137                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6138                                     PROT_READ|PROT_WRITE,
6139                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6140         if (env->ldt.base == -1)
6141             return -TARGET_ENOMEM;
6142         memset(g2h(env->ldt.base), 0,
6143                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6144         env->ldt.limit = 0xffff;
6145         ldt_table = g2h(env->ldt.base);
6146     }
6147 
6148     /* NOTE: same code as Linux kernel */
6149     /* Allow LDTs to be cleared by the user. */
6150     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6151         if (oldmode ||
6152             (contents == 0		&&
6153              read_exec_only == 1	&&
6154              seg_32bit == 0		&&
6155              limit_in_pages == 0	&&
6156              seg_not_present == 1	&&
6157              useable == 0 )) {
6158             entry_1 = 0;
6159             entry_2 = 0;
6160             goto install;
6161         }
6162     }
6163 
6164     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6165         (ldt_info.limit & 0x0ffff);
6166     entry_2 = (ldt_info.base_addr & 0xff000000) |
6167         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6168         (ldt_info.limit & 0xf0000) |
6169         ((read_exec_only ^ 1) << 9) |
6170         (contents << 10) |
6171         ((seg_not_present ^ 1) << 15) |
6172         (seg_32bit << 22) |
6173         (limit_in_pages << 23) |
6174         (lm << 21) |
6175         0x7000;
6176     if (!oldmode)
6177         entry_2 |= (useable << 20);
6178 
6179     /* Install the new entry ...  */
6180 install:
6181     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6182     lp[0] = tswap32(entry_1);
6183     lp[1] = tswap32(entry_2);
6184     return 0;
6185 }
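/*
 * Illustrative sketch (added for this excerpt, compiled out, not part of the
 * original source): how the entry_1/entry_2 descriptor words packed by
 * write_ldt() above can be decoded again.  It mirrors the unpacking done by
 * do_get_thread_area() later in this file; the function name is made up.
 */
#if 0
static void example_decode_ldt_entry(uint32_t entry_1, uint32_t entry_2)
{
    /* Low word: base[15:0] in the top half, limit[15:0] in the bottom half. */
    uint32_t limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    uint32_t base  = (entry_1 >> 16) |
                     ((entry_2 & 0x000000ff) << 16) |
                     (entry_2 & 0xff000000);
    /* High word: access/flag bits at the same positions used above. */
    int read_exec_only  = ((entry_2 >> 9) & 1) ^ 1;
    int contents        = (entry_2 >> 10) & 3;
    int seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    int useable         = (entry_2 >> 20) & 1;
    int seg_32bit       = (entry_2 >> 22) & 1;
    int limit_in_pages  = (entry_2 >> 23) & 1;

    printf("base=0x%x limit=0x%x 32bit=%d pages=%d ro=%d type=%d "
           "present=%d useable=%d\n",
           base, limit, seg_32bit, limit_in_pages, read_exec_only,
           contents, seg_not_present ^ 1, useable);
}
#endif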
6186 
6187 /* specific and weird i386 syscalls */
6188 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6189                               unsigned long bytecount)
6190 {
6191     abi_long ret;
6192 
6193     switch (func) {
6194     case 0:
6195         ret = read_ldt(ptr, bytecount);
6196         break;
6197     case 1:
6198         ret = write_ldt(env, ptr, bytecount, 1);
6199         break;
6200     case 0x11:
6201         ret = write_ldt(env, ptr, bytecount, 0);
6202         break;
6203     default:
6204         ret = -TARGET_ENOSYS;
6205         break;
6206     }
6207     return ret;
6208 }
6209 
6210 #if defined(TARGET_ABI32)
6211 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6212 {
6213     uint64_t *gdt_table = g2h(env->gdt.base);
6214     struct target_modify_ldt_ldt_s ldt_info;
6215     struct target_modify_ldt_ldt_s *target_ldt_info;
6216     int seg_32bit, contents, read_exec_only, limit_in_pages;
6217     int seg_not_present, useable, lm;
6218     uint32_t *lp, entry_1, entry_2;
6219     int i;
6220 
6221     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6222     if (!target_ldt_info)
6223         return -TARGET_EFAULT;
6224     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6225     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6226     ldt_info.limit = tswap32(target_ldt_info->limit);
6227     ldt_info.flags = tswap32(target_ldt_info->flags);
6228     if (ldt_info.entry_number == -1) {
6229         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6230             if (gdt_table[i] == 0) {
6231                 ldt_info.entry_number = i;
6232                 target_ldt_info->entry_number = tswap32(i);
6233                 break;
6234             }
6235         }
6236     }
6237     unlock_user_struct(target_ldt_info, ptr, 1);
6238 
6239     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6240         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6241            return -TARGET_EINVAL;
6242     seg_32bit = ldt_info.flags & 1;
6243     contents = (ldt_info.flags >> 1) & 3;
6244     read_exec_only = (ldt_info.flags >> 3) & 1;
6245     limit_in_pages = (ldt_info.flags >> 4) & 1;
6246     seg_not_present = (ldt_info.flags >> 5) & 1;
6247     useable = (ldt_info.flags >> 6) & 1;
6248 #ifdef TARGET_ABI32
6249     lm = 0;
6250 #else
6251     lm = (ldt_info.flags >> 7) & 1;
6252 #endif
6253 
6254     if (contents == 3) {
6255         if (seg_not_present == 0)
6256             return -TARGET_EINVAL;
6257     }
6258 
6259     /* NOTE: same code as Linux kernel */
6260     /* Allow LDTs to be cleared by the user. */
6261     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6262         if ((contents == 0             &&
6263              read_exec_only == 1       &&
6264              seg_32bit == 0            &&
6265              limit_in_pages == 0       &&
6266              seg_not_present == 1      &&
6267              useable == 0 )) {
6268             entry_1 = 0;
6269             entry_2 = 0;
6270             goto install;
6271         }
6272     }
6273 
6274     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6275         (ldt_info.limit & 0x0ffff);
6276     entry_2 = (ldt_info.base_addr & 0xff000000) |
6277         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6278         (ldt_info.limit & 0xf0000) |
6279         ((read_exec_only ^ 1) << 9) |
6280         (contents << 10) |
6281         ((seg_not_present ^ 1) << 15) |
6282         (seg_32bit << 22) |
6283         (limit_in_pages << 23) |
6284         (useable << 20) |
6285         (lm << 21) |
6286         0x7000;
6287 
6288     /* Install the new entry ...  */
6289 install:
6290     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6291     lp[0] = tswap32(entry_1);
6292     lp[1] = tswap32(entry_2);
6293     return 0;
6294 }
6295 
6296 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6297 {
6298     struct target_modify_ldt_ldt_s *target_ldt_info;
6299     uint64_t *gdt_table = g2h(env->gdt.base);
6300     uint32_t base_addr, limit, flags;
6301     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6302     int seg_not_present, useable, lm;
6303     uint32_t *lp, entry_1, entry_2;
6304 
6305     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6306     if (!target_ldt_info)
6307         return -TARGET_EFAULT;
6308     idx = tswap32(target_ldt_info->entry_number);
6309     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6310         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6311         unlock_user_struct(target_ldt_info, ptr, 1);
6312         return -TARGET_EINVAL;
6313     }
6314     lp = (uint32_t *)(gdt_table + idx);
6315     entry_1 = tswap32(lp[0]);
6316     entry_2 = tswap32(lp[1]);
6317 
6318     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6319     contents = (entry_2 >> 10) & 3;
6320     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6321     seg_32bit = (entry_2 >> 22) & 1;
6322     limit_in_pages = (entry_2 >> 23) & 1;
6323     useable = (entry_2 >> 20) & 1;
6324 #ifdef TARGET_ABI32
6325     lm = 0;
6326 #else
6327     lm = (entry_2 >> 21) & 1;
6328 #endif
6329     flags = (seg_32bit << 0) | (contents << 1) |
6330         (read_exec_only << 3) | (limit_in_pages << 4) |
6331         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6332     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6333     base_addr = (entry_1 >> 16) |
6334         (entry_2 & 0xff000000) |
6335         ((entry_2 & 0xff) << 16);
6336     target_ldt_info->base_addr = tswapal(base_addr);
6337     target_ldt_info->limit = tswap32(limit);
6338     target_ldt_info->flags = tswap32(flags);
6339     unlock_user_struct(target_ldt_info, ptr, 1);
6340     return 0;
6341 }
6342 
6343 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6344 {
6345     return -TARGET_ENOSYS;
6346 }
6347 #else
6348 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6349 {
6350     abi_long ret = 0;
6351     abi_ulong val;
6352     int idx;
6353 
6354     switch(code) {
6355     case TARGET_ARCH_SET_GS:
6356     case TARGET_ARCH_SET_FS:
6357         if (code == TARGET_ARCH_SET_GS)
6358             idx = R_GS;
6359         else
6360             idx = R_FS;
6361         cpu_x86_load_seg(env, idx, 0);
6362         env->segs[idx].base = addr;
6363         break;
6364     case TARGET_ARCH_GET_GS:
6365     case TARGET_ARCH_GET_FS:
6366         if (code == TARGET_ARCH_GET_GS)
6367             idx = R_GS;
6368         else
6369             idx = R_FS;
6370         val = env->segs[idx].base;
6371         if (put_user(val, addr, abi_ulong))
6372             ret = -TARGET_EFAULT;
6373         break;
6374     default:
6375         ret = -TARGET_EINVAL;
6376         break;
6377     }
6378     return ret;
6379 }
6380 #endif /* defined(TARGET_ABI32) */
6381 
6382 #endif /* defined(TARGET_I386) */
6383 
6384 #define NEW_STACK_SIZE 0x40000
6385 
6386 
6387 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6388 typedef struct {
6389     CPUArchState *env;
6390     pthread_mutex_t mutex;
6391     pthread_cond_t cond;
6392     pthread_t thread;
6393     uint32_t tid;
6394     abi_ulong child_tidptr;
6395     abi_ulong parent_tidptr;
6396     sigset_t sigmask;
6397 } new_thread_info;
6398 
6399 static void *clone_func(void *arg)
6400 {
6401     new_thread_info *info = arg;
6402     CPUArchState *env;
6403     CPUState *cpu;
6404     TaskState *ts;
6405 
6406     rcu_register_thread();
6407     tcg_register_thread();
6408     env = info->env;
6409     cpu = env_cpu(env);
6410     thread_cpu = cpu;
6411     ts = (TaskState *)cpu->opaque;
6412     info->tid = sys_gettid();
6413     task_settid(ts);
6414     if (info->child_tidptr)
6415         put_user_u32(info->tid, info->child_tidptr);
6416     if (info->parent_tidptr)
6417         put_user_u32(info->tid, info->parent_tidptr);
6418     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6419     /* Enable signals.  */
6420     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6421     /* Signal to the parent that we're ready.  */
6422     pthread_mutex_lock(&info->mutex);
6423     pthread_cond_broadcast(&info->cond);
6424     pthread_mutex_unlock(&info->mutex);
6425     /* Wait until the parent has finished initializing the tls state.  */
6426     pthread_mutex_lock(&clone_lock);
6427     pthread_mutex_unlock(&clone_lock);
6428     cpu_loop(env);
6429     /* never exits */
6430     return NULL;
6431 }
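/*
 * Illustrative summary (added comment, not in the original source) of the
 * parent/child handshake used by do_fork() below for CLONE_VM clones: the
 * parent holds clone_lock and info.mutex while it creates the pthread;
 * clone_func() publishes the new TID, signals info.cond so the parent can
 * read it, then briefly takes and releases clone_lock so it does not enter
 * cpu_loop() until the parent has finished initializing the TLS state.
 */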
6432 
6433 /* do_fork() must return host values and target errnos (unlike most
6434    do_*() functions). */
6435 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6436                    abi_ulong parent_tidptr, target_ulong newtls,
6437                    abi_ulong child_tidptr)
6438 {
6439     CPUState *cpu = env_cpu(env);
6440     int ret;
6441     TaskState *ts;
6442     CPUState *new_cpu;
6443     CPUArchState *new_env;
6444     sigset_t sigmask;
6445 
6446     flags &= ~CLONE_IGNORED_FLAGS;
6447 
6448     /* Emulate vfork() with fork() */
6449     if (flags & CLONE_VFORK)
6450         flags &= ~(CLONE_VFORK | CLONE_VM);
6451 
6452     if (flags & CLONE_VM) {
6453         TaskState *parent_ts = (TaskState *)cpu->opaque;
6454         new_thread_info info;
6455         pthread_attr_t attr;
6456 
6457         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6458             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6459             return -TARGET_EINVAL;
6460         }
6461 
6462         ts = g_new0(TaskState, 1);
6463         init_task_state(ts);
6464 
6465         /* Grab a mutex so that thread setup appears atomic.  */
6466         pthread_mutex_lock(&clone_lock);
6467 
6468         /* we create a new CPU instance. */
6469         new_env = cpu_copy(env);
6470         /* Init regs that differ from the parent.  */
6471         cpu_clone_regs_child(new_env, newsp, flags);
6472         cpu_clone_regs_parent(env, flags);
6473         new_cpu = env_cpu(new_env);
6474         new_cpu->opaque = ts;
6475         ts->bprm = parent_ts->bprm;
6476         ts->info = parent_ts->info;
6477         ts->signal_mask = parent_ts->signal_mask;
6478 
6479         if (flags & CLONE_CHILD_CLEARTID) {
6480             ts->child_tidptr = child_tidptr;
6481         }
6482 
6483         if (flags & CLONE_SETTLS) {
6484             cpu_set_tls (new_env, newtls);
6485         }
6486 
6487         memset(&info, 0, sizeof(info));
6488         pthread_mutex_init(&info.mutex, NULL);
6489         pthread_mutex_lock(&info.mutex);
6490         pthread_cond_init(&info.cond, NULL);
6491         info.env = new_env;
6492         if (flags & CLONE_CHILD_SETTID) {
6493             info.child_tidptr = child_tidptr;
6494         }
6495         if (flags & CLONE_PARENT_SETTID) {
6496             info.parent_tidptr = parent_tidptr;
6497         }
6498 
6499         ret = pthread_attr_init(&attr);
6500         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6501         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6502         /* It is not safe to deliver signals until the child has finished
6503            initializing, so temporarily block all signals.  */
6504         sigfillset(&sigmask);
6505         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6506         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6507 
6508         /* If this is our first additional thread, we need to ensure we
6509          * generate code for parallel execution and flush old translations.
6510          */
6511         if (!parallel_cpus) {
6512             parallel_cpus = true;
6513             tb_flush(cpu);
6514         }
6515 
6516         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6517         /* TODO: Free new CPU state if thread creation failed.  */
6518 
6519         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6520         pthread_attr_destroy(&attr);
6521         if (ret == 0) {
6522             /* Wait for the child to initialize.  */
6523             pthread_cond_wait(&info.cond, &info.mutex);
6524             ret = info.tid;
6525         } else {
6526             ret = -1;
6527         }
6528         pthread_mutex_unlock(&info.mutex);
6529         pthread_cond_destroy(&info.cond);
6530         pthread_mutex_destroy(&info.mutex);
6531         pthread_mutex_unlock(&clone_lock);
6532     } else {
6533         /* if no CLONE_VM, we consider it a fork */
6534         if (flags & CLONE_INVALID_FORK_FLAGS) {
6535             return -TARGET_EINVAL;
6536         }
6537 
6538         /* We can't support custom termination signals */
6539         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6540             return -TARGET_EINVAL;
6541         }
6542 
6543         if (block_signals()) {
6544             return -TARGET_ERESTARTSYS;
6545         }
6546 
6547         fork_start();
6548         ret = fork();
6549         if (ret == 0) {
6550             /* Child Process.  */
6551             cpu_clone_regs_child(env, newsp, flags);
6552             fork_end(1);
6553             /* There is a race condition here.  The parent process could
6554                theoretically read the TID in the child process before the child
6555                tid is set.  This would require using either ptrace
6556                (not implemented) or having *_tidptr point at a shared memory
6557                mapping.  We can't repeat the spinlock hack used above because
6558                the child process gets its own copy of the lock.  */
6559             if (flags & CLONE_CHILD_SETTID)
6560                 put_user_u32(sys_gettid(), child_tidptr);
6561             if (flags & CLONE_PARENT_SETTID)
6562                 put_user_u32(sys_gettid(), parent_tidptr);
6563             ts = (TaskState *)cpu->opaque;
6564             if (flags & CLONE_SETTLS)
6565                 cpu_set_tls (env, newtls);
6566             if (flags & CLONE_CHILD_CLEARTID)
6567                 ts->child_tidptr = child_tidptr;
6568         } else {
6569             cpu_clone_regs_parent(env, flags);
6570             fork_end(0);
6571         }
6572     }
6573     return ret;
6574 }
6575 
6576 /* warning: doesn't handle Linux-specific flags... */
6577 static int target_to_host_fcntl_cmd(int cmd)
6578 {
6579     int ret;
6580 
6581     switch(cmd) {
6582     case TARGET_F_DUPFD:
6583     case TARGET_F_GETFD:
6584     case TARGET_F_SETFD:
6585     case TARGET_F_GETFL:
6586     case TARGET_F_SETFL:
6587     case TARGET_F_OFD_GETLK:
6588     case TARGET_F_OFD_SETLK:
6589     case TARGET_F_OFD_SETLKW:
6590         ret = cmd;
6591         break;
6592     case TARGET_F_GETLK:
6593         ret = F_GETLK64;
6594         break;
6595     case TARGET_F_SETLK:
6596         ret = F_SETLK64;
6597         break;
6598     case TARGET_F_SETLKW:
6599         ret = F_SETLKW64;
6600         break;
6601     case TARGET_F_GETOWN:
6602         ret = F_GETOWN;
6603         break;
6604     case TARGET_F_SETOWN:
6605         ret = F_SETOWN;
6606         break;
6607     case TARGET_F_GETSIG:
6608         ret = F_GETSIG;
6609         break;
6610     case TARGET_F_SETSIG:
6611         ret = F_SETSIG;
6612         break;
6613 #if TARGET_ABI_BITS == 32
6614     case TARGET_F_GETLK64:
6615         ret = F_GETLK64;
6616         break;
6617     case TARGET_F_SETLK64:
6618         ret = F_SETLK64;
6619         break;
6620     case TARGET_F_SETLKW64:
6621         ret = F_SETLKW64;
6622         break;
6623 #endif
6624     case TARGET_F_SETLEASE:
6625         ret = F_SETLEASE;
6626         break;
6627     case TARGET_F_GETLEASE:
6628         ret = F_GETLEASE;
6629         break;
6630 #ifdef F_DUPFD_CLOEXEC
6631     case TARGET_F_DUPFD_CLOEXEC:
6632         ret = F_DUPFD_CLOEXEC;
6633         break;
6634 #endif
6635     case TARGET_F_NOTIFY:
6636         ret = F_NOTIFY;
6637         break;
6638 #ifdef F_GETOWN_EX
6639     case TARGET_F_GETOWN_EX:
6640         ret = F_GETOWN_EX;
6641         break;
6642 #endif
6643 #ifdef F_SETOWN_EX
6644     case TARGET_F_SETOWN_EX:
6645         ret = F_SETOWN_EX;
6646         break;
6647 #endif
6648 #ifdef F_SETPIPE_SZ
6649     case TARGET_F_SETPIPE_SZ:
6650         ret = F_SETPIPE_SZ;
6651         break;
6652     case TARGET_F_GETPIPE_SZ:
6653         ret = F_GETPIPE_SZ;
6654         break;
6655 #endif
6656 #ifdef F_ADD_SEALS
6657     case TARGET_F_ADD_SEALS:
6658         ret = F_ADD_SEALS;
6659         break;
6660     case TARGET_F_GET_SEALS:
6661         ret = F_GET_SEALS;
6662         break;
6663 #endif
6664     default:
6665         ret = -TARGET_EINVAL;
6666         break;
6667     }
6668 
6669 #if defined(__powerpc64__)
6670     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
6671      * which are not supported by the kernel. The glibc fcntl wrapper actually
6672      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make the
6673      * syscall directly, adjust to what the kernel supports.
6674      */
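    /*
     * Worked example (illustrative, added comment): on ppc64 glibc,
     * F_GETLK64 is 12, so a translated ret in the 12..14 range is shifted
     * down by (F_GETLK64 - 5) == 7, yielding 5, 6 or 7 -- the values the
     * kernel actually implements.
     */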
6675     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6676         ret -= F_GETLK64 - 5;
6677     }
6678 #endif
6679 
6680     return ret;
6681 }
6682 
6683 #define FLOCK_TRANSTBL \
6684     switch (type) { \
6685     TRANSTBL_CONVERT(F_RDLCK); \
6686     TRANSTBL_CONVERT(F_WRLCK); \
6687     TRANSTBL_CONVERT(F_UNLCK); \
6688     }
6689 
6690 static int target_to_host_flock(int type)
6691 {
6692 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6693     FLOCK_TRANSTBL
6694 #undef  TRANSTBL_CONVERT
6695     return -TARGET_EINVAL;
6696 }
6697 
6698 static int host_to_target_flock(int type)
6699 {
6700 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6701     FLOCK_TRANSTBL
6702 #undef  TRANSTBL_CONVERT
6703     /* If we don't know how to convert the value coming
6704      * from the host, copy it to the target field as-is.
6705      */
6706     return type;
6707 }
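/*
 * Illustrative note (added comment, not in the original source): with the
 * TRANSTBL_CONVERT definitions above, FLOCK_TRANSTBL expands to
 * "case TARGET_F_RDLCK: return F_RDLCK;" (and likewise for F_WRLCK and
 * F_UNLCK) in target_to_host_flock(), and to the reverse mapping
 * "case F_RDLCK: return TARGET_F_RDLCK;" in host_to_target_flock().
 */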
6708 
6709 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6710                                             abi_ulong target_flock_addr)
6711 {
6712     struct target_flock *target_fl;
6713     int l_type;
6714 
6715     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6716         return -TARGET_EFAULT;
6717     }
6718 
6719     __get_user(l_type, &target_fl->l_type);
6720     l_type = target_to_host_flock(l_type);
6721     if (l_type < 0) {
6722         return l_type;
6723     }
6724     fl->l_type = l_type;
6725     __get_user(fl->l_whence, &target_fl->l_whence);
6726     __get_user(fl->l_start, &target_fl->l_start);
6727     __get_user(fl->l_len, &target_fl->l_len);
6728     __get_user(fl->l_pid, &target_fl->l_pid);
6729     unlock_user_struct(target_fl, target_flock_addr, 0);
6730     return 0;
6731 }
6732 
6733 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6734                                           const struct flock64 *fl)
6735 {
6736     struct target_flock *target_fl;
6737     short l_type;
6738 
6739     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6740         return -TARGET_EFAULT;
6741     }
6742 
6743     l_type = host_to_target_flock(fl->l_type);
6744     __put_user(l_type, &target_fl->l_type);
6745     __put_user(fl->l_whence, &target_fl->l_whence);
6746     __put_user(fl->l_start, &target_fl->l_start);
6747     __put_user(fl->l_len, &target_fl->l_len);
6748     __put_user(fl->l_pid, &target_fl->l_pid);
6749     unlock_user_struct(target_fl, target_flock_addr, 1);
6750     return 0;
6751 }
6752 
6753 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6754 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6755 
6756 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6757 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6758                                                    abi_ulong target_flock_addr)
6759 {
6760     struct target_oabi_flock64 *target_fl;
6761     int l_type;
6762 
6763     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6764         return -TARGET_EFAULT;
6765     }
6766 
6767     __get_user(l_type, &target_fl->l_type);
6768     l_type = target_to_host_flock(l_type);
6769     if (l_type < 0) {
6770         return l_type;
6771     }
6772     fl->l_type = l_type;
6773     __get_user(fl->l_whence, &target_fl->l_whence);
6774     __get_user(fl->l_start, &target_fl->l_start);
6775     __get_user(fl->l_len, &target_fl->l_len);
6776     __get_user(fl->l_pid, &target_fl->l_pid);
6777     unlock_user_struct(target_fl, target_flock_addr, 0);
6778     return 0;
6779 }
6780 
6781 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6782                                                  const struct flock64 *fl)
6783 {
6784     struct target_oabi_flock64 *target_fl;
6785     short l_type;
6786 
6787     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6788         return -TARGET_EFAULT;
6789     }
6790 
6791     l_type = host_to_target_flock(fl->l_type);
6792     __put_user(l_type, &target_fl->l_type);
6793     __put_user(fl->l_whence, &target_fl->l_whence);
6794     __put_user(fl->l_start, &target_fl->l_start);
6795     __put_user(fl->l_len, &target_fl->l_len);
6796     __put_user(fl->l_pid, &target_fl->l_pid);
6797     unlock_user_struct(target_fl, target_flock_addr, 1);
6798     return 0;
6799 }
6800 #endif
6801 
6802 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6803                                               abi_ulong target_flock_addr)
6804 {
6805     struct target_flock64 *target_fl;
6806     int l_type;
6807 
6808     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6809         return -TARGET_EFAULT;
6810     }
6811 
6812     __get_user(l_type, &target_fl->l_type);
6813     l_type = target_to_host_flock(l_type);
6814     if (l_type < 0) {
6815         return l_type;
6816     }
6817     fl->l_type = l_type;
6818     __get_user(fl->l_whence, &target_fl->l_whence);
6819     __get_user(fl->l_start, &target_fl->l_start);
6820     __get_user(fl->l_len, &target_fl->l_len);
6821     __get_user(fl->l_pid, &target_fl->l_pid);
6822     unlock_user_struct(target_fl, target_flock_addr, 0);
6823     return 0;
6824 }
6825 
6826 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6827                                             const struct flock64 *fl)
6828 {
6829     struct target_flock64 *target_fl;
6830     short l_type;
6831 
6832     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6833         return -TARGET_EFAULT;
6834     }
6835 
6836     l_type = host_to_target_flock(fl->l_type);
6837     __put_user(l_type, &target_fl->l_type);
6838     __put_user(fl->l_whence, &target_fl->l_whence);
6839     __put_user(fl->l_start, &target_fl->l_start);
6840     __put_user(fl->l_len, &target_fl->l_len);
6841     __put_user(fl->l_pid, &target_fl->l_pid);
6842     unlock_user_struct(target_fl, target_flock_addr, 1);
6843     return 0;
6844 }
6845 
6846 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6847 {
6848     struct flock64 fl64;
6849 #ifdef F_GETOWN_EX
6850     struct f_owner_ex fox;
6851     struct target_f_owner_ex *target_fox;
6852 #endif
6853     abi_long ret;
6854     int host_cmd = target_to_host_fcntl_cmd(cmd);
6855 
6856     if (host_cmd == -TARGET_EINVAL)
6857         return host_cmd;
6858 
6859     switch(cmd) {
6860     case TARGET_F_GETLK:
6861         ret = copy_from_user_flock(&fl64, arg);
6862         if (ret) {
6863             return ret;
6864         }
6865         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6866         if (ret == 0) {
6867             ret = copy_to_user_flock(arg, &fl64);
6868         }
6869         break;
6870 
6871     case TARGET_F_SETLK:
6872     case TARGET_F_SETLKW:
6873         ret = copy_from_user_flock(&fl64, arg);
6874         if (ret) {
6875             return ret;
6876         }
6877         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6878         break;
6879 
6880     case TARGET_F_GETLK64:
6881     case TARGET_F_OFD_GETLK:
6882         ret = copy_from_user_flock64(&fl64, arg);
6883         if (ret) {
6884             return ret;
6885         }
6886         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6887         if (ret == 0) {
6888             ret = copy_to_user_flock64(arg, &fl64);
6889         }
6890         break;
6891     case TARGET_F_SETLK64:
6892     case TARGET_F_SETLKW64:
6893     case TARGET_F_OFD_SETLK:
6894     case TARGET_F_OFD_SETLKW:
6895         ret = copy_from_user_flock64(&fl64, arg);
6896         if (ret) {
6897             return ret;
6898         }
6899         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6900         break;
6901 
6902     case TARGET_F_GETFL:
6903         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6904         if (ret >= 0) {
6905             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6906         }
6907         break;
6908 
6909     case TARGET_F_SETFL:
6910         ret = get_errno(safe_fcntl(fd, host_cmd,
6911                                    target_to_host_bitmask(arg,
6912                                                           fcntl_flags_tbl)));
6913         break;
6914 
6915 #ifdef F_GETOWN_EX
6916     case TARGET_F_GETOWN_EX:
6917         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6918         if (ret >= 0) {
6919             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6920                 return -TARGET_EFAULT;
6921             target_fox->type = tswap32(fox.type);
6922             target_fox->pid = tswap32(fox.pid);
6923             unlock_user_struct(target_fox, arg, 1);
6924         }
6925         break;
6926 #endif
6927 
6928 #ifdef F_SETOWN_EX
6929     case TARGET_F_SETOWN_EX:
6930         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6931             return -TARGET_EFAULT;
6932         fox.type = tswap32(target_fox->type);
6933         fox.pid = tswap32(target_fox->pid);
6934         unlock_user_struct(target_fox, arg, 0);
6935         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6936         break;
6937 #endif
6938 
6939     case TARGET_F_SETSIG:
6940         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6941         break;
6942 
6943     case TARGET_F_GETSIG:
6944         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6945         break;
6946 
6947     case TARGET_F_SETOWN:
6948     case TARGET_F_GETOWN:
6949     case TARGET_F_SETLEASE:
6950     case TARGET_F_GETLEASE:
6951     case TARGET_F_SETPIPE_SZ:
6952     case TARGET_F_GETPIPE_SZ:
6953     case TARGET_F_ADD_SEALS:
6954     case TARGET_F_GET_SEALS:
6955         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6956         break;
6957 
6958     default:
6959         ret = get_errno(safe_fcntl(fd, cmd, arg));
6960         break;
6961     }
6962     return ret;
6963 }
6964 
6965 #ifdef USE_UID16
6966 
6967 static inline int high2lowuid(int uid)
6968 {
6969     if (uid > 65535)
6970         return 65534;
6971     else
6972         return uid;
6973 }
6974 
6975 static inline int high2lowgid(int gid)
6976 {
6977     if (gid > 65535)
6978         return 65534;
6979     else
6980         return gid;
6981 }
6982 
6983 static inline int low2highuid(int uid)
6984 {
6985     if ((int16_t)uid == -1)
6986         return -1;
6987     else
6988         return uid;
6989 }
6990 
6991 static inline int low2highgid(int gid)
6992 {
6993     if ((int16_t)gid == -1)
6994         return -1;
6995     else
6996         return gid;
6997 }
6998 static inline int tswapid(int id)
6999 {
7000     return tswap16(id);
7001 }
7002 
7003 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7004 
7005 #else /* !USE_UID16 */
7006 static inline int high2lowuid(int uid)
7007 {
7008     return uid;
7009 }
7010 static inline int high2lowgid(int gid)
7011 {
7012     return gid;
7013 }
7014 static inline int low2highuid(int uid)
7015 {
7016     return uid;
7017 }
7018 static inline int low2highgid(int gid)
7019 {
7020     return gid;
7021 }
7022 static inline int tswapid(int id)
7023 {
7024     return tswap32(id);
7025 }
7026 
7027 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7028 
7029 #endif /* USE_UID16 */
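/*
 * Worked examples (illustrative, added comment) for the UID16 helpers above:
 * high2lowuid(100000) -> 65534 (too big for 16 bits, so it is clamped), while
 * low2highuid(0xffff) -> -1, because the 16-bit value -1 means "leave
 * unchanged" to the set*uid calls and must still be -1 after widening.
 */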
7030 
7031 /* We must do direct syscalls for setting UID/GID, because we want to
7032  * implement the Linux system call semantics of "change only for this thread",
7033  * not the libc/POSIX semantics of "change for all threads in process".
7034  * (See http://ewontfix.com/17/ for more details.)
7035  * We use the 32-bit version of the syscalls if present; if it is not
7036  * then either the host architecture supports 32-bit UIDs natively with
7037  * the standard syscall, or the 16-bit UID is the best we can do.
7038  */
7039 #ifdef __NR_setuid32
7040 #define __NR_sys_setuid __NR_setuid32
7041 #else
7042 #define __NR_sys_setuid __NR_setuid
7043 #endif
7044 #ifdef __NR_setgid32
7045 #define __NR_sys_setgid __NR_setgid32
7046 #else
7047 #define __NR_sys_setgid __NR_setgid
7048 #endif
7049 #ifdef __NR_setresuid32
7050 #define __NR_sys_setresuid __NR_setresuid32
7051 #else
7052 #define __NR_sys_setresuid __NR_setresuid
7053 #endif
7054 #ifdef __NR_setresgid32
7055 #define __NR_sys_setresgid __NR_setresgid32
7056 #else
7057 #define __NR_sys_setresgid __NR_setresgid
7058 #endif
7059 
7060 _syscall1(int, sys_setuid, uid_t, uid)
7061 _syscall1(int, sys_setgid, gid_t, gid)
7062 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7063 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
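/*
 * Illustrative usage sketch (added comment; the actual call sites live in the
 * main syscall dispatcher, which is not part of this excerpt): a guest
 * setuid(2) would be serviced roughly as
 *
 *     ret = get_errno(sys_setuid(low2highuid(arg1)));
 *
 * so only the calling thread's UID changes, matching kernel rather than
 * libc/POSIX semantics as described in the comment above.
 */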
7064 
7065 void syscall_init(void)
7066 {
7067     IOCTLEntry *ie;
7068     const argtype *arg_type;
7069     int size;
7070     int i;
7071 
7072     thunk_init(STRUCT_MAX);
7073 
7074 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7075 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7076 #include "syscall_types.h"
7077 #undef STRUCT
7078 #undef STRUCT_SPECIAL
7079 
7080     /* Build target_to_host_errno_table[] table from
7081      * host_to_target_errno_table[]. */
7082     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7083         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7084     }
7085 
7086     /* We patch the ioctl size if necessary. We rely on the fact that
7087        no ioctl has all bits set to '1' in its size field. */
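    /*
     * Illustrative example (added comment): an ioctl_entries[] entry whose
     * size field was declared as TARGET_IOC_SIZEMASK and whose argument type
     * is a pointer to, say, a 24-byte structure gets its command word
     * rewritten below so that the size bits encode 24 instead of the
     * all-ones marker.
     */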
7088     ie = ioctl_entries;
7089     while (ie->target_cmd != 0) {
7090         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7091             TARGET_IOC_SIZEMASK) {
7092             arg_type = ie->arg_type;
7093             if (arg_type[0] != TYPE_PTR) {
7094                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7095                         ie->target_cmd);
7096                 exit(1);
7097             }
7098             arg_type++;
7099             size = thunk_type_size(arg_type, 0);
7100             ie->target_cmd = (ie->target_cmd &
7101                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7102                 (size << TARGET_IOC_SIZESHIFT);
7103         }
7104 
7105         /* automatic consistency check if same arch */
7106 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7107     (defined(__x86_64__) && defined(TARGET_X86_64))
7108         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7109             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7110                     ie->name, ie->target_cmd, ie->host_cmd);
7111         }
7112 #endif
7113         ie++;
7114     }
7115 }
7116 
7117 #ifdef TARGET_NR_truncate64
7118 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7119                                          abi_long arg2,
7120                                          abi_long arg3,
7121                                          abi_long arg4)
7122 {
7123     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7124         arg2 = arg3;
7125         arg3 = arg4;
7126     }
7127     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7128 }
7129 #endif
7130 
7131 #ifdef TARGET_NR_ftruncate64
7132 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7133                                           abi_long arg2,
7134                                           abi_long arg3,
7135                                           abi_long arg4)
7136 {
7137     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7138         arg2 = arg3;
7139         arg3 = arg4;
7140     }
7141     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7142 }
7143 #endif
7144 
7145 #if defined(TARGET_NR_timer_settime) || \
7146     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7147 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7148                                                  abi_ulong target_addr)
7149 {
7150     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7151                                 offsetof(struct target_itimerspec,
7152                                          it_interval)) ||
7153         target_to_host_timespec(&host_its->it_value, target_addr +
7154                                 offsetof(struct target_itimerspec,
7155                                          it_value))) {
7156         return -TARGET_EFAULT;
7157     }
7158 
7159     return 0;
7160 }
7161 #endif
7162 
7163 #if defined(TARGET_NR_timer_settime64) || \
7164     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7165 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7166                                                    abi_ulong target_addr)
7167 {
7168     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7169                                   offsetof(struct target__kernel_itimerspec,
7170                                            it_interval)) ||
7171         target_to_host_timespec64(&host_its->it_value, target_addr +
7172                                   offsetof(struct target__kernel_itimerspec,
7173                                            it_value))) {
7174         return -TARGET_EFAULT;
7175     }
7176 
7177     return 0;
7178 }
7179 #endif
7180 
7181 #if ((defined(TARGET_NR_timerfd_gettime) || \
7182       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7183       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7184 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7185                                                  struct itimerspec *host_its)
7186 {
7187     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7188                                                        it_interval),
7189                                 &host_its->it_interval) ||
7190         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7191                                                        it_value),
7192                                 &host_its->it_value)) {
7193         return -TARGET_EFAULT;
7194     }
7195     return 0;
7196 }
7197 #endif
7198 
7199 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7200       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7201       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7202 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7203                                                    struct itimerspec *host_its)
7204 {
7205     if (host_to_target_timespec64(target_addr +
7206                                   offsetof(struct target__kernel_itimerspec,
7207                                            it_interval),
7208                                   &host_its->it_interval) ||
7209         host_to_target_timespec64(target_addr +
7210                                   offsetof(struct target__kernel_itimerspec,
7211                                            it_value),
7212                                   &host_its->it_value)) {
7213         return -TARGET_EFAULT;
7214     }
7215     return 0;
7216 }
7217 #endif
7218 
7219 #if defined(TARGET_NR_adjtimex) || \
7220     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7221 static inline abi_long target_to_host_timex(struct timex *host_tx,
7222                                             abi_long target_addr)
7223 {
7224     struct target_timex *target_tx;
7225 
7226     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7227         return -TARGET_EFAULT;
7228     }
7229 
7230     __get_user(host_tx->modes, &target_tx->modes);
7231     __get_user(host_tx->offset, &target_tx->offset);
7232     __get_user(host_tx->freq, &target_tx->freq);
7233     __get_user(host_tx->maxerror, &target_tx->maxerror);
7234     __get_user(host_tx->esterror, &target_tx->esterror);
7235     __get_user(host_tx->status, &target_tx->status);
7236     __get_user(host_tx->constant, &target_tx->constant);
7237     __get_user(host_tx->precision, &target_tx->precision);
7238     __get_user(host_tx->tolerance, &target_tx->tolerance);
7239     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7240     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7241     __get_user(host_tx->tick, &target_tx->tick);
7242     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7243     __get_user(host_tx->jitter, &target_tx->jitter);
7244     __get_user(host_tx->shift, &target_tx->shift);
7245     __get_user(host_tx->stabil, &target_tx->stabil);
7246     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7247     __get_user(host_tx->calcnt, &target_tx->calcnt);
7248     __get_user(host_tx->errcnt, &target_tx->errcnt);
7249     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7250     __get_user(host_tx->tai, &target_tx->tai);
7251 
7252     unlock_user_struct(target_tx, target_addr, 0);
7253     return 0;
7254 }
7255 
7256 static inline abi_long host_to_target_timex(abi_long target_addr,
7257                                             struct timex *host_tx)
7258 {
7259     struct target_timex *target_tx;
7260 
7261     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7262         return -TARGET_EFAULT;
7263     }
7264 
7265     __put_user(host_tx->modes, &target_tx->modes);
7266     __put_user(host_tx->offset, &target_tx->offset);
7267     __put_user(host_tx->freq, &target_tx->freq);
7268     __put_user(host_tx->maxerror, &target_tx->maxerror);
7269     __put_user(host_tx->esterror, &target_tx->esterror);
7270     __put_user(host_tx->status, &target_tx->status);
7271     __put_user(host_tx->constant, &target_tx->constant);
7272     __put_user(host_tx->precision, &target_tx->precision);
7273     __put_user(host_tx->tolerance, &target_tx->tolerance);
7274     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7275     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7276     __put_user(host_tx->tick, &target_tx->tick);
7277     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7278     __put_user(host_tx->jitter, &target_tx->jitter);
7279     __put_user(host_tx->shift, &target_tx->shift);
7280     __put_user(host_tx->stabil, &target_tx->stabil);
7281     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7282     __put_user(host_tx->calcnt, &target_tx->calcnt);
7283     __put_user(host_tx->errcnt, &target_tx->errcnt);
7284     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7285     __put_user(host_tx->tai, &target_tx->tai);
7286 
7287     unlock_user_struct(target_tx, target_addr, 1);
7288     return 0;
7289 }
7290 #endif
7291 
7292 
7293 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7294 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7295                                               abi_long target_addr)
7296 {
7297     struct target__kernel_timex *target_tx;
7298 
7299     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7300                                  offsetof(struct target__kernel_timex,
7301                                           time))) {
7302         return -TARGET_EFAULT;
7303     }
7304 
7305     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7306         return -TARGET_EFAULT;
7307     }
7308 
7309     __get_user(host_tx->modes, &target_tx->modes);
7310     __get_user(host_tx->offset, &target_tx->offset);
7311     __get_user(host_tx->freq, &target_tx->freq);
7312     __get_user(host_tx->maxerror, &target_tx->maxerror);
7313     __get_user(host_tx->esterror, &target_tx->esterror);
7314     __get_user(host_tx->status, &target_tx->status);
7315     __get_user(host_tx->constant, &target_tx->constant);
7316     __get_user(host_tx->precision, &target_tx->precision);
7317     __get_user(host_tx->tolerance, &target_tx->tolerance);
7318     __get_user(host_tx->tick, &target_tx->tick);
7319     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7320     __get_user(host_tx->jitter, &target_tx->jitter);
7321     __get_user(host_tx->shift, &target_tx->shift);
7322     __get_user(host_tx->stabil, &target_tx->stabil);
7323     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7324     __get_user(host_tx->calcnt, &target_tx->calcnt);
7325     __get_user(host_tx->errcnt, &target_tx->errcnt);
7326     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7327     __get_user(host_tx->tai, &target_tx->tai);
7328 
7329     unlock_user_struct(target_tx, target_addr, 0);
7330     return 0;
7331 }
7332 
7333 static inline abi_long host_to_target_timex64(abi_long target_addr,
7334                                               struct timex *host_tx)
7335 {
7336     struct target__kernel_timex *target_tx;
7337 
7338     if (copy_to_user_timeval64(target_addr +
7339                                offsetof(struct target__kernel_timex, time),
7340                                &host_tx->time)) {
7341         return -TARGET_EFAULT;
7342     }
7343 
7344     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7345         return -TARGET_EFAULT;
7346     }
7347 
7348     __put_user(host_tx->modes, &target_tx->modes);
7349     __put_user(host_tx->offset, &target_tx->offset);
7350     __put_user(host_tx->freq, &target_tx->freq);
7351     __put_user(host_tx->maxerror, &target_tx->maxerror);
7352     __put_user(host_tx->esterror, &target_tx->esterror);
7353     __put_user(host_tx->status, &target_tx->status);
7354     __put_user(host_tx->constant, &target_tx->constant);
7355     __put_user(host_tx->precision, &target_tx->precision);
7356     __put_user(host_tx->tolerance, &target_tx->tolerance);
7357     __put_user(host_tx->tick, &target_tx->tick);
7358     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7359     __put_user(host_tx->jitter, &target_tx->jitter);
7360     __put_user(host_tx->shift, &target_tx->shift);
7361     __put_user(host_tx->stabil, &target_tx->stabil);
7362     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7363     __put_user(host_tx->calcnt, &target_tx->calcnt);
7364     __put_user(host_tx->errcnt, &target_tx->errcnt);
7365     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7366     __put_user(host_tx->tai, &target_tx->tai);
7367 
7368     unlock_user_struct(target_tx, target_addr, 1);
7369     return 0;
7370 }
7371 #endif
7372 
7373 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7374                                                abi_ulong target_addr)
7375 {
7376     struct target_sigevent *target_sevp;
7377 
7378     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7379         return -TARGET_EFAULT;
7380     }
7381 
7382     /* This union is awkward on 64 bit systems because it has a 32 bit
7383      * integer and a pointer in it; we follow the conversion approach
7384      * used for handling sigval types in signal.c so the guest should get
7385      * the correct value back even if we did a 64 bit byteswap and it's
7386      * using the 32 bit integer.
7387      */
7388     host_sevp->sigev_value.sival_ptr =
7389         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7390     host_sevp->sigev_signo =
7391         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7392     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7393     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7394 
7395     unlock_user_struct(target_sevp, target_addr, 1);
7396     return 0;
7397 }
7398 
7399 #if defined(TARGET_NR_mlockall)
7400 static inline int target_to_host_mlockall_arg(int arg)
7401 {
7402     int result = 0;
7403 
7404     if (arg & TARGET_MCL_CURRENT) {
7405         result |= MCL_CURRENT;
7406     }
7407     if (arg & TARGET_MCL_FUTURE) {
7408         result |= MCL_FUTURE;
7409     }
7410 #ifdef MCL_ONFAULT
7411     if (arg & TARGET_MCL_ONFAULT) {
7412         result |= MCL_ONFAULT;
7413     }
7414 #endif
7415 
7416     return result;
7417 }
7418 #endif
7419 
7420 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7421      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7422      defined(TARGET_NR_newfstatat))
7423 static inline abi_long host_to_target_stat64(void *cpu_env,
7424                                              abi_ulong target_addr,
7425                                              struct stat *host_st)
7426 {
7427 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7428     if (((CPUARMState *)cpu_env)->eabi) {
7429         struct target_eabi_stat64 *target_st;
7430 
7431         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7432             return -TARGET_EFAULT;
7433         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7434         __put_user(host_st->st_dev, &target_st->st_dev);
7435         __put_user(host_st->st_ino, &target_st->st_ino);
7436 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7437         __put_user(host_st->st_ino, &target_st->__st_ino);
7438 #endif
7439         __put_user(host_st->st_mode, &target_st->st_mode);
7440         __put_user(host_st->st_nlink, &target_st->st_nlink);
7441         __put_user(host_st->st_uid, &target_st->st_uid);
7442         __put_user(host_st->st_gid, &target_st->st_gid);
7443         __put_user(host_st->st_rdev, &target_st->st_rdev);
7444         __put_user(host_st->st_size, &target_st->st_size);
7445         __put_user(host_st->st_blksize, &target_st->st_blksize);
7446         __put_user(host_st->st_blocks, &target_st->st_blocks);
7447         __put_user(host_st->st_atime, &target_st->target_st_atime);
7448         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7449         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7450 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7451         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7452         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7453         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7454 #endif
7455         unlock_user_struct(target_st, target_addr, 1);
7456     } else
7457 #endif
7458     {
7459 #if defined(TARGET_HAS_STRUCT_STAT64)
7460         struct target_stat64 *target_st;
7461 #else
7462         struct target_stat *target_st;
7463 #endif
7464 
7465         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7466             return -TARGET_EFAULT;
7467         memset(target_st, 0, sizeof(*target_st));
7468         __put_user(host_st->st_dev, &target_st->st_dev);
7469         __put_user(host_st->st_ino, &target_st->st_ino);
7470 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7471         __put_user(host_st->st_ino, &target_st->__st_ino);
7472 #endif
7473         __put_user(host_st->st_mode, &target_st->st_mode);
7474         __put_user(host_st->st_nlink, &target_st->st_nlink);
7475         __put_user(host_st->st_uid, &target_st->st_uid);
7476         __put_user(host_st->st_gid, &target_st->st_gid);
7477         __put_user(host_st->st_rdev, &target_st->st_rdev);
7478         /* XXX: better use of kernel struct */
7479         __put_user(host_st->st_size, &target_st->st_size);
7480         __put_user(host_st->st_blksize, &target_st->st_blksize);
7481         __put_user(host_st->st_blocks, &target_st->st_blocks);
7482         __put_user(host_st->st_atime, &target_st->target_st_atime);
7483         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7484         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7485 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7486         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7487         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7488         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7489 #endif
7490         unlock_user_struct(target_st, target_addr, 1);
7491     }
7492 
7493     return 0;
7494 }
7495 #endif
7496 
7497 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7498 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7499                                             abi_ulong target_addr)
7500 {
7501     struct target_statx *target_stx;
7502 
7503     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7504         return -TARGET_EFAULT;
7505     }
7506     memset(target_stx, 0, sizeof(*target_stx));
7507 
7508     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7509     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7510     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7511     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7512     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7513     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7514     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7515     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7516     __put_user(host_stx->stx_size, &target_stx->stx_size);
7517     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7518     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7519     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7520     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7521     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7522     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7523     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7524     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7525     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7526     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7527     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7528     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7529     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7530     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7531 
7532     unlock_user_struct(target_stx, target_addr, 1);
7533 
7534     return 0;
7535 }
7536 #endif
7537 
7538 static int do_sys_futex(int *uaddr, int op, int val,
7539                          const struct timespec *timeout, int *uaddr2,
7540                          int val3)
7541 {
7542 #if HOST_LONG_BITS == 64
7543 #if defined(__NR_futex)
7544     /* 64-bit hosts always have a 64-bit time_t and don't define a _time64 version */
7545     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7546 
7547 #endif
7548 #else /* HOST_LONG_BITS == 64 */
7549 #if defined(__NR_futex_time64)
7550     if (sizeof(timeout->tv_sec) == 8) {
7551         /* _time64 function on 32bit arch */
7552         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7553     }
7554 #endif
7555 #if defined(__NR_futex)
7556     /* old function on 32bit arch */
7557     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7558 #endif
7559 #endif /* HOST_LONG_BITS == 64 */
7560     g_assert_not_reached();
7561 }
7562 
7563 static int do_safe_futex(int *uaddr, int op, int val,
7564                          const struct timespec *timeout, int *uaddr2,
7565                          int val3)
7566 {
7567 #if HOST_LONG_BITS == 64
7568 #if defined(__NR_futex)
7569     /* 64-bit hosts always have a 64-bit time_t and don't define a _time64 version */
7570     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7571 #endif
7572 #else /* HOST_LONG_BITS == 64 */
7573 #if defined(__NR_futex_time64)
7574     if (sizeof(timeout->tv_sec) == 8) {
7575         /* _time64 function on 32bit arch */
7576         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7577                                            val3));
7578     }
7579 #endif
7580 #if defined(__NR_futex)
7581     /* old function on 32bit arch */
7582     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7583 #endif
7584 #endif /* HOST_LONG_BITS == 64 */
7585     return -TARGET_ENOSYS;
7586 }
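/*
 * Illustrative note (added comment, assumption about the host libc config):
 * on a 32-bit host built with a 64-bit time_t (e.g. glibc with _TIME_BITS=64),
 * sizeof(timeout->tv_sec) == 8, so the helpers above pick __NR_futex_time64
 * when it exists and only fall back to the plain __NR_futex path otherwise.
 */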
7587 
7588 /* ??? Using host futex calls even when target atomic operations
7589    are not really atomic probably breaks things.  However, implementing
7590    futexes locally would make futexes shared between multiple processes
7591    tricky; they're probably useless anyway because guest atomic
7592    operations won't work either.  */
7593 #if defined(TARGET_NR_futex)
7594 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7595                     target_ulong uaddr2, int val3)
7596 {
7597     struct timespec ts, *pts;
7598     int base_op;
7599 
7600     /* ??? We assume FUTEX_* constants are the same on both host
7601        and target.  */
7602 #ifdef FUTEX_CMD_MASK
7603     base_op = op & FUTEX_CMD_MASK;
7604 #else
7605     base_op = op;
7606 #endif
7607     switch (base_op) {
7608     case FUTEX_WAIT:
7609     case FUTEX_WAIT_BITSET:
7610         if (timeout) {
7611             pts = &ts;
7612             target_to_host_timespec(pts, timeout);
7613         } else {
7614             pts = NULL;
7615         }
7616         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7617     case FUTEX_WAKE:
7618         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7619     case FUTEX_FD:
7620         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7621     case FUTEX_REQUEUE:
7622     case FUTEX_CMP_REQUEUE:
7623     case FUTEX_WAKE_OP:
7624         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7625            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7626            But the prototype takes a `struct timespec *'; insert casts
7627            to satisfy the compiler.  We do not need to tswap TIMEOUT
7628            since it's not compared to guest memory.  */
7629         pts = (struct timespec *)(uintptr_t) timeout;
7630         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7631                              (base_op == FUTEX_CMP_REQUEUE
7632                                       ? tswap32(val3)
7633                                       : val3));
7634     default:
7635         return -TARGET_ENOSYS;
7636     }
7637 }
7638 #endif
7639 
7640 #if defined(TARGET_NR_futex_time64)
7641 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7642                            target_ulong uaddr2, int val3)
7643 {
7644     struct timespec ts, *pts;
7645     int base_op;
7646 
7647     /* ??? We assume FUTEX_* constants are the same on both host
7648        and target.  */
7649 #ifdef FUTEX_CMD_MASK
7650     base_op = op & FUTEX_CMD_MASK;
7651 #else
7652     base_op = op;
7653 #endif
7654     switch (base_op) {
7655     case FUTEX_WAIT:
7656     case FUTEX_WAIT_BITSET:
7657         if (timeout) {
7658             pts = &ts;
7659             if (target_to_host_timespec64(pts, timeout)) {
7660                 return -TARGET_EFAULT;
7661             }
7662         } else {
7663             pts = NULL;
7664         }
7665         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7666     case FUTEX_WAKE:
7667         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7668     case FUTEX_FD:
7669         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7670     case FUTEX_REQUEUE:
7671     case FUTEX_CMP_REQUEUE:
7672     case FUTEX_WAKE_OP:
7673         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7674            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7675            But the prototype takes a `struct timespec *'; insert casts
7676            to satisfy the compiler.  We do not need to tswap TIMEOUT
7677            since it's not compared to guest memory.  */
7678         pts = (struct timespec *)(uintptr_t) timeout;
7679         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7680                              (base_op == FUTEX_CMP_REQUEUE
7681                                       ? tswap32(val3)
7682                                       : val3));
7683     default:
7684         return -TARGET_ENOSYS;
7685     }
7686 }
7687 #endif
7688 
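/*
 * Illustrative note on the conversions in do_futex()/do_futex_time64()
 * above (a summary, not upstream text): the guest futex word address is
 * translated with g2h() so the host kernel operates directly on guest
 * memory.  For FUTEX_WAIT/FUTEX_WAIT_BITSET the expected value is
 * tswap32()ed because the kernel compares it against the guest-endian word
 * in memory, whereas for FUTEX_WAKE it is only a count of waiters and needs
 * no swapping.  The same reasoning gives tswap32(val3) for
 * FUTEX_CMP_REQUEUE, where val3 is also compared against the futex word.
 */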
7689 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7690 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7691                                      abi_long handle, abi_long mount_id,
7692                                      abi_long flags)
7693 {
7694     struct file_handle *target_fh;
7695     struct file_handle *fh;
7696     int mid = 0;
7697     abi_long ret;
7698     char *name;
7699     unsigned int size, total_size;
7700 
7701     if (get_user_s32(size, handle)) {
7702         return -TARGET_EFAULT;
7703     }
7704 
7705     name = lock_user_string(pathname);
7706     if (!name) {
7707         return -TARGET_EFAULT;
7708     }
7709 
7710     total_size = sizeof(struct file_handle) + size;
7711     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7712     if (!target_fh) {
7713         unlock_user(name, pathname, 0);
7714         return -TARGET_EFAULT;
7715     }
7716 
7717     fh = g_malloc0(total_size);
7718     fh->handle_bytes = size;
7719 
7720     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7721     unlock_user(name, pathname, 0);
7722 
7723     /* man name_to_handle_at(2):
7724      * Other than the use of the handle_bytes field, the caller should treat
7725      * the file_handle structure as an opaque data type
7726      */
7727 
7728     memcpy(target_fh, fh, total_size);
7729     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7730     target_fh->handle_type = tswap32(fh->handle_type);
7731     g_free(fh);
7732     unlock_user(target_fh, handle, total_size);
7733 
7734     if (put_user_s32(mid, mount_id)) {
7735         return -TARGET_EFAULT;
7736     }
7737 
7738     return ret;
7739 
7740 }
7741 #endif
7742 
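/*
 * Illustrative note (editorial): the guest supplies the buffer size in
 * handle_bytes; the host fills in the handle, and on the way back only
 * handle_bytes and handle_type are byte-swapped, while the opaque f_handle
 * payload is copied verbatim, matching the "opaque data type" wording from
 * the man page quoted above.
 */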
7743 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7744 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7745                                      abi_long flags)
7746 {
7747     struct file_handle *target_fh;
7748     struct file_handle *fh;
7749     unsigned int size, total_size;
7750     abi_long ret;
7751 
7752     if (get_user_s32(size, handle)) {
7753         return -TARGET_EFAULT;
7754     }
7755 
7756     total_size = sizeof(struct file_handle) + size;
7757     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7758     if (!target_fh) {
7759         return -TARGET_EFAULT;
7760     }
7761 
7762     fh = g_memdup(target_fh, total_size);
7763     fh->handle_bytes = size;
7764     fh->handle_type = tswap32(target_fh->handle_type);
7765 
7766     ret = get_errno(open_by_handle_at(mount_fd, fh,
7767                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7768 
7769     g_free(fh);
7770 
7771     unlock_user(target_fh, handle, total_size);
7772 
7773     return ret;
7774 }
7775 #endif
7776 
7777 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7778 
7779 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7780 {
7781     int host_flags;
7782     target_sigset_t *target_mask;
7783     sigset_t host_mask;
7784     abi_long ret;
7785 
7786     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7787         return -TARGET_EINVAL;
7788     }
7789     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7790         return -TARGET_EFAULT;
7791     }
7792 
7793     target_to_host_sigset(&host_mask, target_mask);
7794 
7795     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7796 
7797     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7798     if (ret >= 0) {
7799         fd_trans_register(ret, &target_signalfd_trans);
7800     }
7801 
7802     unlock_user_struct(target_mask, mask, 0);
7803 
7804     return ret;
7805 }
7806 #endif
7807 
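/*
 * Illustrative note (an assumption drawn from the code above): the guest
 * signal mask is converted with target_to_host_sigset() before calling the
 * host signalfd(), and the returned descriptor is registered with fd_trans
 * so that data later read from it (struct signalfd_siginfo) can be
 * converted back to the target's layout.
 */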
7808 /* Map host to target signal numbers for the wait family of syscalls.
7809    Assume all other status bits are the same.  */
7810 int host_to_target_waitstatus(int status)
7811 {
7812     if (WIFSIGNALED(status)) {
7813         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7814     }
7815     if (WIFSTOPPED(status)) {
7816         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7817                | (status & 0xff);
7818     }
7819     return status;
7820 }
7821 
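/*
 * Illustration of the wait status layout assumed above (the standard Linux
 * encoding, summarized editorially):
 *
 *   bits 0-6 : termination signal (0x7f means stopped)
 *   bit  7   : core dumped
 *   bits 8-15: exit code, or stop signal when WIFSTOPPED
 *
 * So for a child killed by a signal only the low signal bits are remapped
 * to the target numbering, and for a stopped child the stop signal in
 * bits 8-15 is remapped; all remaining bits are passed through unchanged.
 */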
7822 static int open_self_cmdline(void *cpu_env, int fd)
7823 {
7824     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7825     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7826     int i;
7827 
7828     for (i = 0; i < bprm->argc; i++) {
7829         size_t len = strlen(bprm->argv[i]) + 1;
7830 
7831         if (write(fd, bprm->argv[i], len) != len) {
7832             return -1;
7833         }
7834     }
7835 
7836     return 0;
7837 }
7838 
7839 static int open_self_maps(void *cpu_env, int fd)
7840 {
7841     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7842     TaskState *ts = cpu->opaque;
7843     GSList *map_info = read_self_maps();
7844     GSList *s;
7845     int count;
7846 
7847     for (s = map_info; s; s = g_slist_next(s)) {
7848         MapInfo *e = (MapInfo *) s->data;
7849 
7850         if (h2g_valid(e->start)) {
7851             unsigned long min = e->start;
7852             unsigned long max = e->end;
7853             int flags = page_get_flags(h2g(min));
7854             const char *path;
7855 
7856             max = h2g_valid(max - 1) ?
7857                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7858 
7859             if (page_check_range(h2g(min), max - min, flags) == -1) {
7860                 continue;
7861             }
7862 
7863             if (h2g(min) == ts->info->stack_limit) {
7864                 path = "[stack]";
7865             } else {
7866                 path = e->path;
7867             }
7868 
7869             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7870                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7871                             h2g(min), h2g(max - 1) + 1,
7872                             e->is_read ? 'r' : '-',
7873                             e->is_write ? 'w' : '-',
7874                             e->is_exec ? 'x' : '-',
7875                             e->is_priv ? 'p' : '-',
7876                             (uint64_t) e->offset, e->dev, e->inode);
7877             if (path) {
7878                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7879             } else {
7880                 dprintf(fd, "\n");
7881             }
7882         }
7883     }
7884 
7885     free_self_maps(map_info);
7886 
7887 #ifdef TARGET_VSYSCALL_PAGE
7888     /*
7889      * We only support execution from the vsyscall page.
7890      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7891      */
7892     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7893                     " --xp 00000000 00:00 0",
7894                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7895     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7896 #endif
7897 
7898     return 0;
7899 }
7900 
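/*
 * Illustrative summary (editorial): the host's own mappings from
 * read_self_maps() are filtered down to ranges that are valid guest
 * addresses (h2g_valid) and printed back in guest terms, with the guest
 * stack relabelled as "[stack]" and, where the target defines a vsyscall
 * page, an extra synthetic "[vsyscall]" line appended.
 */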
7901 static int open_self_stat(void *cpu_env, int fd)
7902 {
7903     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7904     TaskState *ts = cpu->opaque;
7905     g_autoptr(GString) buf = g_string_new(NULL);
7906     int i;
7907 
7908     for (i = 0; i < 44; i++) {
7909         if (i == 0) {
7910             /* pid */
7911             g_string_printf(buf, FMT_pid " ", getpid());
7912         } else if (i == 1) {
7913             /* app name */
7914             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7915             bin = bin ? bin + 1 : ts->bprm->argv[0];
7916             g_string_printf(buf, "(%.15s) ", bin);
7917         } else if (i == 27) {
7918             /* stack bottom */
7919             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7920         } else {
7921             /* for the rest, there is MasterCard */
7922             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7923         }
7924 
7925         if (write(fd, buf->str, buf->len) != buf->len) {
7926             return -1;
7927         }
7928     }
7929 
7930     return 0;
7931 }
7932 
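/*
 * Illustrative note (editorial): only three of the 44 emitted fields carry
 * real values: the pid, the command name (truncated to 15 characters), and
 * the start-of-stack address.  Every other field reads back as 0.
 */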
7933 static int open_self_auxv(void *cpu_env, int fd)
7934 {
7935     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7936     TaskState *ts = cpu->opaque;
7937     abi_ulong auxv = ts->info->saved_auxv;
7938     abi_ulong len = ts->info->auxv_len;
7939     char *ptr;
7940 
7941     /*
7942      * The auxiliary vector is stored on the target process's stack.
7943      * Read the whole auxv vector and copy it to the file.
7944      */
7945     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7946     if (ptr != NULL) {
7947         while (len > 0) {
7948             ssize_t r;
7949             r = write(fd, ptr, len);
7950             if (r <= 0) {
7951                 break;
7952             }
7953             len -= r;
7954             ptr += r;
7955         }
7956         lseek(fd, 0, SEEK_SET);
7957         unlock_user(ptr, auxv, len);
7958     }
7959 
7960     return 0;
7961 }
7962 
7963 static int is_proc_myself(const char *filename, const char *entry)
7964 {
7965     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7966         filename += strlen("/proc/");
7967         if (!strncmp(filename, "self/", strlen("self/"))) {
7968             filename += strlen("self/");
7969         } else if (*filename >= '1' && *filename <= '9') {
7970             char myself[80];
7971             snprintf(myself, sizeof(myself), "%d/", getpid());
7972             if (!strncmp(filename, myself, strlen(myself))) {
7973                 filename += strlen(myself);
7974             } else {
7975                 return 0;
7976             }
7977         } else {
7978             return 0;
7979         }
7980         if (!strcmp(filename, entry)) {
7981             return 1;
7982         }
7983     }
7984     return 0;
7985 }
7986 
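/*
 * Examples for illustration (editorial), assuming the emulator's pid
 * is 1234:
 *
 *   is_proc_myself("/proc/self/maps", "maps") -> 1
 *   is_proc_myself("/proc/1234/maps", "maps") -> 1
 *   is_proc_myself("/proc/5678/maps", "maps") -> 0
 *   is_proc_myself("/etc/passwd",     "maps") -> 0
 */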
7987 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7988     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7989 static int is_proc(const char *filename, const char *entry)
7990 {
7991     return strcmp(filename, entry) == 0;
7992 }
7993 #endif
7994 
7995 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7996 static int open_net_route(void *cpu_env, int fd)
7997 {
7998     FILE *fp;
7999     char *line = NULL;
8000     size_t len = 0;
8001     ssize_t read;
8002 
8003     fp = fopen("/proc/net/route", "r");
8004     if (fp == NULL) {
8005         return -1;
8006     }
8007 
8008     /* read header */
8009 
8010     read = getline(&line, &len, fp);
8011     dprintf(fd, "%s", line);
8012 
8013     /* read routes */
8014 
8015     while ((read = getline(&line, &len, fp)) != -1) {
8016         char iface[16];
8017         uint32_t dest, gw, mask;
8018         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8019         int fields;
8020 
8021         fields = sscanf(line,
8022                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8023                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8024                         &mask, &mtu, &window, &irtt);
8025         if (fields != 11) {
8026             continue;
8027         }
8028         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8029                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8030                 metric, tswap32(mask), mtu, window, irtt);
8031     }
8032 
8033     free(line);
8034     fclose(fp);
8035 
8036     return 0;
8037 }
8038 #endif
8039 
8040 #if defined(TARGET_SPARC)
8041 static int open_cpuinfo(void *cpu_env, int fd)
8042 {
8043     dprintf(fd, "type\t\t: sun4u\n");
8044     return 0;
8045 }
8046 #endif
8047 
8048 #if defined(TARGET_HPPA)
8049 static int open_cpuinfo(void *cpu_env, int fd)
8050 {
8051     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8052     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8053     dprintf(fd, "capabilities\t: os32\n");
8054     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8055     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8056     return 0;
8057 }
8058 #endif
8059 
8060 #if defined(TARGET_M68K)
8061 static int open_hardware(void *cpu_env, int fd)
8062 {
8063     dprintf(fd, "Model:\t\tqemu-m68k\n");
8064     return 0;
8065 }
8066 #endif
8067 
8068 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8069 {
8070     struct fake_open {
8071         const char *filename;
8072         int (*fill)(void *cpu_env, int fd);
8073         int (*cmp)(const char *s1, const char *s2);
8074     };
8075     const struct fake_open *fake_open;
8076     static const struct fake_open fakes[] = {
8077         { "maps", open_self_maps, is_proc_myself },
8078         { "stat", open_self_stat, is_proc_myself },
8079         { "auxv", open_self_auxv, is_proc_myself },
8080         { "cmdline", open_self_cmdline, is_proc_myself },
8081 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8082         { "/proc/net/route", open_net_route, is_proc },
8083 #endif
8084 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8085         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8086 #endif
8087 #if defined(TARGET_M68K)
8088         { "/proc/hardware", open_hardware, is_proc },
8089 #endif
8090         { NULL, NULL, NULL }
8091     };
8092 
8093     if (is_proc_myself(pathname, "exe")) {
8094         int execfd = qemu_getauxval(AT_EXECFD);
8095         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8096     }
8097 
8098     for (fake_open = fakes; fake_open->filename; fake_open++) {
8099         if (fake_open->cmp(pathname, fake_open->filename)) {
8100             break;
8101         }
8102     }
8103 
8104     if (fake_open->filename) {
8105         const char *tmpdir;
8106         char filename[PATH_MAX];
8107         int fd, r;
8108 
8109         /* create a temporary file to hold the synthesized /proc contents */
8110         tmpdir = getenv("TMPDIR");
8111         if (!tmpdir)
8112             tmpdir = "/tmp";
8113         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8114         fd = mkstemp(filename);
8115         if (fd < 0) {
8116             return fd;
8117         }
8118         unlink(filename);
8119 
8120         if ((r = fake_open->fill(cpu_env, fd))) {
8121             int e = errno;
8122             close(fd);
8123             errno = e;
8124             return r;
8125         }
8126         lseek(fd, 0, SEEK_SET);
8127 
8128         return fd;
8129     }
8130 
8131     return safe_openat(dirfd, path(pathname), flags, mode);
8132 }
8133 
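/*
 * Illustrative summary (editorial): for the emulated /proc entries above,
 * do_openat() creates an anonymous temporary file (mkstemp() followed by
 * unlink()), lets the matching fill() callback write the synthesized
 * contents into it, rewinds it and hands that fd back to the guest.  A
 * guest reading /proc/self/maps therefore sees the guest-view maps produced
 * by open_self_maps() rather than QEMU's own.
 */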
8134 #define TIMER_MAGIC 0x0caf0000
8135 #define TIMER_MAGIC_MASK 0xffff0000
8136 
8137 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8138 static target_timer_t get_timer_id(abi_long arg)
8139 {
8140     target_timer_t timerid = arg;
8141 
8142     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8143         return -TARGET_EINVAL;
8144     }
8145 
8146     timerid &= 0xffff;
8147 
8148     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8149         return -TARGET_EINVAL;
8150     }
8151 
8152     return timerid;
8153 }
8154 
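/*
 * Example for illustration (editorial): timer IDs given to the guest have
 * the form TIMER_MAGIC | index, so 0x0caf0002 maps back to slot 2 of
 * g_posix_timers, while any value whose upper half is not TIMER_MAGIC is
 * rejected with -TARGET_EINVAL.
 */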
8155 static int target_to_host_cpu_mask(unsigned long *host_mask,
8156                                    size_t host_size,
8157                                    abi_ulong target_addr,
8158                                    size_t target_size)
8159 {
8160     unsigned target_bits = sizeof(abi_ulong) * 8;
8161     unsigned host_bits = sizeof(*host_mask) * 8;
8162     abi_ulong *target_mask;
8163     unsigned i, j;
8164 
8165     assert(host_size >= target_size);
8166 
8167     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8168     if (!target_mask) {
8169         return -TARGET_EFAULT;
8170     }
8171     memset(host_mask, 0, host_size);
8172 
8173     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8174         unsigned bit = i * target_bits;
8175         abi_ulong val;
8176 
8177         __get_user(val, &target_mask[i]);
8178         for (j = 0; j < target_bits; j++, bit++) {
8179             if (val & (1UL << j)) {
8180                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8181             }
8182         }
8183     }
8184 
8185     unlock_user(target_mask, target_addr, 0);
8186     return 0;
8187 }
8188 
8189 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8190                                    size_t host_size,
8191                                    abi_ulong target_addr,
8192                                    size_t target_size)
8193 {
8194     unsigned target_bits = sizeof(abi_ulong) * 8;
8195     unsigned host_bits = sizeof(*host_mask) * 8;
8196     abi_ulong *target_mask;
8197     unsigned i, j;
8198 
8199     assert(host_size >= target_size);
8200 
8201     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8202     if (!target_mask) {
8203         return -TARGET_EFAULT;
8204     }
8205 
8206     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8207         unsigned bit = i * target_bits;
8208         abi_ulong val = 0;
8209 
8210         for (j = 0; j < target_bits; j++, bit++) {
8211             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8212                 val |= 1UL << j;
8213             }
8214         }
8215         __put_user(val, &target_mask[i]);
8216     }
8217 
8218     unlock_user(target_mask, target_addr, target_size);
8219     return 0;
8220 }
8221 
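/*
 * Worked example for illustration (editorial), for a 32-bit target on a
 * 64-bit host: a target mask of two abi_ulong words
 * { 0x00000001, 0x00000080 } sets target bits 0 and 39, which the loops
 * above fold into bits 0 and 39 of the single 64-bit word host_mask[0];
 * the reverse conversion splits them back out again.
 */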
8222 /* This is an internal helper for do_syscall so that it has a single
8223  * return point, at which actions such as logging of syscall results
8224  * can be performed.
8225  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8226  */
8227 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8228                             abi_long arg2, abi_long arg3, abi_long arg4,
8229                             abi_long arg5, abi_long arg6, abi_long arg7,
8230                             abi_long arg8)
8231 {
8232     CPUState *cpu = env_cpu(cpu_env);
8233     abi_long ret;
8234 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8235     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8236     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8237     || defined(TARGET_NR_statx)
8238     struct stat st;
8239 #endif
8240 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8241     || defined(TARGET_NR_fstatfs)
8242     struct statfs stfs;
8243 #endif
8244     void *p;
8245 
8246     switch(num) {
8247     case TARGET_NR_exit:
8248         /* In old applications this may be used to implement _exit(2).
8249            However, in threaded applications it is used for thread termination,
8250            and _exit_group is used for application termination.
8251            Do thread termination if we have more than one thread.  */
8252 
8253         if (block_signals()) {
8254             return -TARGET_ERESTARTSYS;
8255         }
8256 
8257         pthread_mutex_lock(&clone_lock);
8258 
8259         if (CPU_NEXT(first_cpu)) {
8260             TaskState *ts = cpu->opaque;
8261 
8262             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8263             object_unref(OBJECT(cpu));
8264             /*
8265              * At this point the CPU should be unrealized and removed
8266              * from cpu lists. We can clean-up the rest of the thread
8267              * data without the lock held.
8268              */
8269 
8270             pthread_mutex_unlock(&clone_lock);
8271 
8272             if (ts->child_tidptr) {
8273                 put_user_u32(0, ts->child_tidptr);
8274                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8275                           NULL, NULL, 0);
8276             }
8277             thread_cpu = NULL;
8278             g_free(ts);
8279             rcu_unregister_thread();
8280             pthread_exit(NULL);
8281         }
8282 
8283         pthread_mutex_unlock(&clone_lock);
8284         preexit_cleanup(cpu_env, arg1);
8285         _exit(arg1);
8286         return 0; /* avoid warning */
8287     case TARGET_NR_read:
8288         if (arg2 == 0 && arg3 == 0) {
8289             return get_errno(safe_read(arg1, 0, 0));
8290         } else {
8291             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8292                 return -TARGET_EFAULT;
8293             ret = get_errno(safe_read(arg1, p, arg3));
8294             if (ret >= 0 &&
8295                 fd_trans_host_to_target_data(arg1)) {
8296                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8297             }
8298             unlock_user(p, arg2, ret);
8299         }
8300         return ret;
8301     case TARGET_NR_write:
8302         if (arg2 == 0 && arg3 == 0) {
8303             return get_errno(safe_write(arg1, 0, 0));
8304         }
8305         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8306             return -TARGET_EFAULT;
8307         if (fd_trans_target_to_host_data(arg1)) {
8308             void *copy = g_malloc(arg3);
8309             memcpy(copy, p, arg3);
8310             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8311             if (ret >= 0) {
8312                 ret = get_errno(safe_write(arg1, copy, ret));
8313             }
8314             g_free(copy);
8315         } else {
8316             ret = get_errno(safe_write(arg1, p, arg3));
8317         }
8318         unlock_user(p, arg2, 0);
8319         return ret;
8320 
8321 #ifdef TARGET_NR_open
8322     case TARGET_NR_open:
8323         if (!(p = lock_user_string(arg1)))
8324             return -TARGET_EFAULT;
8325         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8326                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8327                                   arg3));
8328         fd_trans_unregister(ret);
8329         unlock_user(p, arg1, 0);
8330         return ret;
8331 #endif
8332     case TARGET_NR_openat:
8333         if (!(p = lock_user_string(arg2)))
8334             return -TARGET_EFAULT;
8335         ret = get_errno(do_openat(cpu_env, arg1, p,
8336                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8337                                   arg4));
8338         fd_trans_unregister(ret);
8339         unlock_user(p, arg2, 0);
8340         return ret;
8341 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8342     case TARGET_NR_name_to_handle_at:
8343         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8344         return ret;
8345 #endif
8346 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8347     case TARGET_NR_open_by_handle_at:
8348         ret = do_open_by_handle_at(arg1, arg2, arg3);
8349         fd_trans_unregister(ret);
8350         return ret;
8351 #endif
8352     case TARGET_NR_close:
8353         fd_trans_unregister(arg1);
8354         return get_errno(close(arg1));
8355 
8356     case TARGET_NR_brk:
8357         return do_brk(arg1);
8358 #ifdef TARGET_NR_fork
8359     case TARGET_NR_fork:
8360         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8361 #endif
8362 #ifdef TARGET_NR_waitpid
8363     case TARGET_NR_waitpid:
8364         {
8365             int status;
8366             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8367             if (!is_error(ret) && arg2 && ret
8368                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8369                 return -TARGET_EFAULT;
8370         }
8371         return ret;
8372 #endif
8373 #ifdef TARGET_NR_waitid
8374     case TARGET_NR_waitid:
8375         {
8376             siginfo_t info;
8377             info.si_pid = 0;
8378             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8379             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8380                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8381                     return -TARGET_EFAULT;
8382                 host_to_target_siginfo(p, &info);
8383                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8384             }
8385         }
8386         return ret;
8387 #endif
8388 #ifdef TARGET_NR_creat /* not on alpha */
8389     case TARGET_NR_creat:
8390         if (!(p = lock_user_string(arg1)))
8391             return -TARGET_EFAULT;
8392         ret = get_errno(creat(p, arg2));
8393         fd_trans_unregister(ret);
8394         unlock_user(p, arg1, 0);
8395         return ret;
8396 #endif
8397 #ifdef TARGET_NR_link
8398     case TARGET_NR_link:
8399         {
8400             void * p2;
8401             p = lock_user_string(arg1);
8402             p2 = lock_user_string(arg2);
8403             if (!p || !p2)
8404                 ret = -TARGET_EFAULT;
8405             else
8406                 ret = get_errno(link(p, p2));
8407             unlock_user(p2, arg2, 0);
8408             unlock_user(p, arg1, 0);
8409         }
8410         return ret;
8411 #endif
8412 #if defined(TARGET_NR_linkat)
8413     case TARGET_NR_linkat:
8414         {
8415             void * p2 = NULL;
8416             if (!arg2 || !arg4)
8417                 return -TARGET_EFAULT;
8418             p  = lock_user_string(arg2);
8419             p2 = lock_user_string(arg4);
8420             if (!p || !p2)
8421                 ret = -TARGET_EFAULT;
8422             else
8423                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8424             unlock_user(p, arg2, 0);
8425             unlock_user(p2, arg4, 0);
8426         }
8427         return ret;
8428 #endif
8429 #ifdef TARGET_NR_unlink
8430     case TARGET_NR_unlink:
8431         if (!(p = lock_user_string(arg1)))
8432             return -TARGET_EFAULT;
8433         ret = get_errno(unlink(p));
8434         unlock_user(p, arg1, 0);
8435         return ret;
8436 #endif
8437 #if defined(TARGET_NR_unlinkat)
8438     case TARGET_NR_unlinkat:
8439         if (!(p = lock_user_string(arg2)))
8440             return -TARGET_EFAULT;
8441         ret = get_errno(unlinkat(arg1, p, arg3));
8442         unlock_user(p, arg2, 0);
8443         return ret;
8444 #endif
8445     case TARGET_NR_execve:
8446         {
8447             char **argp, **envp;
8448             int argc, envc;
8449             abi_ulong gp;
8450             abi_ulong guest_argp;
8451             abi_ulong guest_envp;
8452             abi_ulong addr;
8453             char **q;
8454             int total_size = 0;
8455 
8456             argc = 0;
8457             guest_argp = arg2;
8458             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8459                 if (get_user_ual(addr, gp))
8460                     return -TARGET_EFAULT;
8461                 if (!addr)
8462                     break;
8463                 argc++;
8464             }
8465             envc = 0;
8466             guest_envp = arg3;
8467             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8468                 if (get_user_ual(addr, gp))
8469                     return -TARGET_EFAULT;
8470                 if (!addr)
8471                     break;
8472                 envc++;
8473             }
8474 
8475             argp = g_new0(char *, argc + 1);
8476             envp = g_new0(char *, envc + 1);
8477 
8478             for (gp = guest_argp, q = argp; gp;
8479                   gp += sizeof(abi_ulong), q++) {
8480                 if (get_user_ual(addr, gp))
8481                     goto execve_efault;
8482                 if (!addr)
8483                     break;
8484                 if (!(*q = lock_user_string(addr)))
8485                     goto execve_efault;
8486                 total_size += strlen(*q) + 1;
8487             }
8488             *q = NULL;
8489 
8490             for (gp = guest_envp, q = envp; gp;
8491                   gp += sizeof(abi_ulong), q++) {
8492                 if (get_user_ual(addr, gp))
8493                     goto execve_efault;
8494                 if (!addr)
8495                     break;
8496                 if (!(*q = lock_user_string(addr)))
8497                     goto execve_efault;
8498                 total_size += strlen(*q) + 1;
8499             }
8500             *q = NULL;
8501 
8502             if (!(p = lock_user_string(arg1)))
8503                 goto execve_efault;
8504             /* Although execve() is not an interruptible syscall it is
8505              * a special case where we must use the safe_syscall wrapper:
8506              * if we allow a signal to happen before we make the host
8507              * syscall then we will 'lose' it, because at the point of
8508              * execve the process leaves QEMU's control. So we use the
8509              * safe syscall wrapper to ensure that we either take the
8510              * signal as a guest signal, or else it does not happen
8511              * before the execve completes and makes it the other
8512              * program's problem.
8513              */
8514             ret = get_errno(safe_execve(p, argp, envp));
8515             unlock_user(p, arg1, 0);
8516 
8517             goto execve_end;
8518 
8519         execve_efault:
8520             ret = -TARGET_EFAULT;
8521 
8522         execve_end:
8523             for (gp = guest_argp, q = argp; *q;
8524                   gp += sizeof(abi_ulong), q++) {
8525                 if (get_user_ual(addr, gp)
8526                     || !addr)
8527                     break;
8528                 unlock_user(*q, addr, 0);
8529             }
8530             for (gp = guest_envp, q = envp; *q;
8531                   gp += sizeof(abi_ulong), q++) {
8532                 if (get_user_ual(addr, gp)
8533                     || !addr)
8534                     break;
8535                 unlock_user(*q, addr, 0);
8536             }
8537 
8538             g_free(argp);
8539             g_free(envp);
8540         }
8541         return ret;
8542     case TARGET_NR_chdir:
8543         if (!(p = lock_user_string(arg1)))
8544             return -TARGET_EFAULT;
8545         ret = get_errno(chdir(p));
8546         unlock_user(p, arg1, 0);
8547         return ret;
8548 #ifdef TARGET_NR_time
8549     case TARGET_NR_time:
8550         {
8551             time_t host_time;
8552             ret = get_errno(time(&host_time));
8553             if (!is_error(ret)
8554                 && arg1
8555                 && put_user_sal(host_time, arg1))
8556                 return -TARGET_EFAULT;
8557         }
8558         return ret;
8559 #endif
8560 #ifdef TARGET_NR_mknod
8561     case TARGET_NR_mknod:
8562         if (!(p = lock_user_string(arg1)))
8563             return -TARGET_EFAULT;
8564         ret = get_errno(mknod(p, arg2, arg3));
8565         unlock_user(p, arg1, 0);
8566         return ret;
8567 #endif
8568 #if defined(TARGET_NR_mknodat)
8569     case TARGET_NR_mknodat:
8570         if (!(p = lock_user_string(arg2)))
8571             return -TARGET_EFAULT;
8572         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8573         unlock_user(p, arg2, 0);
8574         return ret;
8575 #endif
8576 #ifdef TARGET_NR_chmod
8577     case TARGET_NR_chmod:
8578         if (!(p = lock_user_string(arg1)))
8579             return -TARGET_EFAULT;
8580         ret = get_errno(chmod(p, arg2));
8581         unlock_user(p, arg1, 0);
8582         return ret;
8583 #endif
8584 #ifdef TARGET_NR_lseek
8585     case TARGET_NR_lseek:
8586         return get_errno(lseek(arg1, arg2, arg3));
8587 #endif
8588 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8589     /* Alpha specific */
8590     case TARGET_NR_getxpid:
8591         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8592         return get_errno(getpid());
8593 #endif
8594 #ifdef TARGET_NR_getpid
8595     case TARGET_NR_getpid:
8596         return get_errno(getpid());
8597 #endif
8598     case TARGET_NR_mount:
8599         {
8600             /* need to look at the data field */
8601             void *p2, *p3;
8602 
8603             if (arg1) {
8604                 p = lock_user_string(arg1);
8605                 if (!p) {
8606                     return -TARGET_EFAULT;
8607                 }
8608             } else {
8609                 p = NULL;
8610             }
8611 
8612             p2 = lock_user_string(arg2);
8613             if (!p2) {
8614                 if (arg1) {
8615                     unlock_user(p, arg1, 0);
8616                 }
8617                 return -TARGET_EFAULT;
8618             }
8619 
8620             if (arg3) {
8621                 p3 = lock_user_string(arg3);
8622                 if (!p3) {
8623                     if (arg1) {
8624                         unlock_user(p, arg1, 0);
8625                     }
8626                     unlock_user(p2, arg2, 0);
8627                     return -TARGET_EFAULT;
8628                 }
8629             } else {
8630                 p3 = NULL;
8631             }
8632 
8633             /* FIXME - arg5 should be locked, but it isn't clear how to
8634              * do that since it's not guaranteed to be a NULL-terminated
8635              * string.
8636              */
8637             if (!arg5) {
8638                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8639             } else {
8640                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8641             }
8642             ret = get_errno(ret);
8643 
8644             if (arg1) {
8645                 unlock_user(p, arg1, 0);
8646             }
8647             unlock_user(p2, arg2, 0);
8648             if (arg3) {
8649                 unlock_user(p3, arg3, 0);
8650             }
8651         }
8652         return ret;
8653 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8654 #if defined(TARGET_NR_umount)
8655     case TARGET_NR_umount:
8656 #endif
8657 #if defined(TARGET_NR_oldumount)
8658     case TARGET_NR_oldumount:
8659 #endif
8660         if (!(p = lock_user_string(arg1)))
8661             return -TARGET_EFAULT;
8662         ret = get_errno(umount(p));
8663         unlock_user(p, arg1, 0);
8664         return ret;
8665 #endif
8666 #ifdef TARGET_NR_stime /* not on alpha */
8667     case TARGET_NR_stime:
8668         {
8669             struct timespec ts;
8670             ts.tv_nsec = 0;
8671             if (get_user_sal(ts.tv_sec, arg1)) {
8672                 return -TARGET_EFAULT;
8673             }
8674             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8675         }
8676 #endif
8677 #ifdef TARGET_NR_alarm /* not on alpha */
8678     case TARGET_NR_alarm:
8679         return alarm(arg1);
8680 #endif
8681 #ifdef TARGET_NR_pause /* not on alpha */
8682     case TARGET_NR_pause:
8683         if (!block_signals()) {
8684             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8685         }
8686         return -TARGET_EINTR;
8687 #endif
8688 #ifdef TARGET_NR_utime
8689     case TARGET_NR_utime:
8690         {
8691             struct utimbuf tbuf, *host_tbuf;
8692             struct target_utimbuf *target_tbuf;
8693             if (arg2) {
8694                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8695                     return -TARGET_EFAULT;
8696                 tbuf.actime = tswapal(target_tbuf->actime);
8697                 tbuf.modtime = tswapal(target_tbuf->modtime);
8698                 unlock_user_struct(target_tbuf, arg2, 0);
8699                 host_tbuf = &tbuf;
8700             } else {
8701                 host_tbuf = NULL;
8702             }
8703             if (!(p = lock_user_string(arg1)))
8704                 return -TARGET_EFAULT;
8705             ret = get_errno(utime(p, host_tbuf));
8706             unlock_user(p, arg1, 0);
8707         }
8708         return ret;
8709 #endif
8710 #ifdef TARGET_NR_utimes
8711     case TARGET_NR_utimes:
8712         {
8713             struct timeval *tvp, tv[2];
8714             if (arg2) {
8715                 if (copy_from_user_timeval(&tv[0], arg2)
8716                     || copy_from_user_timeval(&tv[1],
8717                                               arg2 + sizeof(struct target_timeval)))
8718                     return -TARGET_EFAULT;
8719                 tvp = tv;
8720             } else {
8721                 tvp = NULL;
8722             }
8723             if (!(p = lock_user_string(arg1)))
8724                 return -TARGET_EFAULT;
8725             ret = get_errno(utimes(p, tvp));
8726             unlock_user(p, arg1, 0);
8727         }
8728         return ret;
8729 #endif
8730 #if defined(TARGET_NR_futimesat)
8731     case TARGET_NR_futimesat:
8732         {
8733             struct timeval *tvp, tv[2];
8734             if (arg3) {
8735                 if (copy_from_user_timeval(&tv[0], arg3)
8736                     || copy_from_user_timeval(&tv[1],
8737                                               arg3 + sizeof(struct target_timeval)))
8738                     return -TARGET_EFAULT;
8739                 tvp = tv;
8740             } else {
8741                 tvp = NULL;
8742             }
8743             if (!(p = lock_user_string(arg2))) {
8744                 return -TARGET_EFAULT;
8745             }
8746             ret = get_errno(futimesat(arg1, path(p), tvp));
8747             unlock_user(p, arg2, 0);
8748         }
8749         return ret;
8750 #endif
8751 #ifdef TARGET_NR_access
8752     case TARGET_NR_access:
8753         if (!(p = lock_user_string(arg1))) {
8754             return -TARGET_EFAULT;
8755         }
8756         ret = get_errno(access(path(p), arg2));
8757         unlock_user(p, arg1, 0);
8758         return ret;
8759 #endif
8760 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8761     case TARGET_NR_faccessat:
8762         if (!(p = lock_user_string(arg2))) {
8763             return -TARGET_EFAULT;
8764         }
8765         ret = get_errno(faccessat(arg1, p, arg3, 0));
8766         unlock_user(p, arg2, 0);
8767         return ret;
8768 #endif
8769 #ifdef TARGET_NR_nice /* not on alpha */
8770     case TARGET_NR_nice:
8771         return get_errno(nice(arg1));
8772 #endif
8773     case TARGET_NR_sync:
8774         sync();
8775         return 0;
8776 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8777     case TARGET_NR_syncfs:
8778         return get_errno(syncfs(arg1));
8779 #endif
8780     case TARGET_NR_kill:
8781         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8782 #ifdef TARGET_NR_rename
8783     case TARGET_NR_rename:
8784         {
8785             void *p2;
8786             p = lock_user_string(arg1);
8787             p2 = lock_user_string(arg2);
8788             if (!p || !p2)
8789                 ret = -TARGET_EFAULT;
8790             else
8791                 ret = get_errno(rename(p, p2));
8792             unlock_user(p2, arg2, 0);
8793             unlock_user(p, arg1, 0);
8794         }
8795         return ret;
8796 #endif
8797 #if defined(TARGET_NR_renameat)
8798     case TARGET_NR_renameat:
8799         {
8800             void *p2;
8801             p  = lock_user_string(arg2);
8802             p2 = lock_user_string(arg4);
8803             if (!p || !p2)
8804                 ret = -TARGET_EFAULT;
8805             else
8806                 ret = get_errno(renameat(arg1, p, arg3, p2));
8807             unlock_user(p2, arg4, 0);
8808             unlock_user(p, arg2, 0);
8809         }
8810         return ret;
8811 #endif
8812 #if defined(TARGET_NR_renameat2)
8813     case TARGET_NR_renameat2:
8814         {
8815             void *p2;
8816             p  = lock_user_string(arg2);
8817             p2 = lock_user_string(arg4);
8818             if (!p || !p2) {
8819                 ret = -TARGET_EFAULT;
8820             } else {
8821                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8822             }
8823             unlock_user(p2, arg4, 0);
8824             unlock_user(p, arg2, 0);
8825         }
8826         return ret;
8827 #endif
8828 #ifdef TARGET_NR_mkdir
8829     case TARGET_NR_mkdir:
8830         if (!(p = lock_user_string(arg1)))
8831             return -TARGET_EFAULT;
8832         ret = get_errno(mkdir(p, arg2));
8833         unlock_user(p, arg1, 0);
8834         return ret;
8835 #endif
8836 #if defined(TARGET_NR_mkdirat)
8837     case TARGET_NR_mkdirat:
8838         if (!(p = lock_user_string(arg2)))
8839             return -TARGET_EFAULT;
8840         ret = get_errno(mkdirat(arg1, p, arg3));
8841         unlock_user(p, arg2, 0);
8842         return ret;
8843 #endif
8844 #ifdef TARGET_NR_rmdir
8845     case TARGET_NR_rmdir:
8846         if (!(p = lock_user_string(arg1)))
8847             return -TARGET_EFAULT;
8848         ret = get_errno(rmdir(p));
8849         unlock_user(p, arg1, 0);
8850         return ret;
8851 #endif
8852     case TARGET_NR_dup:
8853         ret = get_errno(dup(arg1));
8854         if (ret >= 0) {
8855             fd_trans_dup(arg1, ret);
8856         }
8857         return ret;
8858 #ifdef TARGET_NR_pipe
8859     case TARGET_NR_pipe:
8860         return do_pipe(cpu_env, arg1, 0, 0);
8861 #endif
8862 #ifdef TARGET_NR_pipe2
8863     case TARGET_NR_pipe2:
8864         return do_pipe(cpu_env, arg1,
8865                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8866 #endif
8867     case TARGET_NR_times:
8868         {
8869             struct target_tms *tmsp;
8870             struct tms tms;
8871             ret = get_errno(times(&tms));
8872             if (arg1) {
8873                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8874                 if (!tmsp)
8875                     return -TARGET_EFAULT;
8876                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8877                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8878                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8879                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8880             }
8881             if (!is_error(ret))
8882                 ret = host_to_target_clock_t(ret);
8883         }
8884         return ret;
8885     case TARGET_NR_acct:
8886         if (arg1 == 0) {
8887             ret = get_errno(acct(NULL));
8888         } else {
8889             if (!(p = lock_user_string(arg1))) {
8890                 return -TARGET_EFAULT;
8891             }
8892             ret = get_errno(acct(path(p)));
8893             unlock_user(p, arg1, 0);
8894         }
8895         return ret;
8896 #ifdef TARGET_NR_umount2
8897     case TARGET_NR_umount2:
8898         if (!(p = lock_user_string(arg1)))
8899             return -TARGET_EFAULT;
8900         ret = get_errno(umount2(p, arg2));
8901         unlock_user(p, arg1, 0);
8902         return ret;
8903 #endif
8904     case TARGET_NR_ioctl:
8905         return do_ioctl(arg1, arg2, arg3);
8906 #ifdef TARGET_NR_fcntl
8907     case TARGET_NR_fcntl:
8908         return do_fcntl(arg1, arg2, arg3);
8909 #endif
8910     case TARGET_NR_setpgid:
8911         return get_errno(setpgid(arg1, arg2));
8912     case TARGET_NR_umask:
8913         return get_errno(umask(arg1));
8914     case TARGET_NR_chroot:
8915         if (!(p = lock_user_string(arg1)))
8916             return -TARGET_EFAULT;
8917         ret = get_errno(chroot(p));
8918         unlock_user(p, arg1, 0);
8919         return ret;
8920 #ifdef TARGET_NR_dup2
8921     case TARGET_NR_dup2:
8922         ret = get_errno(dup2(arg1, arg2));
8923         if (ret >= 0) {
8924             fd_trans_dup(arg1, arg2);
8925         }
8926         return ret;
8927 #endif
8928 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8929     case TARGET_NR_dup3:
8930     {
8931         int host_flags;
8932 
8933         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8934             return -EINVAL;
8935         }
8936         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8937         ret = get_errno(dup3(arg1, arg2, host_flags));
8938         if (ret >= 0) {
8939             fd_trans_dup(arg1, arg2);
8940         }
8941         return ret;
8942     }
8943 #endif
8944 #ifdef TARGET_NR_getppid /* not on alpha */
8945     case TARGET_NR_getppid:
8946         return get_errno(getppid());
8947 #endif
8948 #ifdef TARGET_NR_getpgrp
8949     case TARGET_NR_getpgrp:
8950         return get_errno(getpgrp());
8951 #endif
8952     case TARGET_NR_setsid:
8953         return get_errno(setsid());
8954 #ifdef TARGET_NR_sigaction
8955     case TARGET_NR_sigaction:
8956         {
8957 #if defined(TARGET_ALPHA)
8958             struct target_sigaction act, oact, *pact = 0;
8959             struct target_old_sigaction *old_act;
8960             if (arg2) {
8961                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8962                     return -TARGET_EFAULT;
8963                 act._sa_handler = old_act->_sa_handler;
8964                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8965                 act.sa_flags = old_act->sa_flags;
8966                 act.sa_restorer = 0;
8967                 unlock_user_struct(old_act, arg2, 0);
8968                 pact = &act;
8969             }
8970             ret = get_errno(do_sigaction(arg1, pact, &oact));
8971             if (!is_error(ret) && arg3) {
8972                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8973                     return -TARGET_EFAULT;
8974                 old_act->_sa_handler = oact._sa_handler;
8975                 old_act->sa_mask = oact.sa_mask.sig[0];
8976                 old_act->sa_flags = oact.sa_flags;
8977                 unlock_user_struct(old_act, arg3, 1);
8978             }
8979 #elif defined(TARGET_MIPS)
8980 	    struct target_sigaction act, oact, *pact, *old_act;
8981 
8982 	    if (arg2) {
8983                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8984                     return -TARGET_EFAULT;
8985 		act._sa_handler = old_act->_sa_handler;
8986 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8987 		act.sa_flags = old_act->sa_flags;
8988 		unlock_user_struct(old_act, arg2, 0);
8989 		pact = &act;
8990 	    } else {
8991 		pact = NULL;
8992 	    }
8993 
8994 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8995 
8996 	    if (!is_error(ret) && arg3) {
8997                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8998                     return -TARGET_EFAULT;
8999 		old_act->_sa_handler = oact._sa_handler;
9000 		old_act->sa_flags = oact.sa_flags;
9001 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9002 		old_act->sa_mask.sig[1] = 0;
9003 		old_act->sa_mask.sig[2] = 0;
9004 		old_act->sa_mask.sig[3] = 0;
9005 		unlock_user_struct(old_act, arg3, 1);
9006 	    }
9007 #else
9008             struct target_old_sigaction *old_act;
9009             struct target_sigaction act, oact, *pact;
9010             if (arg2) {
9011                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9012                     return -TARGET_EFAULT;
9013                 act._sa_handler = old_act->_sa_handler;
9014                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9015                 act.sa_flags = old_act->sa_flags;
9016                 act.sa_restorer = old_act->sa_restorer;
9017 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9018                 act.ka_restorer = 0;
9019 #endif
9020                 unlock_user_struct(old_act, arg2, 0);
9021                 pact = &act;
9022             } else {
9023                 pact = NULL;
9024             }
9025             ret = get_errno(do_sigaction(arg1, pact, &oact));
9026             if (!is_error(ret) && arg3) {
9027                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9028                     return -TARGET_EFAULT;
9029                 old_act->_sa_handler = oact._sa_handler;
9030                 old_act->sa_mask = oact.sa_mask.sig[0];
9031                 old_act->sa_flags = oact.sa_flags;
9032                 old_act->sa_restorer = oact.sa_restorer;
9033                 unlock_user_struct(old_act, arg3, 1);
9034             }
9035 #endif
9036         }
9037         return ret;
9038 #endif
9039     case TARGET_NR_rt_sigaction:
9040         {
9041 #if defined(TARGET_ALPHA)
9042             /* For Alpha and SPARC this is a 5 argument syscall, with
9043              * a 'restorer' parameter which must be copied into the
9044              * sa_restorer field of the sigaction struct.
9045              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9046              * and arg5 is the sigsetsize.
9047              * Alpha also has a separate rt_sigaction struct that it uses
9048              * here; SPARC uses the usual sigaction struct.
9049              */
9050             struct target_rt_sigaction *rt_act;
9051             struct target_sigaction act, oact, *pact = 0;
9052 
9053             if (arg4 != sizeof(target_sigset_t)) {
9054                 return -TARGET_EINVAL;
9055             }
9056             if (arg2) {
9057                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9058                     return -TARGET_EFAULT;
9059                 act._sa_handler = rt_act->_sa_handler;
9060                 act.sa_mask = rt_act->sa_mask;
9061                 act.sa_flags = rt_act->sa_flags;
9062                 act.sa_restorer = arg5;
9063                 unlock_user_struct(rt_act, arg2, 0);
9064                 pact = &act;
9065             }
9066             ret = get_errno(do_sigaction(arg1, pact, &oact));
9067             if (!is_error(ret) && arg3) {
9068                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9069                     return -TARGET_EFAULT;
9070                 rt_act->_sa_handler = oact._sa_handler;
9071                 rt_act->sa_mask = oact.sa_mask;
9072                 rt_act->sa_flags = oact.sa_flags;
9073                 unlock_user_struct(rt_act, arg3, 1);
9074             }
9075 #else
9076 #ifdef TARGET_SPARC
9077             target_ulong restorer = arg4;
9078             target_ulong sigsetsize = arg5;
9079 #else
9080             target_ulong sigsetsize = arg4;
9081 #endif
9082             struct target_sigaction *act;
9083             struct target_sigaction *oact;
9084 
9085             if (sigsetsize != sizeof(target_sigset_t)) {
9086                 return -TARGET_EINVAL;
9087             }
9088             if (arg2) {
9089                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9090                     return -TARGET_EFAULT;
9091                 }
9092 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9093                 act->ka_restorer = restorer;
9094 #endif
9095             } else {
9096                 act = NULL;
9097             }
9098             if (arg3) {
9099                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9100                     ret = -TARGET_EFAULT;
9101                     goto rt_sigaction_fail;
9102                 }
9103             } else
9104                 oact = NULL;
9105             ret = get_errno(do_sigaction(arg1, act, oact));
9106 	rt_sigaction_fail:
9107             if (act)
9108                 unlock_user_struct(act, arg2, 0);
9109             if (oact)
9110                 unlock_user_struct(oact, arg3, 1);
9111 #endif
9112         }
9113         return ret;
9114 #ifdef TARGET_NR_sgetmask /* not on alpha */
9115     case TARGET_NR_sgetmask:
9116         {
9117             sigset_t cur_set;
9118             abi_ulong target_set;
9119             ret = do_sigprocmask(0, NULL, &cur_set);
9120             if (!ret) {
9121                 host_to_target_old_sigset(&target_set, &cur_set);
9122                 ret = target_set;
9123             }
9124         }
9125         return ret;
9126 #endif
9127 #ifdef TARGET_NR_ssetmask /* not on alpha */
9128     case TARGET_NR_ssetmask:
9129         {
9130             sigset_t set, oset;
9131             abi_ulong target_set = arg1;
9132             target_to_host_old_sigset(&set, &target_set);
9133             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9134             if (!ret) {
9135                 host_to_target_old_sigset(&target_set, &oset);
9136                 ret = target_set;
9137             }
9138         }
9139         return ret;
9140 #endif
9141 #ifdef TARGET_NR_sigprocmask
9142     case TARGET_NR_sigprocmask:
9143         {
9144 #if defined(TARGET_ALPHA)
9145             sigset_t set, oldset;
9146             abi_ulong mask;
9147             int how;
9148 
9149             switch (arg1) {
9150             case TARGET_SIG_BLOCK:
9151                 how = SIG_BLOCK;
9152                 break;
9153             case TARGET_SIG_UNBLOCK:
9154                 how = SIG_UNBLOCK;
9155                 break;
9156             case TARGET_SIG_SETMASK:
9157                 how = SIG_SETMASK;
9158                 break;
9159             default:
9160                 return -TARGET_EINVAL;
9161             }
9162             mask = arg2;
9163             target_to_host_old_sigset(&set, &mask);
9164 
9165             ret = do_sigprocmask(how, &set, &oldset);
9166             if (!is_error(ret)) {
9167                 host_to_target_old_sigset(&mask, &oldset);
9168                 ret = mask;
9169                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9170             }
9171 #else
9172             sigset_t set, oldset, *set_ptr;
9173             int how;
9174 
9175             if (arg2) {
9176                 switch (arg1) {
9177                 case TARGET_SIG_BLOCK:
9178                     how = SIG_BLOCK;
9179                     break;
9180                 case TARGET_SIG_UNBLOCK:
9181                     how = SIG_UNBLOCK;
9182                     break;
9183                 case TARGET_SIG_SETMASK:
9184                     how = SIG_SETMASK;
9185                     break;
9186                 default:
9187                     return -TARGET_EINVAL;
9188                 }
9189                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9190                     return -TARGET_EFAULT;
9191                 target_to_host_old_sigset(&set, p);
9192                 unlock_user(p, arg2, 0);
9193                 set_ptr = &set;
9194             } else {
9195                 how = 0;
9196                 set_ptr = NULL;
9197             }
9198             ret = do_sigprocmask(how, set_ptr, &oldset);
9199             if (!is_error(ret) && arg3) {
9200                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9201                     return -TARGET_EFAULT;
9202                 host_to_target_old_sigset(p, &oldset);
9203                 unlock_user(p, arg3, sizeof(target_sigset_t));
9204             }
9205 #endif
9206         }
9207         return ret;
9208 #endif
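    /*
     * rt_sigprocmask carries an explicit sigsetsize argument (arg4);
     * anything other than the target sigset_t size is rejected.
     */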
9209     case TARGET_NR_rt_sigprocmask:
9210         {
9211             int how = arg1;
9212             sigset_t set, oldset, *set_ptr;
9213 
9214             if (arg4 != sizeof(target_sigset_t)) {
9215                 return -TARGET_EINVAL;
9216             }
9217 
9218             if (arg2) {
9219                 switch(how) {
9220                 case TARGET_SIG_BLOCK:
9221                     how = SIG_BLOCK;
9222                     break;
9223                 case TARGET_SIG_UNBLOCK:
9224                     how = SIG_UNBLOCK;
9225                     break;
9226                 case TARGET_SIG_SETMASK:
9227                     how = SIG_SETMASK;
9228                     break;
9229                 default:
9230                     return -TARGET_EINVAL;
9231                 }
9232                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9233                     return -TARGET_EFAULT;
9234                 target_to_host_sigset(&set, p);
9235                 unlock_user(p, arg2, 0);
9236                 set_ptr = &set;
9237             } else {
9238                 how = 0;
9239                 set_ptr = NULL;
9240             }
9241             ret = do_sigprocmask(how, set_ptr, &oldset);
9242             if (!is_error(ret) && arg3) {
9243                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9244                     return -TARGET_EFAULT;
9245                 host_to_target_sigset(p, &oldset);
9246                 unlock_user(p, arg3, sizeof(target_sigset_t));
9247             }
9248         }
9249         return ret;
9250 #ifdef TARGET_NR_sigpending
9251     case TARGET_NR_sigpending:
9252         {
9253             sigset_t set;
9254             ret = get_errno(sigpending(&set));
9255             if (!is_error(ret)) {
9256                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9257                     return -TARGET_EFAULT;
9258                 host_to_target_old_sigset(p, &set);
9259                 unlock_user(p, arg1, sizeof(target_sigset_t));
9260             }
9261         }
9262         return ret;
9263 #endif
9264     case TARGET_NR_rt_sigpending:
9265         {
9266             sigset_t set;
9267 
9268             /* Yes, this check is >, not != like most. We follow the kernel's
9269              * logic here: it implements NR_sigpending through the same code
9270              * path, and in that case the old_sigset_t is smaller in size.
9271              */
9273             if (arg2 > sizeof(target_sigset_t)) {
9274                 return -TARGET_EINVAL;
9275             }
9276 
9277             ret = get_errno(sigpending(&set));
9278             if (!is_error(ret)) {
9279                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9280                     return -TARGET_EFAULT;
9281                 host_to_target_sigset(p, &set);
9282                 unlock_user(p, arg1, sizeof(target_sigset_t));
9283             }
9284         }
9285         return ret;
9286 #ifdef TARGET_NR_sigsuspend
9287     case TARGET_NR_sigsuspend:
9288         {
9289             TaskState *ts = cpu->opaque;
9290 #if defined(TARGET_ALPHA)
9291             abi_ulong mask = arg1;
9292             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9293 #else
9294             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9295                 return -TARGET_EFAULT;
9296             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9297             unlock_user(p, arg1, 0);
9298 #endif
9299             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9300                                                SIGSET_T_SIZE));
9301             if (ret != -TARGET_ERESTARTSYS) {
9302                 ts->in_sigsuspend = 1;
9303             }
9304         }
9305         return ret;
9306 #endif
9307     case TARGET_NR_rt_sigsuspend:
9308         {
9309             TaskState *ts = cpu->opaque;
9310 
9311             if (arg2 != sizeof(target_sigset_t)) {
9312                 return -TARGET_EINVAL;
9313             }
9314             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9315                 return -TARGET_EFAULT;
9316             target_to_host_sigset(&ts->sigsuspend_mask, p);
9317             unlock_user(p, arg1, 0);
9318             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9319                                                SIGSET_T_SIZE));
9320             if (ret != -TARGET_ERESTARTSYS) {
9321                 ts->in_sigsuspend = 1;
9322             }
9323         }
9324         return ret;
9325 #ifdef TARGET_NR_rt_sigtimedwait
9326     case TARGET_NR_rt_sigtimedwait:
9327         {
9328             sigset_t set;
9329             struct timespec uts, *puts;
9330             siginfo_t uinfo;
9331 
9332             if (arg4 != sizeof(target_sigset_t)) {
9333                 return -TARGET_EINVAL;
9334             }
9335 
9336             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9337                 return -TARGET_EFAULT;
9338             target_to_host_sigset(&set, p);
9339             unlock_user(p, arg1, 0);
9340             if (arg3) {
9341                 puts = &uts;
9342                 if (target_to_host_timespec(puts, arg3)) {
9343                     return -TARGET_EFAULT;
9344                 }
9345             } else {
9346                 puts = NULL;
9347             }
9348             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9349                                                  SIGSET_T_SIZE));
9350             if (!is_error(ret)) {
9351                 if (arg2) {
9352                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9353                                   0);
9354                     if (!p) {
9355                         return -TARGET_EFAULT;
9356                     }
9357                     host_to_target_siginfo(p, &uinfo);
9358                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9359                 }
9360                 ret = host_to_target_signal(ret);
9361             }
9362         }
9363         return ret;
9364 #endif
9365 #ifdef TARGET_NR_rt_sigtimedwait_time64
9366     case TARGET_NR_rt_sigtimedwait_time64:
9367         {
9368             sigset_t set;
9369             struct timespec uts, *puts;
9370             siginfo_t uinfo;
9371 
9372             if (arg4 != sizeof(target_sigset_t)) {
9373                 return -TARGET_EINVAL;
9374             }
9375 
9376             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9377             if (!p) {
9378                 return -TARGET_EFAULT;
9379             }
9380             target_to_host_sigset(&set, p);
9381             unlock_user(p, arg1, 0);
9382             if (arg3) {
9383                 puts = &uts;
9384                 if (target_to_host_timespec64(puts, arg3)) {
9385                     return -TARGET_EFAULT;
9386                 }
9387             } else {
9388                 puts = NULL;
9389             }
9390             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9391                                                  SIGSET_T_SIZE));
9392             if (!is_error(ret)) {
9393                 if (arg2) {
9394                     p = lock_user(VERIFY_WRITE, arg2,
9395                                   sizeof(target_siginfo_t), 0);
9396                     if (!p) {
9397                         return -TARGET_EFAULT;
9398                     }
9399                     host_to_target_siginfo(p, &uinfo);
9400                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9401                 }
9402                 ret = host_to_target_signal(ret);
9403             }
9404         }
9405         return ret;
9406 #endif
9407     case TARGET_NR_rt_sigqueueinfo:
9408         {
9409             siginfo_t uinfo;
9410 
9411             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9412             if (!p) {
9413                 return -TARGET_EFAULT;
9414             }
9415             target_to_host_siginfo(&uinfo, p);
9416             unlock_user(p, arg3, 0);
9417             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9418         }
9419         return ret;
9420     case TARGET_NR_rt_tgsigqueueinfo:
9421         {
9422             siginfo_t uinfo;
9423 
9424             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9425             if (!p) {
9426                 return -TARGET_EFAULT;
9427             }
9428             target_to_host_siginfo(&uinfo, p);
9429             unlock_user(p, arg4, 0);
9430             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9431         }
9432         return ret;
9433 #ifdef TARGET_NR_sigreturn
9434     case TARGET_NR_sigreturn:
9435         if (block_signals()) {
9436             return -TARGET_ERESTARTSYS;
9437         }
9438         return do_sigreturn(cpu_env);
9439 #endif
9440     case TARGET_NR_rt_sigreturn:
9441         if (block_signals()) {
9442             return -TARGET_ERESTARTSYS;
9443         }
9444         return do_rt_sigreturn(cpu_env);
9445     case TARGET_NR_sethostname:
9446         if (!(p = lock_user_string(arg1)))
9447             return -TARGET_EFAULT;
9448         ret = get_errno(sethostname(p, arg2));
9449         unlock_user(p, arg1, 0);
9450         return ret;
9451 #ifdef TARGET_NR_setrlimit
9452     case TARGET_NR_setrlimit:
9453         {
9454             int resource = target_to_host_resource(arg1);
9455             struct target_rlimit *target_rlim;
9456             struct rlimit rlim;
9457             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9458                 return -TARGET_EFAULT;
9459             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9460             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9461             unlock_user_struct(target_rlim, arg2, 0);
9462             /*
9463              * If we just passed through resource limit settings for memory then
9464              * they would also apply to QEMU's own allocations, and QEMU will
9465              * crash or hang or die if its allocations fail. Ideally we would
9466              * track the guest allocations in QEMU and apply the limits ourselves.
9467              * For now, just tell the guest the call succeeded but don't actually
9468              * limit anything.
9469              */
9470             if (resource != RLIMIT_AS &&
9471                 resource != RLIMIT_DATA &&
9472                 resource != RLIMIT_STACK) {
9473                 return get_errno(setrlimit(resource, &rlim));
9474             } else {
9475                 return 0;
9476             }
9477         }
9478 #endif
9479 #ifdef TARGET_NR_getrlimit
9480     case TARGET_NR_getrlimit:
9481         {
9482             int resource = target_to_host_resource(arg1);
9483             struct target_rlimit *target_rlim;
9484             struct rlimit rlim;
9485 
9486             ret = get_errno(getrlimit(resource, &rlim));
9487             if (!is_error(ret)) {
9488                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9489                     return -TARGET_EFAULT;
9490                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9491                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9492                 unlock_user_struct(target_rlim, arg2, 1);
9493             }
9494         }
9495         return ret;
9496 #endif
9497     case TARGET_NR_getrusage:
9498         {
9499             struct rusage rusage;
9500             ret = get_errno(getrusage(arg1, &rusage));
9501             if (!is_error(ret)) {
9502                 ret = host_to_target_rusage(arg2, &rusage);
9503             }
9504         }
9505         return ret;
9506 #if defined(TARGET_NR_gettimeofday)
9507     case TARGET_NR_gettimeofday:
9508         {
9509             struct timeval tv;
9510             struct timezone tz;
9511 
9512             ret = get_errno(gettimeofday(&tv, &tz));
9513             if (!is_error(ret)) {
9514                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9515                     return -TARGET_EFAULT;
9516                 }
9517                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9518                     return -TARGET_EFAULT;
9519                 }
9520             }
9521         }
9522         return ret;
9523 #endif
9524 #if defined(TARGET_NR_settimeofday)
9525     case TARGET_NR_settimeofday:
9526         {
9527             struct timeval tv, *ptv = NULL;
9528             struct timezone tz, *ptz = NULL;
9529 
9530             if (arg1) {
9531                 if (copy_from_user_timeval(&tv, arg1)) {
9532                     return -TARGET_EFAULT;
9533                 }
9534                 ptv = &tv;
9535             }
9536 
9537             if (arg2) {
9538                 if (copy_from_user_timezone(&tz, arg2)) {
9539                     return -TARGET_EFAULT;
9540                 }
9541                 ptz = &tz;
9542             }
9543 
9544             return get_errno(settimeofday(ptv, ptz));
9545         }
9546 #endif
9547 #if defined(TARGET_NR_select)
9548     case TARGET_NR_select:
9549 #if defined(TARGET_WANT_NI_OLD_SELECT)
9550         /* some architectures used to have old_select here
9551          * but now return ENOSYS for it.
9552          */
9553         ret = -TARGET_ENOSYS;
9554 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9555         ret = do_old_select(arg1);
9556 #else
9557         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9558 #endif
9559         return ret;
9560 #endif
9561 #ifdef TARGET_NR_pselect6
9562     case TARGET_NR_pselect6:
9563         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9564 #endif
9565 #ifdef TARGET_NR_pselect6_time64
9566     case TARGET_NR_pselect6_time64:
9567         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9568 #endif
9569 #ifdef TARGET_NR_symlink
9570     case TARGET_NR_symlink:
9571         {
9572             void *p2;
9573             p = lock_user_string(arg1);
9574             p2 = lock_user_string(arg2);
9575             if (!p || !p2)
9576                 ret = -TARGET_EFAULT;
9577             else
9578                 ret = get_errno(symlink(p, p2));
9579             unlock_user(p2, arg2, 0);
9580             unlock_user(p, arg1, 0);
9581         }
9582         return ret;
9583 #endif
9584 #if defined(TARGET_NR_symlinkat)
9585     case TARGET_NR_symlinkat:
9586         {
9587             void *p2;
9588             p  = lock_user_string(arg1);
9589             p2 = lock_user_string(arg3);
9590             if (!p || !p2)
9591                 ret = -TARGET_EFAULT;
9592             else
9593                 ret = get_errno(symlinkat(p, arg2, p2));
9594             unlock_user(p2, arg3, 0);
9595             unlock_user(p, arg1, 0);
9596         }
9597         return ret;
9598 #endif
9599 #ifdef TARGET_NR_readlink
9600     case TARGET_NR_readlink:
9601         {
9602             void *p2;
9603             p = lock_user_string(arg1);
9604             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9605             if (!p || !p2) {
9606                 ret = -TARGET_EFAULT;
9607             } else if (!arg3) {
9608                 /* Short circuit this for the magic exe check. */
9609                 ret = -TARGET_EINVAL;
9610             } else if (is_proc_myself((const char *)p, "exe")) {
9611                 char real[PATH_MAX], *temp;
9612                 temp = realpath(exec_path, real);
9613                 /* Return value is # of bytes that we wrote to the buffer. */
9614                 if (temp == NULL) {
9615                     ret = get_errno(-1);
9616                 } else {
9617                     /* Don't worry about sign mismatch as earlier mapping
9618                      * logic would have thrown a bad address error. */
9619                     ret = MIN(strlen(real), arg3);
9620                     /* We cannot NUL terminate the string. */
9621                     memcpy(p2, real, ret);
9622                 }
9623             } else {
9624                 ret = get_errno(readlink(path(p), p2, arg3));
9625             }
9626             unlock_user(p2, arg2, ret);
9627             unlock_user(p, arg1, 0);
9628         }
9629         return ret;
9630 #endif
9631 #if defined(TARGET_NR_readlinkat)
9632     case TARGET_NR_readlinkat:
9633         {
9634             void *p2;
9635             p  = lock_user_string(arg2);
9636             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9637             if (!p || !p2) {
9638                 ret = -TARGET_EFAULT;
9639             } else if (is_proc_myself((const char *)p, "exe")) {
9640                 char real[PATH_MAX], *temp;
9641                 temp = realpath(exec_path, real);
9642             ret = temp == NULL ? get_errno(-1) : strlen(real);
9643                 snprintf((char *)p2, arg4, "%s", real);
9644             } else {
9645                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9646             }
9647             unlock_user(p2, arg3, ret);
9648             unlock_user(p, arg2, 0);
9649         }
9650         return ret;
9651 #endif
9652 #ifdef TARGET_NR_swapon
9653     case TARGET_NR_swapon:
9654         if (!(p = lock_user_string(arg1)))
9655             return -TARGET_EFAULT;
9656         ret = get_errno(swapon(p, arg2));
9657         unlock_user(p, arg1, 0);
9658         return ret;
9659 #endif
9660     case TARGET_NR_reboot:
9661         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9662            /* arg4 is only used for RESTART2; ignore it in all other cases */
9663            p = lock_user_string(arg4);
9664            if (!p) {
9665                return -TARGET_EFAULT;
9666            }
9667            ret = get_errno(reboot(arg1, arg2, arg3, p));
9668            unlock_user(p, arg4, 0);
9669         } else {
9670            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9671         }
9672         return ret;
9673 #ifdef TARGET_NR_mmap
9674     case TARGET_NR_mmap:
9675 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9676     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9677     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9678     || defined(TARGET_S390X)
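        /*
         * On these targets the old mmap syscall takes a single guest
         * pointer to a block of six abi_ulongs holding addr, length,
         * prot, flags, fd and offset.
         */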
9679         {
9680             abi_ulong *v;
9681             abi_ulong v1, v2, v3, v4, v5, v6;
9682             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9683                 return -TARGET_EFAULT;
9684             v1 = tswapal(v[0]);
9685             v2 = tswapal(v[1]);
9686             v3 = tswapal(v[2]);
9687             v4 = tswapal(v[3]);
9688             v5 = tswapal(v[4]);
9689             v6 = tswapal(v[5]);
9690             unlock_user(v, arg1, 0);
9691             ret = get_errno(target_mmap(v1, v2, v3,
9692                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9693                                         v5, v6));
9694         }
9695 #else
9696         ret = get_errno(target_mmap(arg1, arg2, arg3,
9697                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9698                                     arg5,
9699                                     arg6));
9700 #endif
9701         return ret;
9702 #endif
9703 #ifdef TARGET_NR_mmap2
9704     case TARGET_NR_mmap2:
9705 #ifndef MMAP_SHIFT
9706 #define MMAP_SHIFT 12
9707 #endif
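        /*
         * mmap2 passes the file offset in units of 2^MMAP_SHIFT
         * (4096-byte pages unless the target defines MMAP_SHIFT
         * differently), so scale it back up to a byte offset here.
         */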
9708         ret = target_mmap(arg1, arg2, arg3,
9709                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9710                           arg5, arg6 << MMAP_SHIFT);
9711         return get_errno(ret);
9712 #endif
9713     case TARGET_NR_munmap:
9714         return get_errno(target_munmap(arg1, arg2));
9715     case TARGET_NR_mprotect:
9716         {
9717             TaskState *ts = cpu->opaque;
9718             /* Special hack to detect libc making the stack executable.  */
9719             if ((arg3 & PROT_GROWSDOWN)
9720                 && arg1 >= ts->info->stack_limit
9721                 && arg1 <= ts->info->start_stack) {
9722                 arg3 &= ~PROT_GROWSDOWN;
9723                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9724                 arg1 = ts->info->stack_limit;
9725             }
9726         }
9727         return get_errno(target_mprotect(arg1, arg2, arg3));
9728 #ifdef TARGET_NR_mremap
9729     case TARGET_NR_mremap:
9730         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9731 #endif
9732         /* ??? msync/mlock/munlock are broken for softmmu.  */
9733 #ifdef TARGET_NR_msync
9734     case TARGET_NR_msync:
9735         return get_errno(msync(g2h(arg1), arg2, arg3));
9736 #endif
9737 #ifdef TARGET_NR_mlock
9738     case TARGET_NR_mlock:
9739         return get_errno(mlock(g2h(arg1), arg2));
9740 #endif
9741 #ifdef TARGET_NR_munlock
9742     case TARGET_NR_munlock:
9743         return get_errno(munlock(g2h(arg1), arg2));
9744 #endif
9745 #ifdef TARGET_NR_mlockall
9746     case TARGET_NR_mlockall:
9747         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9748 #endif
9749 #ifdef TARGET_NR_munlockall
9750     case TARGET_NR_munlockall:
9751         return get_errno(munlockall());
9752 #endif
9753 #ifdef TARGET_NR_truncate
9754     case TARGET_NR_truncate:
9755         if (!(p = lock_user_string(arg1)))
9756             return -TARGET_EFAULT;
9757         ret = get_errno(truncate(p, arg2));
9758         unlock_user(p, arg1, 0);
9759         return ret;
9760 #endif
9761 #ifdef TARGET_NR_ftruncate
9762     case TARGET_NR_ftruncate:
9763         return get_errno(ftruncate(arg1, arg2));
9764 #endif
9765     case TARGET_NR_fchmod:
9766         return get_errno(fchmod(arg1, arg2));
9767 #if defined(TARGET_NR_fchmodat)
9768     case TARGET_NR_fchmodat:
9769         if (!(p = lock_user_string(arg2)))
9770             return -TARGET_EFAULT;
9771         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9772         unlock_user(p, arg2, 0);
9773         return ret;
9774 #endif
9775     case TARGET_NR_getpriority:
9776         /* Note that negative values are valid for getpriority, so we must
9777            differentiate based on errno settings.  */
9778         errno = 0;
9779         ret = getpriority(arg1, arg2);
9780         if (ret == -1 && errno != 0) {
9781             return -host_to_target_errno(errno);
9782         }
9783 #ifdef TARGET_ALPHA
9784         /* Return value is the unbiased priority.  Signal no error.  */
9785         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9786 #else
9787         /* Return value is a biased priority to avoid negative numbers.  */
9788         ret = 20 - ret;
9789 #endif
9790         return ret;
9791     case TARGET_NR_setpriority:
9792         return get_errno(setpriority(arg1, arg2, arg3));
9793 #ifdef TARGET_NR_statfs
9794     case TARGET_NR_statfs:
9795         if (!(p = lock_user_string(arg1))) {
9796             return -TARGET_EFAULT;
9797         }
9798         ret = get_errno(statfs(path(p), &stfs));
9799         unlock_user(p, arg1, 0);
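    /* TARGET_NR_fstatfs jumps here to share the conversion below. */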
9800     convert_statfs:
9801         if (!is_error(ret)) {
9802             struct target_statfs *target_stfs;
9803 
9804             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9805                 return -TARGET_EFAULT;
9806             __put_user(stfs.f_type, &target_stfs->f_type);
9807             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9808             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9809             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9810             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9811             __put_user(stfs.f_files, &target_stfs->f_files);
9812             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9813             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9814             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9815             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9816             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9817 #ifdef _STATFS_F_FLAGS
9818             __put_user(stfs.f_flags, &target_stfs->f_flags);
9819 #else
9820             __put_user(0, &target_stfs->f_flags);
9821 #endif
9822             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9823             unlock_user_struct(target_stfs, arg2, 1);
9824         }
9825         return ret;
9826 #endif
9827 #ifdef TARGET_NR_fstatfs
9828     case TARGET_NR_fstatfs:
9829         ret = get_errno(fstatfs(arg1, &stfs));
9830         goto convert_statfs;
9831 #endif
9832 #ifdef TARGET_NR_statfs64
9833     case TARGET_NR_statfs64:
9834         if (!(p = lock_user_string(arg1))) {
9835             return -TARGET_EFAULT;
9836         }
9837         ret = get_errno(statfs(path(p), &stfs));
9838         unlock_user(p, arg1, 0);
9839     convert_statfs64:
9840         if (!is_error(ret)) {
9841             struct target_statfs64 *target_stfs;
9842 
9843             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9844                 return -TARGET_EFAULT;
9845             __put_user(stfs.f_type, &target_stfs->f_type);
9846             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9847             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9848             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9849             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9850             __put_user(stfs.f_files, &target_stfs->f_files);
9851             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9852             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9853             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9854             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9855             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9856 #ifdef _STATFS_F_FLAGS
9857             __put_user(stfs.f_flags, &target_stfs->f_flags);
9858 #else
9859             __put_user(0, &target_stfs->f_flags);
9860 #endif
9861             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9862             unlock_user_struct(target_stfs, arg3, 1);
9863         }
9864         return ret;
9865     case TARGET_NR_fstatfs64:
9866         ret = get_errno(fstatfs(arg1, &stfs));
9867         goto convert_statfs64;
9868 #endif
9869 #ifdef TARGET_NR_socketcall
9870     case TARGET_NR_socketcall:
9871         return do_socketcall(arg1, arg2);
9872 #endif
9873 #ifdef TARGET_NR_accept
9874     case TARGET_NR_accept:
9875         return do_accept4(arg1, arg2, arg3, 0);
9876 #endif
9877 #ifdef TARGET_NR_accept4
9878     case TARGET_NR_accept4:
9879         return do_accept4(arg1, arg2, arg3, arg4);
9880 #endif
9881 #ifdef TARGET_NR_bind
9882     case TARGET_NR_bind:
9883         return do_bind(arg1, arg2, arg3);
9884 #endif
9885 #ifdef TARGET_NR_connect
9886     case TARGET_NR_connect:
9887         return do_connect(arg1, arg2, arg3);
9888 #endif
9889 #ifdef TARGET_NR_getpeername
9890     case TARGET_NR_getpeername:
9891         return do_getpeername(arg1, arg2, arg3);
9892 #endif
9893 #ifdef TARGET_NR_getsockname
9894     case TARGET_NR_getsockname:
9895         return do_getsockname(arg1, arg2, arg3);
9896 #endif
9897 #ifdef TARGET_NR_getsockopt
9898     case TARGET_NR_getsockopt:
9899         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9900 #endif
9901 #ifdef TARGET_NR_listen
9902     case TARGET_NR_listen:
9903         return get_errno(listen(arg1, arg2));
9904 #endif
9905 #ifdef TARGET_NR_recv
9906     case TARGET_NR_recv:
9907         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9908 #endif
9909 #ifdef TARGET_NR_recvfrom
9910     case TARGET_NR_recvfrom:
9911         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9912 #endif
9913 #ifdef TARGET_NR_recvmsg
9914     case TARGET_NR_recvmsg:
9915         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9916 #endif
9917 #ifdef TARGET_NR_send
9918     case TARGET_NR_send:
9919         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9920 #endif
9921 #ifdef TARGET_NR_sendmsg
9922     case TARGET_NR_sendmsg:
9923         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9924 #endif
9925 #ifdef TARGET_NR_sendmmsg
9926     case TARGET_NR_sendmmsg:
9927         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9928 #endif
9929 #ifdef TARGET_NR_recvmmsg
9930     case TARGET_NR_recvmmsg:
9931         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9932 #endif
9933 #ifdef TARGET_NR_sendto
9934     case TARGET_NR_sendto:
9935         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9936 #endif
9937 #ifdef TARGET_NR_shutdown
9938     case TARGET_NR_shutdown:
9939         return get_errno(shutdown(arg1, arg2));
9940 #endif
9941 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9942     case TARGET_NR_getrandom:
9943         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9944         if (!p) {
9945             return -TARGET_EFAULT;
9946         }
9947         ret = get_errno(getrandom(p, arg2, arg3));
9948         unlock_user(p, arg1, ret);
9949         return ret;
9950 #endif
9951 #ifdef TARGET_NR_socket
9952     case TARGET_NR_socket:
9953         return do_socket(arg1, arg2, arg3);
9954 #endif
9955 #ifdef TARGET_NR_socketpair
9956     case TARGET_NR_socketpair:
9957         return do_socketpair(arg1, arg2, arg3, arg4);
9958 #endif
9959 #ifdef TARGET_NR_setsockopt
9960     case TARGET_NR_setsockopt:
9961         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9962 #endif
9963 #if defined(TARGET_NR_syslog)
9964     case TARGET_NR_syslog:
9965         {
9966             int len = arg2;
9967 
9968             switch (arg1) {
9969             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9970             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9971             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9972             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9973             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9974             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9975             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9976             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9977                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9978             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9979             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9980             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9981                 {
9982                     if (len < 0) {
9983                         return -TARGET_EINVAL;
9984                     }
9985                     if (len == 0) {
9986                         return 0;
9987                     }
9988                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9989                     if (!p) {
9990                         return -TARGET_EFAULT;
9991                     }
9992                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9993                     unlock_user(p, arg2, arg3);
9994                 }
9995                 return ret;
9996             default:
9997                 return -TARGET_EINVAL;
9998             }
9999         }
10000         break;
10001 #endif
10002     case TARGET_NR_setitimer:
10003         {
10004             struct itimerval value, ovalue, *pvalue;
10005 
10006             if (arg2) {
10007                 pvalue = &value;
10008                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10009                     || copy_from_user_timeval(&pvalue->it_value,
10010                                               arg2 + sizeof(struct target_timeval)))
10011                     return -TARGET_EFAULT;
10012             } else {
10013                 pvalue = NULL;
10014             }
10015             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10016             if (!is_error(ret) && arg3) {
10017                 if (copy_to_user_timeval(arg3,
10018                                          &ovalue.it_interval)
10019                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10020                                             &ovalue.it_value))
10021                     return -TARGET_EFAULT;
10022             }
10023         }
10024         return ret;
10025     case TARGET_NR_getitimer:
10026         {
10027             struct itimerval value;
10028 
10029             ret = get_errno(getitimer(arg1, &value));
10030             if (!is_error(ret) && arg2) {
10031                 if (copy_to_user_timeval(arg2,
10032                                          &value.it_interval)
10033                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10034                                             &value.it_value))
10035                     return -TARGET_EFAULT;
10036             }
10037         }
10038         return ret;
10039 #ifdef TARGET_NR_stat
10040     case TARGET_NR_stat:
10041         if (!(p = lock_user_string(arg1))) {
10042             return -TARGET_EFAULT;
10043         }
10044         ret = get_errno(stat(path(p), &st));
10045         unlock_user(p, arg1, 0);
10046         goto do_stat;
10047 #endif
10048 #ifdef TARGET_NR_lstat
10049     case TARGET_NR_lstat:
10050         if (!(p = lock_user_string(arg1))) {
10051             return -TARGET_EFAULT;
10052         }
10053         ret = get_errno(lstat(path(p), &st));
10054         unlock_user(p, arg1, 0);
10055         goto do_stat;
10056 #endif
10057 #ifdef TARGET_NR_fstat
10058     case TARGET_NR_fstat:
10059         {
10060             ret = get_errno(fstat(arg1, &st));
10061 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
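        /* TARGET_NR_stat and TARGET_NR_lstat jump here to share the
         * conversion below. */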
10062         do_stat:
10063 #endif
10064             if (!is_error(ret)) {
10065                 struct target_stat *target_st;
10066 
10067                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10068                     return -TARGET_EFAULT;
10069                 memset(target_st, 0, sizeof(*target_st));
10070                 __put_user(st.st_dev, &target_st->st_dev);
10071                 __put_user(st.st_ino, &target_st->st_ino);
10072                 __put_user(st.st_mode, &target_st->st_mode);
10073                 __put_user(st.st_uid, &target_st->st_uid);
10074                 __put_user(st.st_gid, &target_st->st_gid);
10075                 __put_user(st.st_nlink, &target_st->st_nlink);
10076                 __put_user(st.st_rdev, &target_st->st_rdev);
10077                 __put_user(st.st_size, &target_st->st_size);
10078                 __put_user(st.st_blksize, &target_st->st_blksize);
10079                 __put_user(st.st_blocks, &target_st->st_blocks);
10080                 __put_user(st.st_atime, &target_st->target_st_atime);
10081                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10082                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10083 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10084     defined(TARGET_STAT_HAVE_NSEC)
10085                 __put_user(st.st_atim.tv_nsec,
10086                            &target_st->target_st_atime_nsec);
10087                 __put_user(st.st_mtim.tv_nsec,
10088                            &target_st->target_st_mtime_nsec);
10089                 __put_user(st.st_ctim.tv_nsec,
10090                            &target_st->target_st_ctime_nsec);
10091 #endif
10092                 unlock_user_struct(target_st, arg2, 1);
10093             }
10094         }
10095         return ret;
10096 #endif
10097     case TARGET_NR_vhangup:
10098         return get_errno(vhangup());
10099 #ifdef TARGET_NR_syscall
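    /*
     * Indirect syscall: the real syscall number arrives in arg1 (only
     * the low 16 bits are used) and the remaining arguments shift up
     * by one position.
     */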
10100     case TARGET_NR_syscall:
10101         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10102                           arg6, arg7, arg8, 0);
10103 #endif
10104 #if defined(TARGET_NR_wait4)
10105     case TARGET_NR_wait4:
10106         {
10107             int status;
10108             abi_long status_ptr = arg2;
10109             struct rusage rusage, *rusage_ptr;
10110             abi_ulong target_rusage = arg4;
10111             abi_long rusage_err;
10112             if (target_rusage)
10113                 rusage_ptr = &rusage;
10114             else
10115                 rusage_ptr = NULL;
10116             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10117             if (!is_error(ret)) {
10118                 if (status_ptr && ret) {
10119                     status = host_to_target_waitstatus(status);
10120                     if (put_user_s32(status, status_ptr))
10121                         return -TARGET_EFAULT;
10122                 }
10123                 if (target_rusage) {
10124                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10125                     if (rusage_err) {
10126                         ret = rusage_err;
10127                     }
10128                 }
10129             }
10130         }
10131         return ret;
10132 #endif
10133 #ifdef TARGET_NR_swapoff
10134     case TARGET_NR_swapoff:
10135         if (!(p = lock_user_string(arg1)))
10136             return -TARGET_EFAULT;
10137         ret = get_errno(swapoff(p));
10138         unlock_user(p, arg1, 0);
10139         return ret;
10140 #endif
10141     case TARGET_NR_sysinfo:
10142         {
10143             struct target_sysinfo *target_value;
10144             struct sysinfo value;
10145             ret = get_errno(sysinfo(&value));
10146             if (!is_error(ret) && arg1)
10147             {
10148                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10149                     return -TARGET_EFAULT;
10150                 __put_user(value.uptime, &target_value->uptime);
10151                 __put_user(value.loads[0], &target_value->loads[0]);
10152                 __put_user(value.loads[1], &target_value->loads[1]);
10153                 __put_user(value.loads[2], &target_value->loads[2]);
10154                 __put_user(value.totalram, &target_value->totalram);
10155                 __put_user(value.freeram, &target_value->freeram);
10156                 __put_user(value.sharedram, &target_value->sharedram);
10157                 __put_user(value.bufferram, &target_value->bufferram);
10158                 __put_user(value.totalswap, &target_value->totalswap);
10159                 __put_user(value.freeswap, &target_value->freeswap);
10160                 __put_user(value.procs, &target_value->procs);
10161                 __put_user(value.totalhigh, &target_value->totalhigh);
10162                 __put_user(value.freehigh, &target_value->freehigh);
10163                 __put_user(value.mem_unit, &target_value->mem_unit);
10164                 unlock_user_struct(target_value, arg1, 1);
10165             }
10166         }
10167         return ret;
10168 #ifdef TARGET_NR_ipc
10169     case TARGET_NR_ipc:
10170         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10171 #endif
10172 #ifdef TARGET_NR_semget
10173     case TARGET_NR_semget:
10174         return get_errno(semget(arg1, arg2, arg3));
10175 #endif
10176 #ifdef TARGET_NR_semop
10177     case TARGET_NR_semop:
10178         return do_semtimedop(arg1, arg2, arg3, 0, false);
10179 #endif
10180 #ifdef TARGET_NR_semtimedop
10181     case TARGET_NR_semtimedop:
10182         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10183 #endif
10184 #ifdef TARGET_NR_semtimedop_time64
10185     case TARGET_NR_semtimedop_time64:
10186         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10187 #endif
10188 #ifdef TARGET_NR_semctl
10189     case TARGET_NR_semctl:
10190         return do_semctl(arg1, arg2, arg3, arg4);
10191 #endif
10192 #ifdef TARGET_NR_msgctl
10193     case TARGET_NR_msgctl:
10194         return do_msgctl(arg1, arg2, arg3);
10195 #endif
10196 #ifdef TARGET_NR_msgget
10197     case TARGET_NR_msgget:
10198         return get_errno(msgget(arg1, arg2));
10199 #endif
10200 #ifdef TARGET_NR_msgrcv
10201     case TARGET_NR_msgrcv:
10202         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10203 #endif
10204 #ifdef TARGET_NR_msgsnd
10205     case TARGET_NR_msgsnd:
10206         return do_msgsnd(arg1, arg2, arg3, arg4);
10207 #endif
10208 #ifdef TARGET_NR_shmget
10209     case TARGET_NR_shmget:
10210         return get_errno(shmget(arg1, arg2, arg3));
10211 #endif
10212 #ifdef TARGET_NR_shmctl
10213     case TARGET_NR_shmctl:
10214         return do_shmctl(arg1, arg2, arg3);
10215 #endif
10216 #ifdef TARGET_NR_shmat
10217     case TARGET_NR_shmat:
10218         return do_shmat(cpu_env, arg1, arg2, arg3);
10219 #endif
10220 #ifdef TARGET_NR_shmdt
10221     case TARGET_NR_shmdt:
10222         return do_shmdt(arg1);
10223 #endif
10224     case TARGET_NR_fsync:
10225         return get_errno(fsync(arg1));
10226     case TARGET_NR_clone:
10227         /* Linux manages to have three different orderings for its
10228          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10229          * match the kernel's CONFIG_CLONE_* settings.
10230          * Microblaze is further special in that it uses a sixth
10231          * implicit argument to clone for the TLS pointer.
10232          */
10233 #if defined(TARGET_MICROBLAZE)
10234         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10235 #elif defined(TARGET_CLONE_BACKWARDS)
10236         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10237 #elif defined(TARGET_CLONE_BACKWARDS2)
10238         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10239 #else
10240         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10241 #endif
10242         return ret;
10243 #ifdef __NR_exit_group
10244         /* new thread calls */
10245     case TARGET_NR_exit_group:
10246         preexit_cleanup(cpu_env, arg1);
10247         return get_errno(exit_group(arg1));
10248 #endif
10249     case TARGET_NR_setdomainname:
10250         if (!(p = lock_user_string(arg1)))
10251             return -TARGET_EFAULT;
10252         ret = get_errno(setdomainname(p, arg2));
10253         unlock_user(p, arg1, 0);
10254         return ret;
10255     case TARGET_NR_uname:
10256         /* no need to transcode because we use the linux syscall */
10257         {
10258             struct new_utsname * buf;
10259 
10260             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10261                 return -TARGET_EFAULT;
10262             ret = get_errno(sys_uname(buf));
10263             if (!is_error(ret)) {
10264                 /* Overwrite the native machine name with whatever is being
10265                    emulated. */
10266                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10267                           sizeof(buf->machine));
10268                 /* Allow the user to override the reported release.  */
10269                 if (qemu_uname_release && *qemu_uname_release) {
10270                     g_strlcpy(buf->release, qemu_uname_release,
10271                               sizeof(buf->release));
10272                 }
10273             }
10274             unlock_user_struct(buf, arg1, 1);
10275         }
10276         return ret;
10277 #ifdef TARGET_I386
10278     case TARGET_NR_modify_ldt:
10279         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10280 #if !defined(TARGET_X86_64)
10281     case TARGET_NR_vm86:
10282         return do_vm86(cpu_env, arg1, arg2);
10283 #endif
10284 #endif
10285 #if defined(TARGET_NR_adjtimex)
10286     case TARGET_NR_adjtimex:
10287         {
10288             struct timex host_buf;
10289 
10290             if (target_to_host_timex(&host_buf, arg1) != 0) {
10291                 return -TARGET_EFAULT;
10292             }
10293             ret = get_errno(adjtimex(&host_buf));
10294             if (!is_error(ret)) {
10295                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10296                     return -TARGET_EFAULT;
10297                 }
10298             }
10299         }
10300         return ret;
10301 #endif
10302 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10303     case TARGET_NR_clock_adjtime:
10304         {
10305             struct timex htx, *phtx = &htx;
10306 
10307             if (target_to_host_timex(phtx, arg2) != 0) {
10308                 return -TARGET_EFAULT;
10309             }
10310             ret = get_errno(clock_adjtime(arg1, phtx));
10311             if (!is_error(ret) && phtx) {
10312                 if (host_to_target_timex(arg2, phtx) != 0) {
10313                     return -TARGET_EFAULT;
10314                 }
10315             }
10316         }
10317         return ret;
10318 #endif
10319 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10320     case TARGET_NR_clock_adjtime64:
10321         {
10322             struct timex htx;
10323 
10324             if (target_to_host_timex64(&htx, arg2) != 0) {
10325                 return -TARGET_EFAULT;
10326             }
10327             ret = get_errno(clock_adjtime(arg1, &htx));
10328             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10329                     return -TARGET_EFAULT;
10330             }
10331         }
10332         return ret;
10333 #endif
10334     case TARGET_NR_getpgid:
10335         return get_errno(getpgid(arg1));
10336     case TARGET_NR_fchdir:
10337         return get_errno(fchdir(arg1));
10338     case TARGET_NR_personality:
10339         return get_errno(personality(arg1));
10340 #ifdef TARGET_NR__llseek /* Not on alpha */
10341     case TARGET_NR__llseek:
10342         {
10343             int64_t res;
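            /*
             * Without a host llseek syscall, emulate it with a plain
             * lseek on the 64-bit offset assembled from arg2/arg3.
             */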
10344 #if !defined(__NR_llseek)
10345             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10346             if (res == -1) {
10347                 ret = get_errno(res);
10348             } else {
10349                 ret = 0;
10350             }
10351 #else
10352             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10353 #endif
10354             if ((ret == 0) && put_user_s64(res, arg4)) {
10355                 return -TARGET_EFAULT;
10356             }
10357         }
10358         return ret;
10359 #endif
10360 #ifdef TARGET_NR_getdents
10361     case TARGET_NR_getdents:
10362 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10363 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
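        /*
         * The host linux_dirent records are wider than the 32-bit target
         * layout, so read into a temporary host buffer and repack each
         * record into the guest-supplied buffer.
         */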
10364         {
10365             struct target_dirent *target_dirp;
10366             struct linux_dirent *dirp;
10367             abi_long count = arg3;
10368 
10369             dirp = g_try_malloc(count);
10370             if (!dirp) {
10371                 return -TARGET_ENOMEM;
10372             }
10373 
10374             ret = get_errno(sys_getdents(arg1, dirp, count));
10375             if (!is_error(ret)) {
10376                 struct linux_dirent *de;
10377 		struct target_dirent *tde;
10378                 int len = ret;
10379                 int reclen, treclen;
10380 		int count1, tnamelen;
10381 
10382 		count1 = 0;
10383                 de = dirp;
10384                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10385                     return -TARGET_EFAULT;
10386 		tde = target_dirp;
10387                 while (len > 0) {
10388                     reclen = de->d_reclen;
10389                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10390                     assert(tnamelen >= 0);
10391                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10392                     assert(count1 + treclen <= count);
10393                     tde->d_reclen = tswap16(treclen);
10394                     tde->d_ino = tswapal(de->d_ino);
10395                     tde->d_off = tswapal(de->d_off);
10396                     memcpy(tde->d_name, de->d_name, tnamelen);
10397                     de = (struct linux_dirent *)((char *)de + reclen);
10398                     len -= reclen;
10399                     tde = (struct target_dirent *)((char *)tde + treclen);
10400 		    count1 += treclen;
10401                 }
10402 		ret = count1;
10403                 unlock_user(target_dirp, arg2, ret);
10404             }
10405             g_free(dirp);
10406         }
10407 #else
10408         {
10409             struct linux_dirent *dirp;
10410             abi_long count = arg3;
10411 
10412             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10413                 return -TARGET_EFAULT;
10414             ret = get_errno(sys_getdents(arg1, dirp, count));
10415             if (!is_error(ret)) {
10416                 struct linux_dirent *de;
10417                 int len = ret;
10418                 int reclen;
10419                 de = dirp;
10420                 while (len > 0) {
10421                     reclen = de->d_reclen;
10422                     if (reclen > len)
10423                         break;
10424                     de->d_reclen = tswap16(reclen);
10425                     tswapls(&de->d_ino);
10426                     tswapls(&de->d_off);
10427                     de = (struct linux_dirent *)((char *)de + reclen);
10428                     len -= reclen;
10429                 }
10430             }
10431             unlock_user(dirp, arg2, ret);
10432         }
10433 #endif
10434 #else
10435         /* Implement getdents in terms of getdents64 */
10436         {
10437             struct linux_dirent64 *dirp;
10438             abi_long count = arg3;
10439 
10440             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10441             if (!dirp) {
10442                 return -TARGET_EFAULT;
10443             }
10444             ret = get_errno(sys_getdents64(arg1, dirp, count));
10445             if (!is_error(ret)) {
10446                 /* Convert the dirent64 structs to target dirent.  We do this
10447                  * in-place, since we can guarantee that a target_dirent is no
10448                  * larger than a dirent64; however this means we have to be
10449                  * careful to read everything before writing in the new format.
10450                  */
10451                 struct linux_dirent64 *de;
10452                 struct target_dirent *tde;
10453                 int len = ret;
10454                 int tlen = 0;
10455 
10456                 de = dirp;
10457                 tde = (struct target_dirent *)dirp;
10458                 while (len > 0) {
10459                     int namelen, treclen;
10460                     int reclen = de->d_reclen;
10461                     uint64_t ino = de->d_ino;
10462                     int64_t off = de->d_off;
10463                     uint8_t type = de->d_type;
10464 
10465                     namelen = strlen(de->d_name);
10466                     treclen = offsetof(struct target_dirent, d_name)
10467                         + namelen + 2;
10468                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10469 
10470                     memmove(tde->d_name, de->d_name, namelen + 1);
10471                     tde->d_ino = tswapal(ino);
10472                     tde->d_off = tswapal(off);
10473                     tde->d_reclen = tswap16(treclen);
10474                     /* The target_dirent type is in what was formerly a padding
10475                      * byte at the end of the structure:
10476                      */
10477                     *(((char *)tde) + treclen - 1) = type;
10478 
10479                     de = (struct linux_dirent64 *)((char *)de + reclen);
10480                     tde = (struct target_dirent *)((char *)tde + treclen);
10481                     len -= reclen;
10482                     tlen += treclen;
10483                 }
10484                 ret = tlen;
10485             }
10486             unlock_user(dirp, arg2, ret);
10487         }
10488 #endif
10489         return ret;
10490 #endif /* TARGET_NR_getdents */
10491 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10492     case TARGET_NR_getdents64:
10493         {
10494             struct linux_dirent64 *dirp;
10495             abi_long count = arg3;
10496             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10497                 return -TARGET_EFAULT;
10498             ret = get_errno(sys_getdents64(arg1, dirp, count));
10499             if (!is_error(ret)) {
10500                 struct linux_dirent64 *de;
10501                 int len = ret;
10502                 int reclen;
10503                 de = dirp;
10504                 while (len > 0) {
10505                     reclen = de->d_reclen;
10506                     if (reclen > len)
10507                         break;
10508                     de->d_reclen = tswap16(reclen);
10509                     tswap64s((uint64_t *)&de->d_ino);
10510                     tswap64s((uint64_t *)&de->d_off);
10511                     de = (struct linux_dirent64 *)((char *)de + reclen);
10512                     len -= reclen;
10513                 }
10514             }
10515             unlock_user(dirp, arg2, ret);
10516         }
10517         return ret;
10518 #endif /* TARGET_NR_getdents64 */
10519 #if defined(TARGET_NR__newselect)
10520     case TARGET_NR__newselect:
10521         return do_select(arg1, arg2, arg3, arg4, arg5);
10522 #endif
10523 #ifdef TARGET_NR_poll
10524     case TARGET_NR_poll:
10525         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10526 #endif
10527 #ifdef TARGET_NR_ppoll
10528     case TARGET_NR_ppoll:
10529         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10530 #endif
10531 #ifdef TARGET_NR_ppoll_time64
10532     case TARGET_NR_ppoll_time64:
10533         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10534 #endif
10535     case TARGET_NR_flock:
10536         /* NOTE: the flock constant seems to be the same for every
10537            Linux platform */
10538         return get_errno(safe_flock(arg1, arg2));
10539     case TARGET_NR_readv:
10540         {
10541             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10542             if (vec != NULL) {
10543                 ret = get_errno(safe_readv(arg1, vec, arg3));
10544                 unlock_iovec(vec, arg2, arg3, 1);
10545             } else {
10546                 ret = -host_to_target_errno(errno);
10547             }
10548         }
10549         return ret;
10550     case TARGET_NR_writev:
10551         {
10552             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10553             if (vec != NULL) {
10554                 ret = get_errno(safe_writev(arg1, vec, arg3));
10555                 unlock_iovec(vec, arg2, arg3, 0);
10556             } else {
10557                 ret = -host_to_target_errno(errno);
10558             }
10559         }
10560         return ret;
10561 #if defined(TARGET_NR_preadv)
10562     case TARGET_NR_preadv:
10563         {
10564             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10565             if (vec != NULL) {
10566                 unsigned long low, high;
10567 
10568                 target_to_host_low_high(arg4, arg5, &low, &high);
10569                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10570                 unlock_iovec(vec, arg2, arg3, 1);
10571             } else {
10572                 ret = -host_to_target_errno(errno);
10573            }
10574         }
10575         return ret;
10576 #endif
10577 #if defined(TARGET_NR_pwritev)
10578     case TARGET_NR_pwritev:
10579         {
10580             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10581             if (vec != NULL) {
10582                 unsigned long low, high;
10583 
10584                 target_to_host_low_high(arg4, arg5, &low, &high);
10585                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10586                 unlock_iovec(vec, arg2, arg3, 0);
10587             } else {
10588                 ret = -host_to_target_errno(errno);
10589             }
10590         }
10591         return ret;
10592 #endif
10593     case TARGET_NR_getsid:
10594         return get_errno(getsid(arg1));
10595 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10596     case TARGET_NR_fdatasync:
10597         return get_errno(fdatasync(arg1));
10598 #endif
10599     case TARGET_NR_sched_getaffinity:
10600         {
10601             unsigned int mask_size;
10602             unsigned long *mask;
10603 
10604             /*
10605              * sched_getaffinity needs multiples of ulong, so we need to take
10606              * care of mismatches between target ulong and host ulong sizes.
10607              */
10608             if (arg2 & (sizeof(abi_ulong) - 1)) {
10609                 return -TARGET_EINVAL;
10610             }
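                  /*
                   * Round the request up to a whole number of host longs,
                   * e.g. a 4-byte abi_ulong length becomes 8 bytes on a
                   * 64-bit host.
                   */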
10611             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10612 
10613             mask = alloca(mask_size);
10614             memset(mask, 0, mask_size);
10615             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10616 
10617             if (!is_error(ret)) {
10618                 if (ret > arg2) {
10619                     /* More data returned than will fit in the caller's buffer.
10620                      * This only happens if sizeof(abi_long) < sizeof(long)
10621                      * and the caller passed us a buffer holding an odd number
10622                      * of abi_longs. If the host kernel is actually using the
10623                      * extra 4 bytes then fail EINVAL; otherwise we can just
10624                      * ignore them and only copy the interesting part.
10625                      */
10626                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10627                     if (numcpus > arg2 * 8) {
10628                         return -TARGET_EINVAL;
10629                     }
10630                     ret = arg2;
10631                 }
10632 
10633                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10634                     return -TARGET_EFAULT;
10635                 }
10636             }
10637         }
10638         return ret;
10639     case TARGET_NR_sched_setaffinity:
10640         {
10641             unsigned int mask_size;
10642             unsigned long *mask;
10643 
10644             /*
10645              * sched_setaffinity needs multiples of ulong, so we need to take
10646              * care of mismatches between target ulong and host ulong sizes.
10647              */
10648             if (arg2 & (sizeof(abi_ulong) - 1)) {
10649                 return -TARGET_EINVAL;
10650             }
10651             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10652             mask = alloca(mask_size);
10653 
10654             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10655             if (ret) {
10656                 return ret;
10657             }
10658 
10659             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10660         }
10661     case TARGET_NR_getcpu:
10662         {
10663             unsigned cpu, node;
10664             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10665                                        arg2 ? &node : NULL,
10666                                        NULL));
10667             if (is_error(ret)) {
10668                 return ret;
10669             }
10670             if (arg1 && put_user_u32(cpu, arg1)) {
10671                 return -TARGET_EFAULT;
10672             }
10673             if (arg2 && put_user_u32(node, arg2)) {
10674                 return -TARGET_EFAULT;
10675             }
10676         }
10677         return ret;
10678     case TARGET_NR_sched_setparam:
10679         {
10680             struct sched_param *target_schp;
10681             struct sched_param schp;
10682 
10683             if (arg2 == 0) {
10684                 return -TARGET_EINVAL;
10685             }
10686             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10687                 return -TARGET_EFAULT;
10688             schp.sched_priority = tswap32(target_schp->sched_priority);
10689             unlock_user_struct(target_schp, arg2, 0);
10690             return get_errno(sched_setparam(arg1, &schp));
10691         }
10692     case TARGET_NR_sched_getparam:
10693         {
10694             struct sched_param *target_schp;
10695             struct sched_param schp;
10696 
10697             if (arg2 == 0) {
10698                 return -TARGET_EINVAL;
10699             }
10700             ret = get_errno(sched_getparam(arg1, &schp));
10701             if (!is_error(ret)) {
10702                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10703                     return -TARGET_EFAULT;
10704                 target_schp->sched_priority = tswap32(schp.sched_priority);
10705                 unlock_user_struct(target_schp, arg2, 1);
10706             }
10707         }
10708         return ret;
10709     case TARGET_NR_sched_setscheduler:
10710         {
10711             struct sched_param *target_schp;
10712             struct sched_param schp;
10713             if (arg3 == 0) {
10714                 return -TARGET_EINVAL;
10715             }
10716             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10717                 return -TARGET_EFAULT;
10718             schp.sched_priority = tswap32(target_schp->sched_priority);
10719             unlock_user_struct(target_schp, arg3, 0);
10720             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10721         }
10722     case TARGET_NR_sched_getscheduler:
10723         return get_errno(sched_getscheduler(arg1));
10724     case TARGET_NR_sched_yield:
10725         return get_errno(sched_yield());
10726     case TARGET_NR_sched_get_priority_max:
10727         return get_errno(sched_get_priority_max(arg1));
10728     case TARGET_NR_sched_get_priority_min:
10729         return get_errno(sched_get_priority_min(arg1));
10730 #ifdef TARGET_NR_sched_rr_get_interval
10731     case TARGET_NR_sched_rr_get_interval:
10732         {
10733             struct timespec ts;
10734             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10735             if (!is_error(ret)) {
10736                 ret = host_to_target_timespec(arg2, &ts);
10737             }
10738         }
10739         return ret;
10740 #endif
10741 #ifdef TARGET_NR_sched_rr_get_interval_time64
10742     case TARGET_NR_sched_rr_get_interval_time64:
10743         {
10744             struct timespec ts;
10745             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10746             if (!is_error(ret)) {
10747                 ret = host_to_target_timespec64(arg2, &ts);
10748             }
10749         }
10750         return ret;
10751 #endif
10752 #if defined(TARGET_NR_nanosleep)
10753     case TARGET_NR_nanosleep:
10754         {
10755             struct timespec req, rem;
10756             target_to_host_timespec(&req, arg1);
10757             ret = get_errno(safe_nanosleep(&req, &rem));
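                  /*
                   * rem is only meaningful when the sleep was interrupted,
                   * so it is copied back to the guest only on error
                   * (typically EINTR).
                   */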
10758             if (is_error(ret) && arg2) {
10759                 host_to_target_timespec(arg2, &rem);
10760             }
10761         }
10762         return ret;
10763 #endif
10764     case TARGET_NR_prctl:
10765         switch (arg1) {
10766         case PR_GET_PDEATHSIG:
10767         {
10768             int deathsig;
10769             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10770             if (!is_error(ret) && arg2
10771                 && put_user_s32(deathsig, arg2)) {
10772                 return -TARGET_EFAULT;
10773             }
10774             return ret;
10775         }
10776 #ifdef PR_GET_NAME
10777         case PR_GET_NAME:
10778         {
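                  /*
                   * The kernel's task comm name is at most 16 bytes
                   * including the terminating NUL, hence the fixed-size
                   * buffer here and in PR_SET_NAME below.
                   */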
10779             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10780             if (!name) {
10781                 return -TARGET_EFAULT;
10782             }
10783             ret = get_errno(prctl(arg1, (unsigned long)name,
10784                                   arg3, arg4, arg5));
10785             unlock_user(name, arg2, 16);
10786             return ret;
10787         }
10788         case PR_SET_NAME:
10789         {
10790             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10791             if (!name) {
10792                 return -TARGET_EFAULT;
10793             }
10794             ret = get_errno(prctl(arg1, (unsigned long)name,
10795                                   arg3, arg4, arg5));
10796             unlock_user(name, arg2, 0);
10797             return ret;
10798         }
10799 #endif
10800 #ifdef TARGET_MIPS
10801         case TARGET_PR_GET_FP_MODE:
10802         {
10803             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10804             ret = 0;
10805             if (env->CP0_Status & (1 << CP0St_FR)) {
10806                 ret |= TARGET_PR_FP_MODE_FR;
10807             }
10808             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10809                 ret |= TARGET_PR_FP_MODE_FRE;
10810             }
10811             return ret;
10812         }
10813         case TARGET_PR_SET_FP_MODE:
10814         {
10815             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10816             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10817             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10818             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10819             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10820 
10821             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10822                                             TARGET_PR_FP_MODE_FRE;
10823 
10824             /* If nothing to change, return right away, successfully.  */
10825             if (old_fr == new_fr && old_fre == new_fre) {
10826                 return 0;
10827             }
10828             /* Check the value is valid */
10829             if (arg2 & ~known_bits) {
10830                 return -TARGET_EOPNOTSUPP;
10831             }
10832             /* Setting FRE without FR is not supported.  */
10833             if (new_fre && !new_fr) {
10834                 return -TARGET_EOPNOTSUPP;
10835             }
10836             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10837                 /* FR1 is not supported */
10838                 return -TARGET_EOPNOTSUPP;
10839             }
10840             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10841                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10842                 /* cannot set FR=0 */
10843                 return -TARGET_EOPNOTSUPP;
10844             }
10845             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10846                 /* Cannot set FRE=1 */
10847                 return -TARGET_EOPNOTSUPP;
10848             }
10849 
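                  /*
                   * Switching FR changes how 64-bit FP values map onto the
                   * register file: with FR=0 a double is split across an
                   * even/odd pair, with FR=1 it lives entirely in one
                   * 64-bit register, so move the affected 32-bit halves.
                   */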
10850             int i;
10851             fpr_t *fpr = env->active_fpu.fpr;
10852             for (i = 0; i < 32 ; i += 2) {
10853                 if (!old_fr && new_fr) {
10854                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10855                 } else if (old_fr && !new_fr) {
10856                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10857                 }
10858             }
10859 
10860             if (new_fr) {
10861                 env->CP0_Status |= (1 << CP0St_FR);
10862                 env->hflags |= MIPS_HFLAG_F64;
10863             } else {
10864                 env->CP0_Status &= ~(1 << CP0St_FR);
10865                 env->hflags &= ~MIPS_HFLAG_F64;
10866             }
10867             if (new_fre) {
10868                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10869                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10870                     env->hflags |= MIPS_HFLAG_FRE;
10871                 }
10872             } else {
10873                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10874                 env->hflags &= ~MIPS_HFLAG_FRE;
10875             }
10876 
10877             return 0;
10878         }
10879 #endif /* MIPS */
10880 #ifdef TARGET_AARCH64
10881         case TARGET_PR_SVE_SET_VL:
10882             /*
10883              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10884              * PR_SVE_VL_INHERIT.  Note the kernel definition
10885              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10886              * even though the current architectural maximum is VQ=16.
10887              */
10888             ret = -TARGET_EINVAL;
10889             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10890                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10891                 CPUARMState *env = cpu_env;
10892                 ARMCPU *cpu = env_archcpu(env);
10893                 uint32_t vq, old_vq;
10894 
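                      /*
                       * ZCR_EL1.LEN holds the vector length in quadwords
                       * minus one; clamp the request to what this CPU model
                       * supports and narrow the live SVE state if the
                       * length shrinks.
                       */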
10895                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10896                 vq = MAX(arg2 / 16, 1);
10897                 vq = MIN(vq, cpu->sve_max_vq);
10898 
10899                 if (vq < old_vq) {
10900                     aarch64_sve_narrow_vq(env, vq);
10901                 }
10902                 env->vfp.zcr_el[1] = vq - 1;
10903                 arm_rebuild_hflags(env);
10904                 ret = vq * 16;
10905             }
10906             return ret;
10907         case TARGET_PR_SVE_GET_VL:
10908             ret = -TARGET_EINVAL;
10909             {
10910                 ARMCPU *cpu = env_archcpu(cpu_env);
10911                 if (cpu_isar_feature(aa64_sve, cpu)) {
10912                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10913                 }
10914             }
10915             return ret;
10916         case TARGET_PR_PAC_RESET_KEYS:
10917             {
10918                 CPUARMState *env = cpu_env;
10919                 ARMCPU *cpu = env_archcpu(env);
10920 
10921                 if (arg3 || arg4 || arg5) {
10922                     return -TARGET_EINVAL;
10923                 }
10924                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10925                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10926                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10927                                TARGET_PR_PAC_APGAKEY);
10928                     int ret = 0;
10929                     Error *err = NULL;
10930 
10931                     if (arg2 == 0) {
10932                         arg2 = all;
10933                     } else if (arg2 & ~all) {
10934                         return -TARGET_EINVAL;
10935                     }
10936                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10937                         ret |= qemu_guest_getrandom(&env->keys.apia,
10938                                                     sizeof(ARMPACKey), &err);
10939                     }
10940                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10941                         ret |= qemu_guest_getrandom(&env->keys.apib,
10942                                                     sizeof(ARMPACKey), &err);
10943                     }
10944                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10945                         ret |= qemu_guest_getrandom(&env->keys.apda,
10946                                                     sizeof(ARMPACKey), &err);
10947                     }
10948                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10949                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10950                                                     sizeof(ARMPACKey), &err);
10951                     }
10952                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10953                         ret |= qemu_guest_getrandom(&env->keys.apga,
10954                                                     sizeof(ARMPACKey), &err);
10955                     }
10956                     if (ret != 0) {
10957                         /*
10958                          * Some unknown failure in the crypto.  The best
10959                          * we can do is log it and fail the syscall.
10960                          * The real syscall cannot fail this way.
10961                          */
10962                         qemu_log_mask(LOG_UNIMP,
10963                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10964                                       error_get_pretty(err));
10965                         error_free(err);
10966                         return -TARGET_EIO;
10967                     }
10968                     return 0;
10969                 }
10970             }
10971             return -TARGET_EINVAL;
10972 #endif /* AARCH64 */
10973         case PR_GET_SECCOMP:
10974         case PR_SET_SECCOMP:
10975             /* Disable seccomp to prevent the target from disabling
10976              * syscalls we need. */
10977             return -TARGET_EINVAL;
10978         default:
10979             /* Most prctl options have no pointer arguments */
10980             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10981         }
10982         break;
10983 #ifdef TARGET_NR_arch_prctl
10984     case TARGET_NR_arch_prctl:
10985         return do_arch_prctl(cpu_env, arg1, arg2);
10986 #endif
10987 #ifdef TARGET_NR_pread64
10988     case TARGET_NR_pread64:
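              /*
               * On some 32-bit ABIs a 64-bit syscall argument must start in
               * an even-numbered register, so the offset pair is shifted up
               * by one slot; regpairs_aligned() reports that quirk.
               */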
10989         if (regpairs_aligned(cpu_env, num)) {
10990             arg4 = arg5;
10991             arg5 = arg6;
10992         }
10993         if (arg2 == 0 && arg3 == 0) {
10994             /* Special-case NULL buffer and zero length, which should succeed */
10995             p = 0;
10996         } else {
10997             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10998             if (!p) {
10999                 return -TARGET_EFAULT;
11000             }
11001         }
11002         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11003         unlock_user(p, arg2, ret);
11004         return ret;
11005     case TARGET_NR_pwrite64:
11006         if (regpairs_aligned(cpu_env, num)) {
11007             arg4 = arg5;
11008             arg5 = arg6;
11009         }
11010         if (arg2 == 0 && arg3 == 0) {
11011             /* Special-case NULL buffer and zero length, which should succeed */
11012             p = 0;
11013         } else {
11014             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11015             if (!p) {
11016                 return -TARGET_EFAULT;
11017             }
11018         }
11019         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11020         unlock_user(p, arg2, 0);
11021         return ret;
11022 #endif
11023     case TARGET_NR_getcwd:
11024         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11025             return -TARGET_EFAULT;
11026         ret = get_errno(sys_getcwd1(p, arg2));
11027         unlock_user(p, arg1, ret);
11028         return ret;
11029     case TARGET_NR_capget:
11030     case TARGET_NR_capset:
11031     {
11032         struct target_user_cap_header *target_header;
11033         struct target_user_cap_data *target_data = NULL;
11034         struct __user_cap_header_struct header;
11035         struct __user_cap_data_struct data[2];
11036         struct __user_cap_data_struct *dataptr = NULL;
11037         int i, target_datalen;
11038         int data_items = 1;
11039 
11040         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11041             return -TARGET_EFAULT;
11042         }
11043         header.version = tswap32(target_header->version);
11044         header.pid = tswap32(target_header->pid);
11045 
11046         if (header.version != _LINUX_CAPABILITY_VERSION) {
11047             /* Versions 2 and up take a pointer to two user_data structs */
11048             data_items = 2;
11049         }
11050 
11051         target_datalen = sizeof(*target_data) * data_items;
11052 
11053         if (arg2) {
11054             if (num == TARGET_NR_capget) {
11055                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11056             } else {
11057                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11058             }
11059             if (!target_data) {
11060                 unlock_user_struct(target_header, arg1, 0);
11061                 return -TARGET_EFAULT;
11062             }
11063 
11064             if (num == TARGET_NR_capset) {
11065                 for (i = 0; i < data_items; i++) {
11066                     data[i].effective = tswap32(target_data[i].effective);
11067                     data[i].permitted = tswap32(target_data[i].permitted);
11068                     data[i].inheritable = tswap32(target_data[i].inheritable);
11069                 }
11070             }
11071 
11072             dataptr = data;
11073         }
11074 
11075         if (num == TARGET_NR_capget) {
11076             ret = get_errno(capget(&header, dataptr));
11077         } else {
11078             ret = get_errno(capset(&header, dataptr));
11079         }
11080 
11081         /* The kernel always updates version for both capget and capset */
11082         target_header->version = tswap32(header.version);
11083         unlock_user_struct(target_header, arg1, 1);
11084 
11085         if (arg2) {
11086             if (num == TARGET_NR_capget) {
11087                 for (i = 0; i < data_items; i++) {
11088                     target_data[i].effective = tswap32(data[i].effective);
11089                     target_data[i].permitted = tswap32(data[i].permitted);
11090                     target_data[i].inheritable = tswap32(data[i].inheritable);
11091                 }
11092                 unlock_user(target_data, arg2, target_datalen);
11093             } else {
11094                 unlock_user(target_data, arg2, 0);
11095             }
11096         }
11097         return ret;
11098     }
11099     case TARGET_NR_sigaltstack:
11100         return do_sigaltstack(arg1, arg2,
11101                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11102 
11103 #ifdef CONFIG_SENDFILE
11104 #ifdef TARGET_NR_sendfile
11105     case TARGET_NR_sendfile:
11106     {
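              /*
               * sendfile takes the offset as a target abi_long (hence
               * get_user_sal/put_user_sal), while sendfile64 below always
               * uses a full 64-bit value.
               */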
11107         off_t *offp = NULL;
11108         off_t off;
11109         if (arg3) {
11110             ret = get_user_sal(off, arg3);
11111             if (is_error(ret)) {
11112                 return ret;
11113             }
11114             offp = &off;
11115         }
11116         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11117         if (!is_error(ret) && arg3) {
11118             abi_long ret2 = put_user_sal(off, arg3);
11119             if (is_error(ret2)) {
11120                 ret = ret2;
11121             }
11122         }
11123         return ret;
11124     }
11125 #endif
11126 #ifdef TARGET_NR_sendfile64
11127     case TARGET_NR_sendfile64:
11128     {
11129         off_t *offp = NULL;
11130         off_t off;
11131         if (arg3) {
11132             ret = get_user_s64(off, arg3);
11133             if (is_error(ret)) {
11134                 return ret;
11135             }
11136             offp = &off;
11137         }
11138         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11139         if (!is_error(ret) && arg3) {
11140             abi_long ret2 = put_user_s64(off, arg3);
11141             if (is_error(ret2)) {
11142                 ret = ret2;
11143             }
11144         }
11145         return ret;
11146     }
11147 #endif
11148 #endif
11149 #ifdef TARGET_NR_vfork
11150     case TARGET_NR_vfork:
11151         return get_errno(do_fork(cpu_env,
11152                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11153                          0, 0, 0, 0));
11154 #endif
11155 #ifdef TARGET_NR_ugetrlimit
11156     case TARGET_NR_ugetrlimit:
11157     {
11158         struct rlimit rlim;
11159         int resource = target_to_host_resource(arg1);
11160         ret = get_errno(getrlimit(resource, &rlim));
11161         if (!is_error(ret)) {
11162             struct target_rlimit *target_rlim;
11163             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11164                 return -TARGET_EFAULT;
11165             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11166             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11167             unlock_user_struct(target_rlim, arg2, 1);
11168         }
11169         return ret;
11170     }
11171 #endif
11172 #ifdef TARGET_NR_truncate64
11173     case TARGET_NR_truncate64:
11174         if (!(p = lock_user_string(arg1)))
11175             return -TARGET_EFAULT;
11176         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11177         unlock_user(p, arg1, 0);
11178         return ret;
11179 #endif
11180 #ifdef TARGET_NR_ftruncate64
11181     case TARGET_NR_ftruncate64:
11182         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11183 #endif
11184 #ifdef TARGET_NR_stat64
11185     case TARGET_NR_stat64:
11186         if (!(p = lock_user_string(arg1))) {
11187             return -TARGET_EFAULT;
11188         }
11189         ret = get_errno(stat(path(p), &st));
11190         unlock_user(p, arg1, 0);
11191         if (!is_error(ret))
11192             ret = host_to_target_stat64(cpu_env, arg2, &st);
11193         return ret;
11194 #endif
11195 #ifdef TARGET_NR_lstat64
11196     case TARGET_NR_lstat64:
11197         if (!(p = lock_user_string(arg1))) {
11198             return -TARGET_EFAULT;
11199         }
11200         ret = get_errno(lstat(path(p), &st));
11201         unlock_user(p, arg1, 0);
11202         if (!is_error(ret))
11203             ret = host_to_target_stat64(cpu_env, arg2, &st);
11204         return ret;
11205 #endif
11206 #ifdef TARGET_NR_fstat64
11207     case TARGET_NR_fstat64:
11208         ret = get_errno(fstat(arg1, &st));
11209         if (!is_error(ret))
11210             ret = host_to_target_stat64(cpu_env, arg2, &st);
11211         return ret;
11212 #endif
11213 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11214 #ifdef TARGET_NR_fstatat64
11215     case TARGET_NR_fstatat64:
11216 #endif
11217 #ifdef TARGET_NR_newfstatat
11218     case TARGET_NR_newfstatat:
11219 #endif
11220         if (!(p = lock_user_string(arg2))) {
11221             return -TARGET_EFAULT;
11222         }
11223         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11224         unlock_user(p, arg2, 0);
11225         if (!is_error(ret))
11226             ret = host_to_target_stat64(cpu_env, arg3, &st);
11227         return ret;
11228 #endif
11229 #if defined(TARGET_NR_statx)
11230     case TARGET_NR_statx:
11231         {
11232             struct target_statx *target_stx;
11233             int dirfd = arg1;
11234             int flags = arg3;
11235 
11236             p = lock_user_string(arg2);
11237             if (p == NULL) {
11238                 return -TARGET_EFAULT;
11239             }
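                  /*
                   * Prefer the host statx syscall when it exists; if the
                   * host kernel returns ENOSYS, fall through to the
                   * fstatat() path below and synthesize the statx fields
                   * from a struct stat.
                   */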
11240 #if defined(__NR_statx)
11241             {
11242                 /*
11243                  * It is assumed that struct statx is architecture independent.
11244                  */
11245                 struct target_statx host_stx;
11246                 int mask = arg4;
11247 
11248                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11249                 if (!is_error(ret)) {
11250                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11251                         unlock_user(p, arg2, 0);
11252                         return -TARGET_EFAULT;
11253                     }
11254                 }
11255 
11256                 if (ret != -TARGET_ENOSYS) {
11257                     unlock_user(p, arg2, 0);
11258                     return ret;
11259                 }
11260             }
11261 #endif
11262             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11263             unlock_user(p, arg2, 0);
11264 
11265             if (!is_error(ret)) {
11266                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11267                     return -TARGET_EFAULT;
11268                 }
11269                 memset(target_stx, 0, sizeof(*target_stx));
11270                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11271                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11272                 __put_user(st.st_ino, &target_stx->stx_ino);
11273                 __put_user(st.st_mode, &target_stx->stx_mode);
11274                 __put_user(st.st_uid, &target_stx->stx_uid);
11275                 __put_user(st.st_gid, &target_stx->stx_gid);
11276                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11277                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11278                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11279                 __put_user(st.st_size, &target_stx->stx_size);
11280                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11281                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11282                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11283                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11284                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11285                 unlock_user_struct(target_stx, arg5, 1);
11286             }
11287         }
11288         return ret;
11289 #endif
11290 #ifdef TARGET_NR_lchown
11291     case TARGET_NR_lchown:
11292         if (!(p = lock_user_string(arg1)))
11293             return -TARGET_EFAULT;
11294         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11295         unlock_user(p, arg1, 0);
11296         return ret;
11297 #endif
11298 #ifdef TARGET_NR_getuid
11299     case TARGET_NR_getuid:
11300         return get_errno(high2lowuid(getuid()));
11301 #endif
11302 #ifdef TARGET_NR_getgid
11303     case TARGET_NR_getgid:
11304         return get_errno(high2lowgid(getgid()));
11305 #endif
11306 #ifdef TARGET_NR_geteuid
11307     case TARGET_NR_geteuid:
11308         return get_errno(high2lowuid(geteuid()));
11309 #endif
11310 #ifdef TARGET_NR_getegid
11311     case TARGET_NR_getegid:
11312         return get_errno(high2lowgid(getegid()));
11313 #endif
11314     case TARGET_NR_setreuid:
11315         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11316     case TARGET_NR_setregid:
11317         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11318     case TARGET_NR_getgroups:
11319         {
11320             int gidsetsize = arg1;
11321             target_id *target_grouplist;
11322             gid_t *grouplist;
11323             int i;
11324 
11325             grouplist = alloca(gidsetsize * sizeof(gid_t));
11326             ret = get_errno(getgroups(gidsetsize, grouplist));
11327             if (gidsetsize == 0)
11328                 return ret;
11329             if (!is_error(ret)) {
11330                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11331                 if (!target_grouplist)
11332                     return -TARGET_EFAULT;
11333                 for (i = 0; i < ret; i++)
11334                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11335                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11336             }
11337         }
11338         return ret;
11339     case TARGET_NR_setgroups:
11340         {
11341             int gidsetsize = arg1;
11342             target_id *target_grouplist;
11343             gid_t *grouplist = NULL;
11344             int i;
11345             if (gidsetsize) {
11346                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11347                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11348                 if (!target_grouplist) {
11349                     return -TARGET_EFAULT;
11350                 }
11351                 for (i = 0; i < gidsetsize; i++) {
11352                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11353                 }
11354                 unlock_user(target_grouplist, arg2, 0);
11355             }
11356             return get_errno(setgroups(gidsetsize, grouplist));
11357         }
11358     case TARGET_NR_fchown:
11359         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11360 #if defined(TARGET_NR_fchownat)
11361     case TARGET_NR_fchownat:
11362         if (!(p = lock_user_string(arg2)))
11363             return -TARGET_EFAULT;
11364         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11365                                  low2highgid(arg4), arg5));
11366         unlock_user(p, arg2, 0);
11367         return ret;
11368 #endif
11369 #ifdef TARGET_NR_setresuid
11370     case TARGET_NR_setresuid:
11371         return get_errno(sys_setresuid(low2highuid(arg1),
11372                                        low2highuid(arg2),
11373                                        low2highuid(arg3)));
11374 #endif
11375 #ifdef TARGET_NR_getresuid
11376     case TARGET_NR_getresuid:
11377         {
11378             uid_t ruid, euid, suid;
11379             ret = get_errno(getresuid(&ruid, &euid, &suid));
11380             if (!is_error(ret)) {
11381                 if (put_user_id(high2lowuid(ruid), arg1)
11382                     || put_user_id(high2lowuid(euid), arg2)
11383                     || put_user_id(high2lowuid(suid), arg3))
11384                     return -TARGET_EFAULT;
11385             }
11386         }
11387         return ret;
11388 #endif
11389 #ifdef TARGET_NR_getresgid
11390     case TARGET_NR_setresgid:
11391         return get_errno(sys_setresgid(low2highgid(arg1),
11392                                        low2highgid(arg2),
11393                                        low2highgid(arg3)));
11394 #endif
11395 #ifdef TARGET_NR_getresgid
11396     case TARGET_NR_getresgid:
11397         {
11398             gid_t rgid, egid, sgid;
11399             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11400             if (!is_error(ret)) {
11401                 if (put_user_id(high2lowgid(rgid), arg1)
11402                     || put_user_id(high2lowgid(egid), arg2)
11403                     || put_user_id(high2lowgid(sgid), arg3))
11404                     return -TARGET_EFAULT;
11405             }
11406         }
11407         return ret;
11408 #endif
11409 #ifdef TARGET_NR_chown
11410     case TARGET_NR_chown:
11411         if (!(p = lock_user_string(arg1)))
11412             return -TARGET_EFAULT;
11413         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11414         unlock_user(p, arg1, 0);
11415         return ret;
11416 #endif
11417     case TARGET_NR_setuid:
11418         return get_errno(sys_setuid(low2highuid(arg1)));
11419     case TARGET_NR_setgid:
11420         return get_errno(sys_setgid(low2highgid(arg1)));
11421     case TARGET_NR_setfsuid:
11422         return get_errno(setfsuid(arg1));
11423     case TARGET_NR_setfsgid:
11424         return get_errno(setfsgid(arg1));
11425 
11426 #ifdef TARGET_NR_lchown32
11427     case TARGET_NR_lchown32:
11428         if (!(p = lock_user_string(arg1)))
11429             return -TARGET_EFAULT;
11430         ret = get_errno(lchown(p, arg2, arg3));
11431         unlock_user(p, arg1, 0);
11432         return ret;
11433 #endif
11434 #ifdef TARGET_NR_getuid32
11435     case TARGET_NR_getuid32:
11436         return get_errno(getuid());
11437 #endif
11438 
11439 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11440    /* Alpha specific */
11441     case TARGET_NR_getxuid:
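              /*
               * Alpha syscalls can return a second value in register a4:
               * getxuid (and getxgid below) report the real id as the
               * normal return value and the effective id in a4, which is
               * why IR_A4 is written by hand here.
               */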
11442         {
11443             uid_t euid;
11444             euid = geteuid();
11445             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11446         }
11447         return get_errno(getuid());
11448 #endif
11449 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11450    /* Alpha specific */
11451     case TARGET_NR_getxgid:
11452         {
11453             gid_t egid;
11454             egid = getegid();
11455             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11456         }
11457         return get_errno(getgid());
11458 #endif
11459 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11460     /* Alpha specific */
11461     case TARGET_NR_osf_getsysinfo:
11462         ret = -TARGET_EOPNOTSUPP;
11463         switch (arg1) {
11464           case TARGET_GSI_IEEE_FP_CONTROL:
11465             {
11466                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11467                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11468 
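                      /*
                       * The accrued exception status lives in the high bits
                       * of the hardware FPCR; shifting down by 35 lines it
                       * up with the SWCR status field before merging it
                       * into the reported value.
                       */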
11469                 swcr &= ~SWCR_STATUS_MASK;
11470                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11471 
11472                 if (put_user_u64 (swcr, arg2))
11473                         return -TARGET_EFAULT;
11474                 ret = 0;
11475             }
11476             break;
11477 
11478           /* case GSI_IEEE_STATE_AT_SIGNAL:
11479              -- Not implemented in linux kernel.
11480              case GSI_UACPROC:
11481              -- Retrieves current unaligned access state; not much used.
11482              case GSI_PROC_TYPE:
11483              -- Retrieves implver information; surely not used.
11484              case GSI_GET_HWRPB:
11485              -- Grabs a copy of the HWRPB; surely not used.
11486           */
11487         }
11488         return ret;
11489 #endif
11490 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11491     /* Alpha specific */
11492     case TARGET_NR_osf_setsysinfo:
11493         ret = -TARGET_EOPNOTSUPP;
11494         switch (arg1) {
11495           case TARGET_SSI_IEEE_FP_CONTROL:
11496             {
11497                 uint64_t swcr, fpcr;
11498 
11499                 if (get_user_u64 (swcr, arg2)) {
11500                     return -TARGET_EFAULT;
11501                 }
11502 
11503                 /*
11504                  * The kernel calls swcr_update_status to update the
11505                  * status bits from the fpcr at every point that it
11506                  * could be queried.  Therefore, we store the status
11507                  * bits only in FPCR.
11508                  */
11509                 ((CPUAlphaState *)cpu_env)->swcr
11510                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11511 
11512                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11513                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11514                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11515                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11516                 ret = 0;
11517             }
11518             break;
11519 
11520           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11521             {
11522                 uint64_t exc, fpcr, fex;
11523 
11524                 if (get_user_u64(exc, arg2)) {
11525                     return -TARGET_EFAULT;
11526                 }
11527                 exc &= SWCR_STATUS_MASK;
11528                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11529 
11530                 /* Old exceptions are not signaled.  */
11531                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11532                 fex = exc & ~fex;
11533                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11534                 fex &= ((CPUArchState *)cpu_env)->swcr;
11535 
11536                 /* Update the hardware fpcr.  */
11537                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11538                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11539 
11540                 if (fex) {
11541                     int si_code = TARGET_FPE_FLTUNK;
11542                     target_siginfo_t info;
11543 
11544                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11545                         si_code = TARGET_FPE_FLTUND;
11546                     }
11547                     if (fex & SWCR_TRAP_ENABLE_INE) {
11548                         si_code = TARGET_FPE_FLTRES;
11549                     }
11550                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11551                         si_code = TARGET_FPE_FLTUND;
11552                     }
11553                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11554                         si_code = TARGET_FPE_FLTOVF;
11555                     }
11556                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11557                         si_code = TARGET_FPE_FLTDIV;
11558                     }
11559                     if (fex & SWCR_TRAP_ENABLE_INV) {
11560                         si_code = TARGET_FPE_FLTINV;
11561                     }
11562 
11563                     info.si_signo = SIGFPE;
11564                     info.si_errno = 0;
11565                     info.si_code = si_code;
11566                     info._sifields._sigfault._addr
11567                         = ((CPUArchState *)cpu_env)->pc;
11568                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11569                                  QEMU_SI_FAULT, &info);
11570                 }
11571                 ret = 0;
11572             }
11573             break;
11574 
11575           /* case SSI_NVPAIRS:
11576              -- Used with SSIN_UACPROC to enable unaligned accesses.
11577              case SSI_IEEE_STATE_AT_SIGNAL:
11578              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11579              -- Not implemented in linux kernel
11580           */
11581         }
11582         return ret;
11583 #endif
11584 #ifdef TARGET_NR_osf_sigprocmask
11585     /* Alpha specific.  */
11586     case TARGET_NR_osf_sigprocmask:
11587         {
11588             abi_ulong mask;
11589             int how;
11590             sigset_t set, oldset;
11591 
11592             switch(arg1) {
11593             case TARGET_SIG_BLOCK:
11594                 how = SIG_BLOCK;
11595                 break;
11596             case TARGET_SIG_UNBLOCK:
11597                 how = SIG_UNBLOCK;
11598                 break;
11599             case TARGET_SIG_SETMASK:
11600                 how = SIG_SETMASK;
11601                 break;
11602             default:
11603                 return -TARGET_EINVAL;
11604             }
11605             mask = arg2;
11606             target_to_host_old_sigset(&set, &mask);
11607             ret = do_sigprocmask(how, &set, &oldset);
11608             if (!ret) {
11609                 host_to_target_old_sigset(&mask, &oldset);
11610                 ret = mask;
11611             }
11612         }
11613         return ret;
11614 #endif
11615 
11616 #ifdef TARGET_NR_getgid32
11617     case TARGET_NR_getgid32:
11618         return get_errno(getgid());
11619 #endif
11620 #ifdef TARGET_NR_geteuid32
11621     case TARGET_NR_geteuid32:
11622         return get_errno(geteuid());
11623 #endif
11624 #ifdef TARGET_NR_getegid32
11625     case TARGET_NR_getegid32:
11626         return get_errno(getegid());
11627 #endif
11628 #ifdef TARGET_NR_setreuid32
11629     case TARGET_NR_setreuid32:
11630         return get_errno(setreuid(arg1, arg2));
11631 #endif
11632 #ifdef TARGET_NR_setregid32
11633     case TARGET_NR_setregid32:
11634         return get_errno(setregid(arg1, arg2));
11635 #endif
11636 #ifdef TARGET_NR_getgroups32
11637     case TARGET_NR_getgroups32:
11638         {
11639             int gidsetsize = arg1;
11640             uint32_t *target_grouplist;
11641             gid_t *grouplist;
11642             int i;
11643 
11644             grouplist = alloca(gidsetsize * sizeof(gid_t));
11645             ret = get_errno(getgroups(gidsetsize, grouplist));
11646             if (gidsetsize == 0)
11647                 return ret;
11648             if (!is_error(ret)) {
11649                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11650                 if (!target_grouplist) {
11651                     return -TARGET_EFAULT;
11652                 }
11653                 for (i = 0; i < ret; i++)
11654                     target_grouplist[i] = tswap32(grouplist[i]);
11655                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11656             }
11657         }
11658         return ret;
11659 #endif
11660 #ifdef TARGET_NR_setgroups32
11661     case TARGET_NR_setgroups32:
11662         {
11663             int gidsetsize = arg1;
11664             uint32_t *target_grouplist;
11665             gid_t *grouplist;
11666             int i;
11667 
11668             grouplist = alloca(gidsetsize * sizeof(gid_t));
11669             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11670             if (!target_grouplist) {
11671                 return -TARGET_EFAULT;
11672             }
11673             for (i = 0; i < gidsetsize; i++)
11674                 grouplist[i] = tswap32(target_grouplist[i]);
11675             unlock_user(target_grouplist, arg2, 0);
11676             return get_errno(setgroups(gidsetsize, grouplist));
11677         }
11678 #endif
11679 #ifdef TARGET_NR_fchown32
11680     case TARGET_NR_fchown32:
11681         return get_errno(fchown(arg1, arg2, arg3));
11682 #endif
11683 #ifdef TARGET_NR_setresuid32
11684     case TARGET_NR_setresuid32:
11685         return get_errno(sys_setresuid(arg1, arg2, arg3));
11686 #endif
11687 #ifdef TARGET_NR_getresuid32
11688     case TARGET_NR_getresuid32:
11689         {
11690             uid_t ruid, euid, suid;
11691             ret = get_errno(getresuid(&ruid, &euid, &suid));
11692             if (!is_error(ret)) {
11693                 if (put_user_u32(ruid, arg1)
11694                     || put_user_u32(euid, arg2)
11695                     || put_user_u32(suid, arg3))
11696                     return -TARGET_EFAULT;
11697             }
11698         }
11699         return ret;
11700 #endif
11701 #ifdef TARGET_NR_setresgid32
11702     case TARGET_NR_setresgid32:
11703         return get_errno(sys_setresgid(arg1, arg2, arg3));
11704 #endif
11705 #ifdef TARGET_NR_getresgid32
11706     case TARGET_NR_getresgid32:
11707         {
11708             gid_t rgid, egid, sgid;
11709             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11710             if (!is_error(ret)) {
11711                 if (put_user_u32(rgid, arg1)
11712                     || put_user_u32(egid, arg2)
11713                     || put_user_u32(sgid, arg3))
11714                     return -TARGET_EFAULT;
11715             }
11716         }
11717         return ret;
11718 #endif
11719 #ifdef TARGET_NR_chown32
11720     case TARGET_NR_chown32:
11721         if (!(p = lock_user_string(arg1)))
11722             return -TARGET_EFAULT;
11723         ret = get_errno(chown(p, arg2, arg3));
11724         unlock_user(p, arg1, 0);
11725         return ret;
11726 #endif
11727 #ifdef TARGET_NR_setuid32
11728     case TARGET_NR_setuid32:
11729         return get_errno(sys_setuid(arg1));
11730 #endif
11731 #ifdef TARGET_NR_setgid32
11732     case TARGET_NR_setgid32:
11733         return get_errno(sys_setgid(arg1));
11734 #endif
11735 #ifdef TARGET_NR_setfsuid32
11736     case TARGET_NR_setfsuid32:
11737         return get_errno(setfsuid(arg1));
11738 #endif
11739 #ifdef TARGET_NR_setfsgid32
11740     case TARGET_NR_setfsgid32:
11741         return get_errno(setfsgid(arg1));
11742 #endif
11743 #ifdef TARGET_NR_mincore
11744     case TARGET_NR_mincore:
11745         {
11746             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11747             if (!a) {
11748                 return -TARGET_ENOMEM;
11749             }
11750             p = lock_user_string(arg3);
11751             if (!p) {
11752                 ret = -TARGET_EFAULT;
11753             } else {
11754                 ret = get_errno(mincore(a, arg2, p));
11755                 unlock_user(p, arg3, ret);
11756             }
11757             unlock_user(a, arg1, 0);
11758         }
11759         return ret;
11760 #endif
11761 #ifdef TARGET_NR_arm_fadvise64_64
11762     case TARGET_NR_arm_fadvise64_64:
11763         /* arm_fadvise64_64 looks like fadvise64_64 but
11764          * with different argument order: fd, advice, offset, len
11765          * rather than the usual fd, offset, len, advice.
11766          * Note that offset and len are both 64-bit so appear as
11767          * pairs of 32-bit registers.
11768          */
11769         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11770                             target_offset64(arg5, arg6), arg2);
11771         return -host_to_target_errno(ret);
11772 #endif
11773 
11774 #if TARGET_ABI_BITS == 32
11775 
11776 #ifdef TARGET_NR_fadvise64_64
11777     case TARGET_NR_fadvise64_64:
11778 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11779         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11780         ret = arg2;
11781         arg2 = arg3;
11782         arg3 = arg4;
11783         arg4 = arg5;
11784         arg5 = arg6;
11785         arg6 = ret;
11786 #else
11787         /* 6 args: fd, offset (high, low), len (high, low), advice */
11788         if (regpairs_aligned(cpu_env, num)) {
11789             /* offset is in (3,4), len in (5,6) and advice in 7 */
11790             arg2 = arg3;
11791             arg3 = arg4;
11792             arg4 = arg5;
11793             arg5 = arg6;
11794             arg6 = arg7;
11795         }
11796 #endif
11797         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11798                             target_offset64(arg4, arg5), arg6);
11799         return -host_to_target_errno(ret);
11800 #endif
11801 
11802 #ifdef TARGET_NR_fadvise64
11803     case TARGET_NR_fadvise64:
11804         /* 5 args: fd, offset (high, low), len, advice */
11805         if (regpairs_aligned(cpu_env, num)) {
11806             /* offset is in (3,4), len in 5 and advice in 6 */
11807             arg2 = arg3;
11808             arg3 = arg4;
11809             arg4 = arg5;
11810             arg5 = arg6;
11811         }
11812         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11813         return -host_to_target_errno(ret);
11814 #endif
11815 
11816 #else /* not a 32-bit ABI */
11817 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11818 #ifdef TARGET_NR_fadvise64_64
11819     case TARGET_NR_fadvise64_64:
11820 #endif
11821 #ifdef TARGET_NR_fadvise64
11822     case TARGET_NR_fadvise64:
11823 #endif
11824 #ifdef TARGET_S390X
11825         switch (arg4) {
11826         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11827         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11828         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11829         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11830         default: break;
11831         }
11832 #endif
11833         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11834 #endif
11835 #endif /* end of 64-bit ABI fadvise handling */
11836 
11837 #ifdef TARGET_NR_madvise
11838     case TARGET_NR_madvise:
11839         /* A straight passthrough may not be safe because qemu sometimes
11840            turns private file-backed mappings into anonymous mappings.
11841            This will break MADV_DONTNEED.
11842            This is a hint, so ignoring and returning success is ok.  */
11843         return 0;
11844 #endif
11845 #ifdef TARGET_NR_fcntl64
11846     case TARGET_NR_fcntl64:
11847     {
11848         int cmd;
11849         struct flock64 fl;
11850         from_flock64_fn *copyfrom = copy_from_user_flock64;
11851         to_flock64_fn *copyto = copy_to_user_flock64;
11852 
11853 #ifdef TARGET_ARM
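              /*
               * The old ARM OABI lays out struct flock64 without the 8-byte
               * alignment padding that EABI inserts, so non-EABI guests
               * need the dedicated copy helpers.
               */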
11854         if (!((CPUARMState *)cpu_env)->eabi) {
11855             copyfrom = copy_from_user_oabi_flock64;
11856             copyto = copy_to_user_oabi_flock64;
11857         }
11858 #endif
11859 
11860         cmd = target_to_host_fcntl_cmd(arg2);
11861         if (cmd == -TARGET_EINVAL) {
11862             return cmd;
11863         }
11864 
11865         switch(arg2) {
11866         case TARGET_F_GETLK64:
11867             ret = copyfrom(&fl, arg3);
11868             if (ret) {
11869                 break;
11870             }
11871             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11872             if (ret == 0) {
11873                 ret = copyto(arg3, &fl);
11874             }
11875             break;
11876 
11877         case TARGET_F_SETLK64:
11878         case TARGET_F_SETLKW64:
11879             ret = copyfrom(&fl, arg3);
11880             if (ret) {
11881                 break;
11882             }
11883             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11884             break;
11885         default:
11886             ret = do_fcntl(arg1, arg2, arg3);
11887             break;
11888         }
11889         return ret;
11890     }
11891 #endif
11892 #ifdef TARGET_NR_cacheflush
11893     case TARGET_NR_cacheflush:
11894         /* self-modifying code is handled automatically, so nothing needed */
11895         return 0;
11896 #endif
11897 #ifdef TARGET_NR_getpagesize
11898     case TARGET_NR_getpagesize:
11899         return TARGET_PAGE_SIZE;
11900 #endif
11901     case TARGET_NR_gettid:
11902         return get_errno(sys_gettid());
11903 #ifdef TARGET_NR_readahead
11904     case TARGET_NR_readahead:
11905 #if TARGET_ABI_BITS == 32
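              /*
               * On 32-bit ABIs the 64-bit offset arrives split across a
               * register pair (optionally shifted for alignment), so
               * reassemble it with target_offset64() before calling the
               * host readahead().
               */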
11906         if (regpairs_aligned(cpu_env, num)) {
11907             arg2 = arg3;
11908             arg3 = arg4;
11909             arg4 = arg5;
11910         }
11911         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11912 #else
11913         ret = get_errno(readahead(arg1, arg2, arg3));
11914 #endif
11915         return ret;
11916 #endif
11917 #ifdef CONFIG_ATTR
11918 #ifdef TARGET_NR_setxattr
11919     case TARGET_NR_listxattr:
11920     case TARGET_NR_llistxattr:
11921     {
11922         void *p, *b = 0;
11923         if (arg2) {
11924             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11925             if (!b) {
11926                 return -TARGET_EFAULT;
11927             }
11928         }
11929         p = lock_user_string(arg1);
11930         if (p) {
11931             if (num == TARGET_NR_listxattr) {
11932                 ret = get_errno(listxattr(p, b, arg3));
11933             } else {
11934                 ret = get_errno(llistxattr(p, b, arg3));
11935             }
11936         } else {
11937             ret = -TARGET_EFAULT;
11938         }
11939         unlock_user(p, arg1, 0);
11940         unlock_user(b, arg2, arg3);
11941         return ret;
11942     }
11943     case TARGET_NR_flistxattr:
11944     {
11945         void *b = 0;
11946         if (arg2) {
11947             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11948             if (!b) {
11949                 return -TARGET_EFAULT;
11950             }
11951         }
11952         ret = get_errno(flistxattr(arg1, b, arg3));
11953         unlock_user(b, arg2, arg3);
11954         return ret;
11955     }
11956     case TARGET_NR_setxattr:
11957     case TARGET_NR_lsetxattr:
11958         {
11959             void *p, *n, *v = 0;
11960             if (arg3) {
11961                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11962                 if (!v) {
11963                     return -TARGET_EFAULT;
11964                 }
11965             }
11966             p = lock_user_string(arg1);
11967             n = lock_user_string(arg2);
11968             if (p && n) {
11969                 if (num == TARGET_NR_setxattr) {
11970                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11971                 } else {
11972                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11973                 }
11974             } else {
11975                 ret = -TARGET_EFAULT;
11976             }
11977             unlock_user(p, arg1, 0);
11978             unlock_user(n, arg2, 0);
11979             unlock_user(v, arg3, 0);
11980         }
11981         return ret;
11982     case TARGET_NR_fsetxattr:
11983         {
11984             void *n, *v = 0;
11985             if (arg3) {
11986                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11987                 if (!v) {
11988                     return -TARGET_EFAULT;
11989                 }
11990             }
11991             n = lock_user_string(arg2);
11992             if (n) {
11993                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11994             } else {
11995                 ret = -TARGET_EFAULT;
11996             }
11997             unlock_user(n, arg2, 0);
11998             unlock_user(v, arg3, 0);
11999         }
12000         return ret;
12001     case TARGET_NR_getxattr:
12002     case TARGET_NR_lgetxattr:
12003         {
12004             void *p, *n, *v = 0;
12005             if (arg3) {
12006                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12007                 if (!v) {
12008                     return -TARGET_EFAULT;
12009                 }
12010             }
12011             p = lock_user_string(arg1);
12012             n = lock_user_string(arg2);
12013             if (p && n) {
12014                 if (num == TARGET_NR_getxattr) {
12015                     ret = get_errno(getxattr(p, n, v, arg4));
12016                 } else {
12017                     ret = get_errno(lgetxattr(p, n, v, arg4));
12018                 }
12019             } else {
12020                 ret = -TARGET_EFAULT;
12021             }
12022             unlock_user(p, arg1, 0);
12023             unlock_user(n, arg2, 0);
12024             unlock_user(v, arg3, arg4);
12025         }
12026         return ret;
12027     case TARGET_NR_fgetxattr:
12028         {
12029             void *n, *v = 0;
12030             if (arg3) {
12031                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12032                 if (!v) {
12033                     return -TARGET_EFAULT;
12034                 }
12035             }
12036             n = lock_user_string(arg2);
12037             if (n) {
12038                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12039             } else {
12040                 ret = -TARGET_EFAULT;
12041             }
12042             unlock_user(n, arg2, 0);
12043             unlock_user(v, arg3, arg4);
12044         }
12045         return ret;
12046     case TARGET_NR_removexattr:
12047     case TARGET_NR_lremovexattr:
12048         {
12049             void *p, *n;
12050             p = lock_user_string(arg1);
12051             n = lock_user_string(arg2);
12052             if (p && n) {
12053                 if (num == TARGET_NR_removexattr) {
12054                     ret = get_errno(removexattr(p, n));
12055                 } else {
12056                     ret = get_errno(lremovexattr(p, n));
12057                 }
12058             } else {
12059                 ret = -TARGET_EFAULT;
12060             }
12061             unlock_user(p, arg1, 0);
12062             unlock_user(n, arg2, 0);
12063         }
12064         return ret;
12065     case TARGET_NR_fremovexattr:
12066         {
12067             void *n;
12068             n = lock_user_string(arg2);
12069             if (n) {
12070                 ret = get_errno(fremovexattr(arg1, n));
12071             } else {
12072                 ret = -TARGET_EFAULT;
12073             }
12074             unlock_user(n, arg2, 0);
12075         }
12076         return ret;
12077 #endif
12078 #endif /* CONFIG_ATTR */
12079 #ifdef TARGET_NR_set_thread_area
12080     case TARGET_NR_set_thread_area:
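            /*
             * The TLS pointer lives in a different place on each target:
             * a CP0 register on MIPS, PR_PID on CRIS, a descriptor set up
             * by do_set_thread_area() on 32-bit x86, and the TaskState on
             * m68k.
             */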
12081 #if defined(TARGET_MIPS)
12082       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12083       return 0;
12084 #elif defined(TARGET_CRIS)
12085       if (arg1 & 0xff) {
12086           ret = -TARGET_EINVAL;
12087       } else {
12088           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12089           ret = 0;
12090       }
12091       return ret;
12092 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12093       return do_set_thread_area(cpu_env, arg1);
12094 #elif defined(TARGET_M68K)
12095       {
12096           TaskState *ts = cpu->opaque;
12097           ts->tp_value = arg1;
12098           return 0;
12099       }
12100 #else
12101       return -TARGET_ENOSYS;
12102 #endif
12103 #endif
12104 #ifdef TARGET_NR_get_thread_area
12105     case TARGET_NR_get_thread_area:
12106 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12107         return do_get_thread_area(cpu_env, arg1);
12108 #elif defined(TARGET_M68K)
12109         {
12110             TaskState *ts = cpu->opaque;
12111             return ts->tp_value;
12112         }
12113 #else
12114         return -TARGET_ENOSYS;
12115 #endif
12116 #endif
12117 #ifdef TARGET_NR_getdomainname
12118     case TARGET_NR_getdomainname:
12119         return -TARGET_ENOSYS;
12120 #endif
12121 
12122 #ifdef TARGET_NR_clock_settime
12123     case TARGET_NR_clock_settime:
12124     {
12125         struct timespec ts;
12126 
12127         ret = target_to_host_timespec(&ts, arg2);
12128         if (!is_error(ret)) {
12129             ret = get_errno(clock_settime(arg1, &ts));
12130         }
12131         return ret;
12132     }
12133 #endif
12134 #ifdef TARGET_NR_clock_settime64
12135     case TARGET_NR_clock_settime64:
12136     {
12137         struct timespec ts;
12138 
12139         ret = target_to_host_timespec64(&ts, arg2);
12140         if (!is_error(ret)) {
12141             ret = get_errno(clock_settime(arg1, &ts));
12142         }
12143         return ret;
12144     }
12145 #endif
12146 #ifdef TARGET_NR_clock_gettime
12147     case TARGET_NR_clock_gettime:
12148     {
12149         struct timespec ts;
12150         ret = get_errno(clock_gettime(arg1, &ts));
12151         if (!is_error(ret)) {
12152             ret = host_to_target_timespec(arg2, &ts);
12153         }
12154         return ret;
12155     }
12156 #endif
12157 #ifdef TARGET_NR_clock_gettime64
12158     case TARGET_NR_clock_gettime64:
12159     {
12160         struct timespec ts;
12161         ret = get_errno(clock_gettime(arg1, &ts));
12162         if (!is_error(ret)) {
12163             ret = host_to_target_timespec64(arg2, &ts);
12164         }
12165         return ret;
12166     }
12167 #endif
12168 #ifdef TARGET_NR_clock_getres
12169     case TARGET_NR_clock_getres:
12170     {
12171         struct timespec ts;
12172         ret = get_errno(clock_getres(arg1, &ts));
12173         if (!is_error(ret) && host_to_target_timespec(arg2, &ts)) {
12174             return -TARGET_EFAULT;
12175         }
12176         return ret;
12177     }
12178 #endif
12179 #ifdef TARGET_NR_clock_getres_time64
12180     case TARGET_NR_clock_getres_time64:
12181     {
12182         struct timespec ts;
12183         ret = get_errno(clock_getres(arg1, &ts));
12184         if (!is_error(ret) && host_to_target_timespec64(arg2, &ts)) {
12185             return -TARGET_EFAULT;
12186         }
12187         return ret;
12188     }
12189 #endif
12190 #ifdef TARGET_NR_clock_nanosleep
12191     case TARGET_NR_clock_nanosleep:
12192     {
12193         struct timespec ts;
12194         if (target_to_host_timespec(&ts, arg3)) {
12195             return -TARGET_EFAULT;
12196         }
12197         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12198                                              &ts, arg4 ? &ts : NULL));
12199         /*
12200          * If the call is interrupted by a signal handler, it fails with
12201          * -TARGET_EINTR. In that case, if arg4 is not NULL and arg2 is not
12202          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12203          */
12204         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12205             host_to_target_timespec(arg4, &ts)) {
12206               return -TARGET_EFAULT;
12207         }
12208 
12209         return ret;
12210     }
12211 #endif
12212 #ifdef TARGET_NR_clock_nanosleep_time64
12213     case TARGET_NR_clock_nanosleep_time64:
12214     {
12215         struct timespec ts;
12216 
12217         if (target_to_host_timespec64(&ts, arg3)) {
12218             return -TARGET_EFAULT;
12219         }
12220 
12221         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12222                                              &ts, arg4 ? &ts : NULL));
12223 
12224         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12225             host_to_target_timespec64(arg4, &ts)) {
12226             return -TARGET_EFAULT;
12227         }
12228         return ret;
12229     }
12230 #endif
12231 
12232 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12233     case TARGET_NR_set_tid_address:
12234         return get_errno(set_tid_address((int *)g2h(arg1)));
12235 #endif
12236 
12237     case TARGET_NR_tkill:
12238         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12239 
12240     case TARGET_NR_tgkill:
12241         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12242                          target_to_host_signal(arg3)));
12243 
12244 #ifdef TARGET_NR_set_robust_list
12245     case TARGET_NR_set_robust_list:
12246     case TARGET_NR_get_robust_list:
12247         /* The ABI for supporting robust futexes has userspace pass
12248          * the kernel a pointer to a linked list which is updated by
12249          * userspace after the syscall; the list is walked by the kernel
12250          * when the thread exits. Since the linked list in QEMU guest
12251          * memory isn't a valid linked list for the host and we have
12252          * no way to reliably intercept the thread-death event, we can't
12253          * support these. Silently return ENOSYS so that guest userspace
12254          * falls back to a non-robust futex implementation (which should
12255          * be OK except in the corner case of the guest crashing while
12256          * holding a mutex that is shared with another process via
12257          * shared memory).
12258          */
12259         return -TARGET_ENOSYS;
12260 #endif
12261 
12262 #if defined(TARGET_NR_utimensat)
12263     case TARGET_NR_utimensat:
12264         {
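                  /*
                   * arg3, when non-zero, points to an array of two target
                   * timespecs (access and modification time); a NULL times
                   * pointer means "set both timestamps to the current time".
                   */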
12265             struct timespec *tsp, ts[2];
12266             if (!arg3) {
12267                 tsp = NULL;
12268             } else {
12269                 if (target_to_host_timespec(ts, arg3)) {
12270                     return -TARGET_EFAULT;
12271                 }
12272                 if (target_to_host_timespec(ts + 1, arg3 +
12273                                             sizeof(struct target_timespec))) {
12274                     return -TARGET_EFAULT;
12275                 }
12276                 tsp = ts;
12277             }
12278             if (!arg2) {
12279                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12280             } else {
12281                 if (!(p = lock_user_string(arg2))) {
12282                     return -TARGET_EFAULT;
12283                 }
12284                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12285                 unlock_user(p, arg2, 0);
12286             }
12287         }
12288         return ret;
12289 #endif
12290 #ifdef TARGET_NR_utimensat_time64
12291     case TARGET_NR_utimensat_time64:
12292         {
12293             struct timespec *tsp, ts[2];
12294             if (!arg3) {
12295                 tsp = NULL;
12296             } else {
12297                 if (target_to_host_timespec64(ts, arg3)) {
12298                     return -TARGET_EFAULT;
12299                 }
12300                 if (target_to_host_timespec64(ts + 1, arg3 +
12301                                      sizeof(struct target__kernel_timespec))) {
12302                     return -TARGET_EFAULT;
12303                 }
12304                 tsp = ts;
12305             }
12306             if (!arg2) {
12307                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12308             } else {
12309                 p = lock_user_string(arg2);
12310                 if (!p) {
12311                     return -TARGET_EFAULT;
12312                 }
12313                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12314                 unlock_user(p, arg2, 0);
12315             }
12316         }
12317         return ret;
12318 #endif
12319 #ifdef TARGET_NR_futex
12320     case TARGET_NR_futex:
12321         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12322 #endif
12323 #ifdef TARGET_NR_futex_time64
12324     case TARGET_NR_futex_time64:
12325         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12326 #endif
12327 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12328     case TARGET_NR_inotify_init:
12329         ret = get_errno(sys_inotify_init());
12330         if (ret >= 0) {
12331             fd_trans_register(ret, &target_inotify_trans);
12332         }
12333         return ret;
12334 #endif
12335 #ifdef CONFIG_INOTIFY1
12336 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12337     case TARGET_NR_inotify_init1:
12338         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12339                                           fcntl_flags_tbl)));
12340         if (ret >= 0) {
12341             fd_trans_register(ret, &target_inotify_trans);
12342         }
12343         return ret;
12344 #endif
12345 #endif
12346 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12347     case TARGET_NR_inotify_add_watch:
12348         p = lock_user_string(arg2);
12349         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12350         unlock_user(p, arg2, 0);
12351         return ret;
12352 #endif
12353 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12354     case TARGET_NR_inotify_rm_watch:
12355         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12356 #endif
12357 
12358 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12359     case TARGET_NR_mq_open:
12360         {
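                  /*
                   * Translate the open flags through fcntl_flags_tbl and, when
                   * the guest supplied an mq_attr, copy it into a host struct
                   * before calling mq_open().
                   */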
12361             struct mq_attr posix_mq_attr;
12362             struct mq_attr *pposix_mq_attr;
12363             int host_flags;
12364 
12365             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12366             pposix_mq_attr = NULL;
12367             if (arg4) {
12368                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12369                     return -TARGET_EFAULT;
12370                 }
12371                 pposix_mq_attr = &posix_mq_attr;
12372             }
12373             p = lock_user_string(arg1 - 1);
12374             if (!p) {
12375                 return -TARGET_EFAULT;
12376             }
12377             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12378             unlock_user(p, arg1, 0);
12379         }
12380         return ret;
12381 
12382     case TARGET_NR_mq_unlink:
12383         p = lock_user_string(arg1 - 1);
12384         if (!p) {
12385             return -TARGET_EFAULT;
12386         }
12387         ret = get_errno(mq_unlink(p));
12388         unlock_user(p, arg1, 0);
12389         return ret;
12390 
12391 #ifdef TARGET_NR_mq_timedsend
12392     case TARGET_NR_mq_timedsend:
12393         {
12394             struct timespec ts;
12395 
12396             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12397             if (arg5 != 0) {
12398                 if (target_to_host_timespec(&ts, arg5)) {
12399                     return -TARGET_EFAULT;
12400                 }
12401                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12402                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12403                     return -TARGET_EFAULT;
12404                 }
12405             } else {
12406                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12407             }
12408             unlock_user(p, arg2, arg3);
12409         }
12410         return ret;
12411 #endif
12412 #ifdef TARGET_NR_mq_timedsend_time64
12413     case TARGET_NR_mq_timedsend_time64:
12414         {
12415             struct timespec ts;
12416 
12417             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12418             if (arg5 != 0) {
12419                 if (target_to_host_timespec64(&ts, arg5)) {
12420                     return -TARGET_EFAULT;
12421                 }
12422                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12423                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12424                     return -TARGET_EFAULT;
12425                 }
12426             } else {
12427                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12428             }
12429             unlock_user(p, arg2, arg3);
12430         }
12431         return ret;
12432 #endif
12433 
12434 #ifdef TARGET_NR_mq_timedreceive
12435     case TARGET_NR_mq_timedreceive:
12436         {
12437             struct timespec ts;
12438             unsigned int prio;
12439 
12440             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12441             if (arg5 != 0) {
12442                 if (target_to_host_timespec(&ts, arg5)) {
12443                     return -TARGET_EFAULT;
12444                 }
12445                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12446                                                      &prio, &ts));
12447                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12448                     return -TARGET_EFAULT;
12449                 }
12450             } else {
12451                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12452                                                      &prio, NULL));
12453             }
12454             unlock_user(p, arg2, arg3);
12455             if (arg4 != 0) {
12456                 put_user_u32(prio, arg4);
                  }
12457         }
12458         return ret;
12459 #endif
12460 #ifdef TARGET_NR_mq_timedreceive_time64
12461     case TARGET_NR_mq_timedreceive_time64:
12462         {
12463             struct timespec ts;
12464             unsigned int prio;
12465 
12466             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12467             if (arg5 != 0) {
12468                 if (target_to_host_timespec64(&ts, arg5)) {
12469                     return -TARGET_EFAULT;
12470                 }
12471                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12472                                                      &prio, &ts));
12473                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12474                     return -TARGET_EFAULT;
12475                 }
12476             } else {
12477                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12478                                                      &prio, NULL));
12479             }
12480             unlock_user(p, arg2, arg3);
12481             if (arg4 != 0) {
12482                 put_user_u32(prio, arg4);
12483             }
12484         }
12485         return ret;
12486 #endif
12487 
12488     /* Not implemented for now... */
12489 /*     case TARGET_NR_mq_notify: */
12490 /*         break; */
12491 
12492     case TARGET_NR_mq_getsetattr:
12493         {
12494             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12495             ret = 0;
12496             if (arg2 != 0) {
12497                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12498                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12499                                            &posix_mq_attr_out));
12500             } else if (arg3 != 0) {
12501                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12502             }
12503             if (ret == 0 && arg3 != 0) {
12504                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12505             }
12506         }
12507         return ret;
12508 #endif
12509 
12510 #ifdef CONFIG_SPLICE
12511 #ifdef TARGET_NR_tee
12512     case TARGET_NR_tee:
12513         {
12514             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12515         }
12516         return ret;
12517 #endif
12518 #ifdef TARGET_NR_splice
12519     case TARGET_NR_splice:
12520         {
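                  /*
                   * The off_in/off_out pointers are optional: when the guest
                   * supplies one, read the 64-bit offset, pass a host copy to
                   * splice() and write the updated value back afterwards.
                   */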
12521             loff_t loff_in, loff_out;
12522             loff_t *ploff_in = NULL, *ploff_out = NULL;
12523             if (arg2) {
12524                 if (get_user_u64(loff_in, arg2)) {
12525                     return -TARGET_EFAULT;
12526                 }
12527                 ploff_in = &loff_in;
12528             }
12529             if (arg4) {
12530                 if (get_user_u64(loff_out, arg4)) {
12531                     return -TARGET_EFAULT;
12532                 }
12533                 ploff_out = &loff_out;
12534             }
12535             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12536             if (arg2) {
12537                 if (put_user_u64(loff_in, arg2)) {
12538                     return -TARGET_EFAULT;
12539                 }
12540             }
12541             if (arg4) {
12542                 if (put_user_u64(loff_out, arg4)) {
12543                     return -TARGET_EFAULT;
12544                 }
12545             }
12546         }
12547         return ret;
12548 #endif
12549 #ifdef TARGET_NR_vmsplice
12550     case TARGET_NR_vmsplice:
12551         {
12552             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12553             if (vec != NULL) {
12554                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12555                 unlock_iovec(vec, arg2, arg3, 0);
12556             } else {
12557                 ret = -host_to_target_errno(errno);
12558             }
12559         }
12560         return ret;
12561 #endif
12562 #endif /* CONFIG_SPLICE */
12563 #ifdef CONFIG_EVENTFD
12564 #if defined(TARGET_NR_eventfd)
12565     case TARGET_NR_eventfd:
12566         ret = get_errno(eventfd(arg1, 0));
12567         if (ret >= 0) {
12568             fd_trans_register(ret, &target_eventfd_trans);
12569         }
12570         return ret;
12571 #endif
12572 #if defined(TARGET_NR_eventfd2)
12573     case TARGET_NR_eventfd2:
12574     {
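              /*
               * The guest's O_NONBLOCK/O_CLOEXEC encodings need not match the
               * host's, so translate those two flags individually and pass the
               * remaining bits through unchanged.
               */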
12575         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12576         if (arg2 & TARGET_O_NONBLOCK) {
12577             host_flags |= O_NONBLOCK;
12578         }
12579         if (arg2 & TARGET_O_CLOEXEC) {
12580             host_flags |= O_CLOEXEC;
12581         }
12582         ret = get_errno(eventfd(arg1, host_flags));
12583         if (ret >= 0) {
12584             fd_trans_register(ret, &target_eventfd_trans);
12585         }
12586         return ret;
12587     }
12588 #endif
12589 #endif /* CONFIG_EVENTFD  */
12590 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12591     case TARGET_NR_fallocate:
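              /*
               * On 32-bit ABIs the 64-bit offset and length arrive as register
               * pairs and are reassembled with target_offset64().
               */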
12592 #if TARGET_ABI_BITS == 32
12593         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12594                                   target_offset64(arg5, arg6)));
12595 #else
12596         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12597 #endif
12598         return ret;
12599 #endif
12600 #if defined(CONFIG_SYNC_FILE_RANGE)
12601 #if defined(TARGET_NR_sync_file_range)
12602     case TARGET_NR_sync_file_range:
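              /*
               * As with fallocate, 32-bit ABIs pass the 64-bit offset and count
               * as register pairs; on MIPS the pairs start at arg3 (with the
               * flags in arg7) because of how 64-bit arguments are aligned in
               * argument registers there.
               */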
12603 #if TARGET_ABI_BITS == 32
12604 #if defined(TARGET_MIPS)
12605         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12606                                         target_offset64(arg5, arg6), arg7));
12607 #else
12608         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12609                                         target_offset64(arg4, arg5), arg6));
12610 #endif /* !TARGET_MIPS */
12611 #else
12612         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12613 #endif
12614         return ret;
12615 #endif
12616 #if defined(TARGET_NR_sync_file_range2) || \
12617     defined(TARGET_NR_arm_sync_file_range)
12618 #if defined(TARGET_NR_sync_file_range2)
12619     case TARGET_NR_sync_file_range2:
12620 #endif
12621 #if defined(TARGET_NR_arm_sync_file_range)
12622     case TARGET_NR_arm_sync_file_range:
12623 #endif
12624         /* This is like sync_file_range but the arguments are reordered */
12625 #if TARGET_ABI_BITS == 32
12626         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12627                                         target_offset64(arg5, arg6), arg2));
12628 #else
12629         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12630 #endif
12631         return ret;
12632 #endif
12633 #endif
12634 #if defined(TARGET_NR_signalfd4)
12635     case TARGET_NR_signalfd4:
12636         return do_signalfd4(arg1, arg2, arg4);
12637 #endif
12638 #if defined(TARGET_NR_signalfd)
12639     case TARGET_NR_signalfd:
12640         return do_signalfd4(arg1, arg2, 0);
12641 #endif
12642 #if defined(CONFIG_EPOLL)
12643 #if defined(TARGET_NR_epoll_create)
12644     case TARGET_NR_epoll_create:
12645         return get_errno(epoll_create(arg1));
12646 #endif
12647 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12648     case TARGET_NR_epoll_create1:
12649         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12650 #endif
12651 #if defined(TARGET_NR_epoll_ctl)
12652     case TARGET_NR_epoll_ctl:
12653     {
12654         struct epoll_event ep;
12655         struct epoll_event *epp = 0;
12656         if (arg4) {
12657             if (arg2 != EPOLL_CTL_DEL) {
12658                 struct target_epoll_event *target_ep;
12659                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12660                     return -TARGET_EFAULT;
12661                 }
12662                 ep.events = tswap32(target_ep->events);
12663                 /*
12664                  * The epoll_data_t union is just opaque data to the kernel,
12665                  * so we transfer all 64 bits across and need not worry what
12666                  * actual data type it is.
12667                  */
12668                 ep.data.u64 = tswap64(target_ep->data.u64);
12669                 unlock_user_struct(target_ep, arg4, 0);
12670             }
12671             /*
12672              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12673              * non-null pointer even though the argument is ignored, so
12674              * forward a valid host pointer whenever the guest supplied one.
12675              */
12676             epp = &ep;
12677         }
12678         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12679     }
12680 #endif
12681 
12682 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12683 #if defined(TARGET_NR_epoll_wait)
12684     case TARGET_NR_epoll_wait:
12685 #endif
12686 #if defined(TARGET_NR_epoll_pwait)
12687     case TARGET_NR_epoll_pwait:
12688 #endif
12689     {
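              /*
               * Both variants funnel through safe_epoll_pwait(); plain
               * epoll_wait simply passes a NULL signal mask. Events are
               * collected in a host-side buffer and converted to the target
               * layout only on success.
               */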
12690         struct target_epoll_event *target_ep;
12691         struct epoll_event *ep;
12692         int epfd = arg1;
12693         int maxevents = arg3;
12694         int timeout = arg4;
12695 
12696         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12697             return -TARGET_EINVAL;
12698         }
12699 
12700         target_ep = lock_user(VERIFY_WRITE, arg2,
12701                               maxevents * sizeof(struct target_epoll_event), 1);
12702         if (!target_ep) {
12703             return -TARGET_EFAULT;
12704         }
12705 
12706         ep = g_try_new(struct epoll_event, maxevents);
12707         if (!ep) {
12708             unlock_user(target_ep, arg2, 0);
12709             return -TARGET_ENOMEM;
12710         }
12711 
12712         switch (num) {
12713 #if defined(TARGET_NR_epoll_pwait)
12714         case TARGET_NR_epoll_pwait:
12715         {
12716             target_sigset_t *target_set;
12717             sigset_t _set, *set = &_set;
12718 
12719             if (arg5) {
12720                 if (arg6 != sizeof(target_sigset_t)) {
12721                     ret = -TARGET_EINVAL;
12722                     break;
12723                 }
12724 
12725                 target_set = lock_user(VERIFY_READ, arg5,
12726                                        sizeof(target_sigset_t), 1);
12727                 if (!target_set) {
12728                     ret = -TARGET_EFAULT;
12729                     break;
12730                 }
12731                 target_to_host_sigset(set, target_set);
12732                 unlock_user(target_set, arg5, 0);
12733             } else {
12734                 set = NULL;
12735             }
12736 
12737             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12738                                              set, SIGSET_T_SIZE));
12739             break;
12740         }
12741 #endif
12742 #if defined(TARGET_NR_epoll_wait)
12743         case TARGET_NR_epoll_wait:
12744             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12745                                              NULL, 0));
12746             break;
12747 #endif
12748         default:
12749             ret = -TARGET_ENOSYS;
12750         }
12751         if (!is_error(ret)) {
12752             int i;
12753             for (i = 0; i < ret; i++) {
12754                 target_ep[i].events = tswap32(ep[i].events);
12755                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12756             }
12757             unlock_user(target_ep, arg2,
12758                         ret * sizeof(struct target_epoll_event));
12759         } else {
12760             unlock_user(target_ep, arg2, 0);
12761         }
12762         g_free(ep);
12763         return ret;
12764     }
12765 #endif
12766 #endif
12767 #ifdef TARGET_NR_prlimit64
12768     case TARGET_NR_prlimit64:
12769     {
12770         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12771         struct target_rlimit64 *target_rnew, *target_rold;
12772         struct host_rlimit64 rnew, rold, *rnewp = 0;
12773         int resource = target_to_host_resource(arg2);
12774 
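              /*
               * New limits for RLIMIT_AS/DATA/STACK are deliberately not
               * forwarded, presumably so that the guest cannot shrink limits
               * that QEMU itself needs in order to keep running.
               */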
12775         if (arg3 && (resource != RLIMIT_AS &&
12776                      resource != RLIMIT_DATA &&
12777                      resource != RLIMIT_STACK)) {
12778             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12779                 return -TARGET_EFAULT;
12780             }
12781             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12782             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12783             unlock_user_struct(target_rnew, arg3, 0);
12784             rnewp = &rnew;
12785         }
12786 
12787         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12788         if (!is_error(ret) && arg4) {
12789             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12790                 return -TARGET_EFAULT;
12791             }
12792             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12793             target_rold->rlim_max = tswap64(rold.rlim_max);
12794             unlock_user_struct(target_rold, arg4, 1);
12795         }
12796         return ret;
12797     }
12798 #endif
12799 #ifdef TARGET_NR_gethostname
12800     case TARGET_NR_gethostname:
12801     {
12802         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12803         if (name) {
12804             ret = get_errno(gethostname(name, arg2));
12805             unlock_user(name, arg1, arg2);
12806         } else {
12807             ret = -TARGET_EFAULT;
12808         }
12809         return ret;
12810     }
12811 #endif
12812 #ifdef TARGET_NR_atomic_cmpxchg_32
12813     case TARGET_NR_atomic_cmpxchg_32:
12814     {
12815         /* should use start_exclusive from main.c */
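              /*
               * Emulated compare-and-exchange on guest memory: if the word at
               * arg6 equals arg2 it is replaced with arg1, and the old value
               * is returned. Note that this is not done atomically here.
               */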
12816         abi_ulong mem_value;
12817         if (get_user_u32(mem_value, arg6)) {
12818             target_siginfo_t info;
12819             info.si_signo = SIGSEGV;
12820             info.si_errno = 0;
12821             info.si_code = TARGET_SEGV_MAPERR;
12822             info._sifields._sigfault._addr = arg6;
12823             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12824                          QEMU_SI_FAULT, &info);
12825             ret = 0xdeadbeef;
12826 
12827         }
12828         if (mem_value == arg2) {
12829             put_user_u32(arg1, arg6);
              }
12830         return mem_value;
12831     }
12832 #endif
12833 #ifdef TARGET_NR_atomic_barrier
12834     case TARGET_NR_atomic_barrier:
12835         /* Like the kernel implementation and the QEMU ARM barrier,
12836            treat this as a no-op. */
12837         return 0;
12838 #endif
12839 
12840 #ifdef TARGET_NR_timer_create
12841     case TARGET_NR_timer_create:
12842     {
12843         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12844 
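              /*
               * Guest timer IDs are indices into the g_posix_timers[] table,
               * tagged with TIMER_MAGIC so that get_timer_id() can reject
               * values the guest did not obtain from timer_create.
               */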
12845         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12846 
12847         int clkid = arg1;
12848         int timer_index = next_free_host_timer();
12849 
12850         if (timer_index < 0) {
12851             ret = -TARGET_EAGAIN;
12852         } else {
12853             timer_t *phtimer = g_posix_timers + timer_index;
12854 
12855             if (arg2) {
12856                 phost_sevp = &host_sevp;
12857                 ret = target_to_host_sigevent(phost_sevp, arg2);
12858                 if (ret != 0) {
12859                     return ret;
12860                 }
12861             }
12862 
12863             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12864             if (ret) {
12865                 phtimer = NULL;
12866             } else {
12867                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12868                     return -TARGET_EFAULT;
12869                 }
12870             }
12871         }
12872         return ret;
12873     }
12874 #endif
12875 
12876 #ifdef TARGET_NR_timer_settime
12877     case TARGET_NR_timer_settime:
12878     {
12879         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12880          * struct itimerspec * old_value */
12881         target_timer_t timerid = get_timer_id(arg1);
12882 
12883         if (timerid < 0) {
12884             ret = timerid;
12885         } else if (arg3 == 0) {
12886             ret = -TARGET_EINVAL;
12887         } else {
12888             timer_t htimer = g_posix_timers[timerid];
12889             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12890 
12891             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12892                 return -TARGET_EFAULT;
12893             }
12894             ret = get_errno(
12895                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12896             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12897                 return -TARGET_EFAULT;
12898             }
12899         }
12900         return ret;
12901     }
12902 #endif
12903 
12904 #ifdef TARGET_NR_timer_settime64
12905     case TARGET_NR_timer_settime64:
12906     {
12907         target_timer_t timerid = get_timer_id(arg1);
12908 
12909         if (timerid < 0) {
12910             ret = timerid;
12911         } else if (arg3 == 0) {
12912             ret = -TARGET_EINVAL;
12913         } else {
12914             timer_t htimer = g_posix_timers[timerid];
12915             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12916 
12917             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12918                 return -TARGET_EFAULT;
12919             }
12920             ret = get_errno(
12921                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12922             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12923                 return -TARGET_EFAULT;
12924             }
12925         }
12926         return ret;
12927     }
12928 #endif
12929 
12930 #ifdef TARGET_NR_timer_gettime
12931     case TARGET_NR_timer_gettime:
12932     {
12933         /* args: timer_t timerid, struct itimerspec *curr_value */
12934         target_timer_t timerid = get_timer_id(arg1);
12935 
12936         if (timerid < 0) {
12937             ret = timerid;
12938         } else if (!arg2) {
12939             ret = -TARGET_EFAULT;
12940         } else {
12941             timer_t htimer = g_posix_timers[timerid];
12942             struct itimerspec hspec;
12943             ret = get_errno(timer_gettime(htimer, &hspec));
12944 
12945             if (host_to_target_itimerspec(arg2, &hspec)) {
12946                 ret = -TARGET_EFAULT;
12947             }
12948         }
12949         return ret;
12950     }
12951 #endif
12952 
12953 #ifdef TARGET_NR_timer_gettime64
12954     case TARGET_NR_timer_gettime64:
12955     {
12956         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12957         target_timer_t timerid = get_timer_id(arg1);
12958 
12959         if (timerid < 0) {
12960             ret = timerid;
12961         } else if (!arg2) {
12962             ret = -TARGET_EFAULT;
12963         } else {
12964             timer_t htimer = g_posix_timers[timerid];
12965             struct itimerspec hspec;
12966             ret = get_errno(timer_gettime(htimer, &hspec));
12967 
12968             if (host_to_target_itimerspec64(arg2, &hspec)) {
12969                 ret = -TARGET_EFAULT;
12970             }
12971         }
12972         return ret;
12973     }
12974 #endif
12975 
12976 #ifdef TARGET_NR_timer_getoverrun
12977     case TARGET_NR_timer_getoverrun:
12978     {
12979         /* args: timer_t timerid */
12980         target_timer_t timerid = get_timer_id(arg1);
12981 
12982         if (timerid < 0) {
12983             ret = timerid;
12984         } else {
12985             timer_t htimer = g_posix_timers[timerid];
12986             ret = get_errno(timer_getoverrun(htimer));
12987         }
12988         return ret;
12989     }
12990 #endif
12991 
12992 #ifdef TARGET_NR_timer_delete
12993     case TARGET_NR_timer_delete:
12994     {
12995         /* args: timer_t timerid */
12996         target_timer_t timerid = get_timer_id(arg1);
12997 
12998         if (timerid < 0) {
12999             ret = timerid;
13000         } else {
13001             timer_t htimer = g_posix_timers[timerid];
13002             ret = get_errno(timer_delete(htimer));
13003             g_posix_timers[timerid] = 0;
13004         }
13005         return ret;
13006     }
13007 #endif
13008 
13009 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13010     case TARGET_NR_timerfd_create:
13011         return get_errno(timerfd_create(arg1,
13012                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13013 #endif
13014 
13015 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13016     case TARGET_NR_timerfd_gettime:
13017         {
13018             struct itimerspec its_curr;
13019 
13020             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13021 
13022             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13023                 return -TARGET_EFAULT;
13024             }
13025         }
13026         return ret;
13027 #endif
13028 
13029 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13030     case TARGET_NR_timerfd_gettime64:
13031         {
13032             struct itimerspec its_curr;
13033 
13034             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13035 
13036             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13037                 return -TARGET_EFAULT;
13038             }
13039         }
13040         return ret;
13041 #endif
13042 
13043 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13044     case TARGET_NR_timerfd_settime:
13045         {
13046             struct itimerspec its_new, its_old, *p_new;
13047 
13048             if (arg3) {
13049                 if (target_to_host_itimerspec(&its_new, arg3)) {
13050                     return -TARGET_EFAULT;
13051                 }
13052                 p_new = &its_new;
13053             } else {
13054                 p_new = NULL;
13055             }
13056 
13057             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13058 
13059             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13060                 return -TARGET_EFAULT;
13061             }
13062         }
13063         return ret;
13064 #endif
13065 
13066 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13067     case TARGET_NR_timerfd_settime64:
13068         {
13069             struct itimerspec its_new, its_old, *p_new;
13070 
13071             if (arg3) {
13072                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13073                     return -TARGET_EFAULT;
13074                 }
13075                 p_new = &its_new;
13076             } else {
13077                 p_new = NULL;
13078             }
13079 
13080             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13081 
13082             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13083                 return -TARGET_EFAULT;
13084             }
13085         }
13086         return ret;
13087 #endif
13088 
13089 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13090     case TARGET_NR_ioprio_get:
13091         return get_errno(ioprio_get(arg1, arg2));
13092 #endif
13093 
13094 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13095     case TARGET_NR_ioprio_set:
13096         return get_errno(ioprio_set(arg1, arg2, arg3));
13097 #endif
13098 
13099 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13100     case TARGET_NR_setns:
13101         return get_errno(setns(arg1, arg2));
13102 #endif
13103 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13104     case TARGET_NR_unshare:
13105         return get_errno(unshare(arg1));
13106 #endif
13107 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13108     case TARGET_NR_kcmp:
13109         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13110 #endif
13111 #ifdef TARGET_NR_swapcontext
13112     case TARGET_NR_swapcontext:
13113         /* PowerPC specific.  */
13114         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13115 #endif
13116 #ifdef TARGET_NR_memfd_create
13117     case TARGET_NR_memfd_create:
13118         p = lock_user_string(arg1);
13119         if (!p) {
13120             return -TARGET_EFAULT;
13121         }
13122         ret = get_errno(memfd_create(p, arg2));
13123         fd_trans_unregister(ret);
13124         unlock_user(p, arg1, 0);
13125         return ret;
13126 #endif
13127 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13128     case TARGET_NR_membarrier:
13129         return get_errno(membarrier(arg1, arg2));
13130 #endif
13131 
13132 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13133     case TARGET_NR_copy_file_range:
13134         {
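                  /*
                   * Like splice: optional 64-bit offsets are read from guest
                   * memory, passed to the host by pointer, and written back
                   * only if the call actually copied some data.
                   */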
13135             loff_t inoff, outoff;
13136             loff_t *pinoff = NULL, *poutoff = NULL;
13137 
13138             if (arg2) {
13139                 if (get_user_u64(inoff, arg2)) {
13140                     return -TARGET_EFAULT;
13141                 }
13142                 pinoff = &inoff;
13143             }
13144             if (arg4) {
13145                 if (get_user_u64(outoff, arg4)) {
13146                     return -TARGET_EFAULT;
13147                 }
13148                 poutoff = &outoff;
13149             }
13150             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13151                                                  arg5, arg6));
13152             if (!is_error(ret) && ret > 0) {
13153                 if (arg2) {
13154                     if (put_user_u64(inoff, arg2)) {
13155                         return -TARGET_EFAULT;
13156                     }
13157                 }
13158                 if (arg4) {
13159                     if (put_user_u64(outoff, arg4)) {
13160                         return -TARGET_EFAULT;
13161                     }
13162                 }
13163             }
13164         }
13165         return ret;
13166 #endif
13167 
13168     default:
13169         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13170         return -TARGET_ENOSYS;
13171     }
13172     return ret;
13173 }
13174 
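      /*
       * Guest syscall entry point: record the call for record/replay, print
       * it when strace logging is enabled, dispatch to do_syscall1(), then
       * log and record the result before returning it to the caller.
       */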
13175 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13176                     abi_long arg2, abi_long arg3, abi_long arg4,
13177                     abi_long arg5, abi_long arg6, abi_long arg7,
13178                     abi_long arg8)
13179 {
13180     CPUState *cpu = env_cpu(cpu_env);
13181     abi_long ret;
13182 
13183 #ifdef DEBUG_ERESTARTSYS
13184     /* Debug-only code for exercising the syscall-restart code paths
13185      * in the per-architecture cpu main loops: restart every syscall
13186      * the guest makes once before letting it through.
13187      */
13188     {
13189         static bool flag;
13190         flag = !flag;
13191         if (flag) {
13192             return -TARGET_ERESTARTSYS;
13193         }
13194     }
13195 #endif
13196 
13197     record_syscall_start(cpu, num, arg1,
13198                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13199 
13200     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13201         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13202     }
13203 
13204     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13205                       arg5, arg6, arg7, arg8);
13206 
13207     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13208         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13209                           arg3, arg4, arg5, arg6);
13210     }
13211 
13212     record_syscall_return(cpu, num, ret);
13213     return ret;
13214 }
13215