xref: /openbmc/qemu/linux-user/syscall.c (revision 78721301)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
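/*
 * As a worked example (flag values as typically passed by glibc, not taken
 * from this file): pthread_create() issues a clone with
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * which includes all of CLONE_THREAD_FLAGS and otherwise only bits covered
 * by CLONE_OPTIONAL_THREAD_FLAGS, so none of CLONE_INVALID_THREAD_FLAGS is
 * set and it is handled as thread creation.  A plain fork()-style clone
 * passes only an exit signal in CSIGNAL (plus perhaps some of the
 * CLONE_OPTIONAL_FORK_FLAGS bits) and so passes the fork check instead.
 */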
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
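/*
 * For illustration, an invocation further down such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * expands to a static wrapper
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * i.e. each wrapper simply forwards its arguments to the raw host syscall
 * number aliased below.
 */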
250 
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we implement the guest getdents via the host getdents when available.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
368 static bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
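/*
 * Each entry is { target_mask, target_bits, host_mask, host_bits }.  A rough
 * sketch of how such a table is consumed when translating guest open() flags
 * to host flags (the real helper lives elsewhere in the tree):
 *
 *     unsigned int host_flags = 0;
 *     const bitmask_transtbl *e;
 *     for (e = fcntl_flags_tbl; e->target_mask; e++) {
 *         if ((target_flags & e->target_mask) == e->target_bits) {
 *             host_flags |= e->host_bits;
 *         }
 *     }
 *
 * and symmetrically with the host_* fields for the reverse direction.
 */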
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
412 static int sys_utimensat(int dirfd, const char *pathname,
413                          const struct timespec times[2], int flags)
414 {
415     errno = ENOSYS;
416     return -1;
417 }
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
427 static int sys_renameat2(int oldfd, const char *old,
428                          int newfd, const char *new, int flags)
429 {
430     if (flags == 0) {
431         return renameat(oldfd, old, newfd, new);
432     }
433     errno = ENOSYS;
434     return -1;
435 }
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
444 {
445   return (inotify_init());
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450 {
451   return (inotify_add_watch(fd, pathname, mask));
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd, int32_t wd)
456 {
457   return (inotify_rm_watch(fd, wd));
458 }
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags)
463 {
464   return (inotify_init1(flags));
465 }
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive at runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not match the one used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, };
495 
496 static inline int next_free_host_timer(void)
497 {
498     int k;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
508 #endif
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 /* target_to_host_errno_table[] is initialized from
513  * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 };
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERFKILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
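/*
 * Typical use: wrap a raw host call so that failures come back with the
 * target's errno encoding already applied, e.g.
 *
 *     abi_long ret = get_errno(open(pathname, host_flags));
 *
 * yields the host return value on success and -TARGET_Exxx on failure.
 */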
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
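/*
 * These expand just like the _syscallN macros above; for example
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * produces
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * so potentially-blocking host syscalls go through safe_syscall(), which
 * allows guest signal delivery to interrupt the call so it can be restarted.
 */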
727 
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 #endif
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 #endif
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746               struct timespec *, tsp, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 #endif
749 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750               int, maxevents, int, timeout, const sigset_t *, sigmask,
751               size_t, sigsetsize)
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754               const struct timespec *,timeout,int *,uaddr2,int,val3)
755 #endif
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 #endif
760 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761 safe_syscall2(int, kill, pid_t, pid, int, sig)
762 safe_syscall2(int, tkill, int, tid, int, sig)
763 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767               unsigned long, pos_l, unsigned long, pos_h)
768 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769               unsigned long, pos_l, unsigned long, pos_h)
770 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771               socklen_t, addrlen)
772 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778 safe_syscall2(int, flock, int, fd, int, operation)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781               const struct timespec *, uts, size_t, sigsetsize)
782 #endif
783 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784               int, flags)
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep, const struct timespec *, req,
787               struct timespec *, rem)
788 #endif
789 #if defined(TARGET_NR_clock_nanosleep) || \
790     defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792               const struct timespec *, req, struct timespec *, rem)
793 #endif
794 #ifdef __NR_ipc
795 #ifdef __s390x__
796 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797               void *, ptr)
798 #else
799 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800               void *, ptr, long, fifth)
801 #endif
802 #endif
803 #ifdef __NR_msgsnd
804 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805               int, flags)
806 #endif
807 #ifdef __NR_msgrcv
808 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809               long, msgtype, int, flags)
810 #endif
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813               unsigned, nsops, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedsend) || \
816     defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818               size_t, len, unsigned, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_mq_timedreceive) || \
821     defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823               size_t, len, unsigned *, prio, const struct timespec *, timeout)
824 #endif
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827               int, outfd, loff_t *, poutoff, size_t, length,
828               unsigned int, flags)
829 #endif
830 
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832  * "third argument might be integer or pointer or not present" behaviour of
833  * the libc function.
834  */
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838  *  - use the flock64 struct rather than unsuffixed flock
839  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840  */
841 #ifdef __NR_fcntl64
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843 #else
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845 #endif
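/*
 * For instance, a file-lock query should be issued as
 *
 *     struct flock64 fl;
 *     safe_fcntl(fd, F_GETLK64, &fl);
 *
 * so the same 64-bit-offset layout works whether the underlying host syscall
 * is fcntl64 (32-bit hosts) or fcntl (64-bit hosts).
 */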
846 
847 static inline int host_to_target_sock_type(int host_type)
848 {
849     int target_type;
850 
851     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852     case SOCK_DGRAM:
853         target_type = TARGET_SOCK_DGRAM;
854         break;
855     case SOCK_STREAM:
856         target_type = TARGET_SOCK_STREAM;
857         break;
858     default:
859         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860         break;
861     }
862 
863 #if defined(SOCK_CLOEXEC)
864     if (host_type & SOCK_CLOEXEC) {
865         target_type |= TARGET_SOCK_CLOEXEC;
866     }
867 #endif
868 
869 #if defined(SOCK_NONBLOCK)
870     if (host_type & SOCK_NONBLOCK) {
871         target_type |= TARGET_SOCK_NONBLOCK;
872     }
873 #endif
874 
875     return target_type;
876 }
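/*
 * For example a host type of SOCK_STREAM | SOCK_NONBLOCK maps to
 * TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK, while an unrecognised base type
 * in the low nibble is passed through to the guest unchanged.
 */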
877 
878 static abi_ulong target_brk;
879 static abi_ulong target_original_brk;
880 static abi_ulong brk_page;
881 
882 void target_set_brk(abi_ulong new_brk)
883 {
884     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885     brk_page = HOST_PAGE_ALIGN(target_brk);
886 }
887 
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
890 
891 /* do_brk() must return target values and target errnos. */
892 abi_long do_brk(abi_ulong new_brk)
893 {
894     abi_long mapped_addr;
895     abi_ulong new_alloc_size;
896 
897     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
898 
899     if (!new_brk) {
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
901         return target_brk;
902     }
903     if (new_brk < target_original_brk) {
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
905                    target_brk);
906         return target_brk;
907     }
908 
909     /* If the new brk is less than the highest page reserved to the
910      * target heap allocation, set it and we're almost done...  */
911     if (new_brk <= brk_page) {
912         /* Heap contents are initialized to zero, as for anonymous
913          * mapped pages.  */
914         if (new_brk > target_brk) {
915             memset(g2h(target_brk), 0, new_brk - target_brk);
916         }
917         target_brk = new_brk;
918         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
919         return target_brk;
920     }
921 
922     /* We need to allocate more memory after the brk... Note that
923      * we don't use MAP_FIXED because that will map over the top of
924      * any existing mapping (like the one with the host libc or qemu
925      * itself); instead we treat "mapped but at wrong address" as
926      * a failure and unmap again.
927      */
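    /*
     * Worked example (assuming a 4 KiB host page size): if target_brk and
     * brk_page are both 0x1000 and the guest asks for new_brk == 0x2800,
     * then new_alloc_size == HOST_PAGE_ALIGN(0x1800) == 0x2000, two pages
     * are mapped at 0x1000, and on success target_brk becomes 0x2800 with
     * brk_page rounded up to 0x3000.
     */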
928     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
929     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
930                                         PROT_READ|PROT_WRITE,
931                                         MAP_ANON|MAP_PRIVATE, 0, 0));
932 
933     if (mapped_addr == brk_page) {
934         /* Heap contents are initialized to zero, as for anonymous
935          * mapped pages.  Technically the new pages are already
936          * initialized to zero since they *are* anonymous mapped
937          * pages, however we have to take care with the contents that
938  * come from the remaining part of the previous page: it may
939  * contain garbage data from a previous heap usage (the heap was grown
940  * and then shrunk).  */
941         memset(g2h(target_brk), 0, brk_page - target_brk);
942 
943         target_brk = new_brk;
944         brk_page = HOST_PAGE_ALIGN(target_brk);
945         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
946             target_brk);
947         return target_brk;
948     } else if (mapped_addr != -1) {
949         /* Mapped but at wrong address, meaning there wasn't actually
950          * enough space for this brk.
951          */
952         target_munmap(mapped_addr, new_alloc_size);
953         mapped_addr = -1;
954         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
955     }
956     else {
957         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
958     }
959 
960 #if defined(TARGET_ALPHA)
961     /* We (partially) emulate OSF/1 on Alpha, which requires we
962        return a proper errno, not an unchanged brk value.  */
963     return -TARGET_ENOMEM;
964 #endif
965     /* For everything else, return the previous break. */
966     return target_brk;
967 }
968 
969 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
970     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
971 static inline abi_long copy_from_user_fdset(fd_set *fds,
972                                             abi_ulong target_fds_addr,
973                                             int n)
974 {
975     int i, nw, j, k;
976     abi_ulong b, *target_fds;
977 
978     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
979     if (!(target_fds = lock_user(VERIFY_READ,
980                                  target_fds_addr,
981                                  sizeof(abi_ulong) * nw,
982                                  1)))
983         return -TARGET_EFAULT;
984 
985     FD_ZERO(fds);
986     k = 0;
987     for (i = 0; i < nw; i++) {
988         /* grab the abi_ulong */
989         __get_user(b, &target_fds[i]);
990         for (j = 0; j < TARGET_ABI_BITS; j++) {
991             /* check the bit inside the abi_ulong */
992             if ((b >> j) & 1)
993                 FD_SET(k, fds);
994             k++;
995         }
996     }
997 
998     unlock_user(target_fds, target_fds_addr, 0);
999 
1000     return 0;
1001 }
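/*
 * Layout note: guest fd k lives in abi_ulong word k / TARGET_ABI_BITS at
 * bit k % TARGET_ABI_BITS, so e.g. with TARGET_ABI_BITS == 32, fd 37 is
 * bit 5 of target_fds[1].  copy_to_user_fdset() below writes the same
 * layout back out.
 */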
1002 
1003 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1004                                                  abi_ulong target_fds_addr,
1005                                                  int n)
1006 {
1007     if (target_fds_addr) {
1008         if (copy_from_user_fdset(fds, target_fds_addr, n))
1009             return -TARGET_EFAULT;
1010         *fds_ptr = fds;
1011     } else {
1012         *fds_ptr = NULL;
1013     }
1014     return 0;
1015 }
1016 
1017 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1018                                           const fd_set *fds,
1019                                           int n)
1020 {
1021     int i, nw, j, k;
1022     abi_long v;
1023     abi_ulong *target_fds;
1024 
1025     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1026     if (!(target_fds = lock_user(VERIFY_WRITE,
1027                                  target_fds_addr,
1028                                  sizeof(abi_ulong) * nw,
1029                                  0)))
1030         return -TARGET_EFAULT;
1031 
1032     k = 0;
1033     for (i = 0; i < nw; i++) {
1034         v = 0;
1035         for (j = 0; j < TARGET_ABI_BITS; j++) {
1036             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1037             k++;
1038         }
1039         __put_user(v, &target_fds[i]);
1040     }
1041 
1042     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1043 
1044     return 0;
1045 }
1046 #endif
1047 
1048 #if defined(__alpha__)
1049 #define HOST_HZ 1024
1050 #else
1051 #define HOST_HZ 100
1052 #endif
1053 
1054 static inline abi_long host_to_target_clock_t(long ticks)
1055 {
1056 #if HOST_HZ == TARGET_HZ
1057     return ticks;
1058 #else
1059     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1060 #endif
1061 }
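/*
 * E.g. an Alpha host (HOST_HZ == 1024) reporting to a 100 Hz target converts
 * 2048 host ticks to (2048 * 100) / 1024 == 200 target ticks.
 */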
1062 
1063 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1064                                              const struct rusage *rusage)
1065 {
1066     struct target_rusage *target_rusage;
1067 
1068     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1069         return -TARGET_EFAULT;
1070     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1071     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1072     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1073     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1074     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1075     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1076     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1077     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1078     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1079     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1080     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1081     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1082     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1083     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1084     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1085     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1086     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1087     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1088     unlock_user_struct(target_rusage, target_addr, 1);
1089 
1090     return 0;
1091 }
1092 
1093 #ifdef TARGET_NR_setrlimit
1094 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1095 {
1096     abi_ulong target_rlim_swap;
1097     rlim_t result;
1098 
1099     target_rlim_swap = tswapal(target_rlim);
1100     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1101         return RLIM_INFINITY;
1102 
1103     result = target_rlim_swap;
1104     if (target_rlim_swap != (rlim_t)result)
1105         return RLIM_INFINITY;
1106 
1107     return result;
1108 }
1109 #endif
1110 
1111 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1112 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1113 {
1114     abi_ulong target_rlim_swap;
1115     abi_ulong result;
1116 
1117     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1118         target_rlim_swap = TARGET_RLIM_INFINITY;
1119     else
1120         target_rlim_swap = rlim;
1121     result = tswapal(target_rlim_swap);
1122 
1123     return result;
1124 }
1125 #endif
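/*
 * Both rlimit conversions above saturate rather than truncate: a limit that
 * does not fit in the destination type is reported as the destination's
 * RLIM_INFINITY (or TARGET_RLIM_INFINITY) instead of a wrapped value.
 */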
1126 
1127 static inline int target_to_host_resource(int code)
1128 {
1129     switch (code) {
1130     case TARGET_RLIMIT_AS:
1131         return RLIMIT_AS;
1132     case TARGET_RLIMIT_CORE:
1133         return RLIMIT_CORE;
1134     case TARGET_RLIMIT_CPU:
1135         return RLIMIT_CPU;
1136     case TARGET_RLIMIT_DATA:
1137         return RLIMIT_DATA;
1138     case TARGET_RLIMIT_FSIZE:
1139         return RLIMIT_FSIZE;
1140     case TARGET_RLIMIT_LOCKS:
1141         return RLIMIT_LOCKS;
1142     case TARGET_RLIMIT_MEMLOCK:
1143         return RLIMIT_MEMLOCK;
1144     case TARGET_RLIMIT_MSGQUEUE:
1145         return RLIMIT_MSGQUEUE;
1146     case TARGET_RLIMIT_NICE:
1147         return RLIMIT_NICE;
1148     case TARGET_RLIMIT_NOFILE:
1149         return RLIMIT_NOFILE;
1150     case TARGET_RLIMIT_NPROC:
1151         return RLIMIT_NPROC;
1152     case TARGET_RLIMIT_RSS:
1153         return RLIMIT_RSS;
1154     case TARGET_RLIMIT_RTPRIO:
1155         return RLIMIT_RTPRIO;
1156     case TARGET_RLIMIT_SIGPENDING:
1157         return RLIMIT_SIGPENDING;
1158     case TARGET_RLIMIT_STACK:
1159         return RLIMIT_STACK;
1160     default:
1161         return code;
1162     }
1163 }
1164 
1165 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1166                                               abi_ulong target_tv_addr)
1167 {
1168     struct target_timeval *target_tv;
1169 
1170     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1171         return -TARGET_EFAULT;
1172     }
1173 
1174     __get_user(tv->tv_sec, &target_tv->tv_sec);
1175     __get_user(tv->tv_usec, &target_tv->tv_usec);
1176 
1177     unlock_user_struct(target_tv, target_tv_addr, 0);
1178 
1179     return 0;
1180 }
1181 
1182 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1183                                             const struct timeval *tv)
1184 {
1185     struct target_timeval *target_tv;
1186 
1187     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1188         return -TARGET_EFAULT;
1189     }
1190 
1191     __put_user(tv->tv_sec, &target_tv->tv_sec);
1192     __put_user(tv->tv_usec, &target_tv->tv_usec);
1193 
1194     unlock_user_struct(target_tv, target_tv_addr, 1);
1195 
1196     return 0;
1197 }
1198 
1199 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1200 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1201                                                 abi_ulong target_tv_addr)
1202 {
1203     struct target__kernel_sock_timeval *target_tv;
1204 
1205     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1206         return -TARGET_EFAULT;
1207     }
1208 
1209     __get_user(tv->tv_sec, &target_tv->tv_sec);
1210     __get_user(tv->tv_usec, &target_tv->tv_usec);
1211 
1212     unlock_user_struct(target_tv, target_tv_addr, 0);
1213 
1214     return 0;
1215 }
1216 #endif
1217 
1218 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1219                                               const struct timeval *tv)
1220 {
1221     struct target__kernel_sock_timeval *target_tv;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __put_user(tv->tv_sec, &target_tv->tv_sec);
1228     __put_user(tv->tv_usec, &target_tv->tv_usec);
1229 
1230     unlock_user_struct(target_tv, target_tv_addr, 1);
1231 
1232     return 0;
1233 }
1234 
1235 #if defined(TARGET_NR_futex) || \
1236     defined(TARGET_NR_rt_sigtimedwait) || \
1237     defined(TARGET_NR_pselect6) || \
1238     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1239     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1240     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1241     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1242     defined(TARGET_NR_timer_settime) || \
1243     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1244 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1245                                                abi_ulong target_addr)
1246 {
1247     struct target_timespec *target_ts;
1248 
1249     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1250         return -TARGET_EFAULT;
1251     }
1252     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1253     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1254     unlock_user_struct(target_ts, target_addr, 0);
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1260     defined(TARGET_NR_timer_settime64) || \
1261     defined(TARGET_NR_mq_timedsend_time64) || \
1262     defined(TARGET_NR_mq_timedreceive_time64) || \
1263     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1264     defined(TARGET_NR_clock_nanosleep_time64) || \
1265     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1266     defined(TARGET_NR_utimensat) || \
1267     defined(TARGET_NR_utimensat_time64) || \
1268     defined(TARGET_NR_semtimedop_time64) || \
1269     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1270 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1271                                                  abi_ulong target_addr)
1272 {
1273     struct target__kernel_timespec *target_ts;
1274 
1275     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1276         return -TARGET_EFAULT;
1277     }
1278     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1279     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1280     /* In 32-bit mode, this drops the padding. */
1281     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1282     unlock_user_struct(target_ts, target_addr, 0);
1283     return 0;
1284 }
1285 #endif
1286 
1287 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1288                                                struct timespec *host_ts)
1289 {
1290     struct target_timespec *target_ts;
1291 
1292     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1293         return -TARGET_EFAULT;
1294     }
1295     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1296     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1297     unlock_user_struct(target_ts, target_addr, 1);
1298     return 0;
1299 }
1300 
1301 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1302                                                  struct timespec *host_ts)
1303 {
1304     struct target__kernel_timespec *target_ts;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1307         return -TARGET_EFAULT;
1308     }
1309     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1310     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1311     unlock_user_struct(target_ts, target_addr, 1);
1312     return 0;
1313 }
1314 
1315 #if defined(TARGET_NR_gettimeofday)
1316 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1317                                              struct timezone *tz)
1318 {
1319     struct target_timezone *target_tz;
1320 
1321     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1322         return -TARGET_EFAULT;
1323     }
1324 
1325     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1327 
1328     unlock_user_struct(target_tz, target_tz_addr, 1);
1329 
1330     return 0;
1331 }
1332 #endif
1333 
1334 #if defined(TARGET_NR_settimeofday)
1335 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1336                                                abi_ulong target_tz_addr)
1337 {
1338     struct target_timezone *target_tz;
1339 
1340     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1341         return -TARGET_EFAULT;
1342     }
1343 
1344     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1345     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1346 
1347     unlock_user_struct(target_tz, target_tz_addr, 0);
1348 
1349     return 0;
1350 }
1351 #endif
1352 
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1354 #include <mqueue.h>
1355 
1356 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1357                                               abi_ulong target_mq_attr_addr)
1358 {
1359     struct target_mq_attr *target_mq_attr;
1360 
1361     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1362                           target_mq_attr_addr, 1))
1363         return -TARGET_EFAULT;
1364 
1365     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1369 
1370     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1371 
1372     return 0;
1373 }
1374 
1375 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1376                                             const struct mq_attr *attr)
1377 {
1378     struct target_mq_attr *target_mq_attr;
1379 
1380     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1381                           target_mq_attr_addr, 0))
1382         return -TARGET_EFAULT;
1383 
1384     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1385     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1386     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1387     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1388 
1389     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1390 
1391     return 0;
1392 }
1393 #endif
1394 
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1396 /* do_select() must return target values and target errnos. */
1397 static abi_long do_select(int n,
1398                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1399                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1400 {
1401     fd_set rfds, wfds, efds;
1402     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1403     struct timeval tv;
1404     struct timespec ts, *ts_ptr;
1405     abi_long ret;
1406 
1407     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1412     if (ret) {
1413         return ret;
1414     }
1415     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1416     if (ret) {
1417         return ret;
1418     }
1419 
1420     if (target_tv_addr) {
1421         if (copy_from_user_timeval(&tv, target_tv_addr))
1422             return -TARGET_EFAULT;
1423         ts.tv_sec = tv.tv_sec;
1424         ts.tv_nsec = tv.tv_usec * 1000;
1425         ts_ptr = &ts;
1426     } else {
1427         ts_ptr = NULL;
1428     }
1429 
1430     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1431                                   ts_ptr, NULL));
1432 
1433     if (!is_error(ret)) {
1434         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1435             return -TARGET_EFAULT;
1436         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1437             return -TARGET_EFAULT;
1438         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1439             return -TARGET_EFAULT;
1440 
1441         if (target_tv_addr) {
1442             tv.tv_sec = ts.tv_sec;
1443             tv.tv_usec = ts.tv_nsec / 1000;
1444             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1445                 return -TARGET_EFAULT;
1446             }
1447         }
1448     }
1449 
1450     return ret;
1451 }
1452 
1453 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1454 static abi_long do_old_select(abi_ulong arg1)
1455 {
1456     struct target_sel_arg_struct *sel;
1457     abi_ulong inp, outp, exp, tvp;
1458     long nsel;
1459 
1460     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1461         return -TARGET_EFAULT;
1462     }
1463 
1464     nsel = tswapal(sel->n);
1465     inp = tswapal(sel->inp);
1466     outp = tswapal(sel->outp);
1467     exp = tswapal(sel->exp);
1468     tvp = tswapal(sel->tvp);
1469 
1470     unlock_user_struct(sel, arg1, 0);
1471 
1472     return do_select(nsel, inp, outp, exp, tvp);
1473 }
1474 #endif
1475 #endif
1476 
1477 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1478 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1479                             abi_long arg4, abi_long arg5, abi_long arg6,
1480                             bool time64)
1481 {
1482     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1483     fd_set rfds, wfds, efds;
1484     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1485     struct timespec ts, *ts_ptr;
1486     abi_long ret;
1487 
1488     /*
1489      * The 6th arg is actually two args smashed together,
1490      * so we cannot use the C library.
1491      */
1492     sigset_t set;
1493     struct {
1494         sigset_t *set;
1495         size_t size;
1496     } sig, *sig_ptr;
1497 
1498     abi_ulong arg_sigset, arg_sigsize, *arg7;
1499     target_sigset_t *target_sigset;
1500 
1501     n = arg1;
1502     rfd_addr = arg2;
1503     wfd_addr = arg3;
1504     efd_addr = arg4;
1505     ts_addr = arg5;
1506 
1507     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1508     if (ret) {
1509         return ret;
1510     }
1511     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1512     if (ret) {
1513         return ret;
1514     }
1515     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1516     if (ret) {
1517         return ret;
1518     }
1519 
1520     /*
1521      * This takes a timespec, and not a timeval, so we cannot
1522      * use the do_select() helper ...
1523      */
1524     if (ts_addr) {
1525         if (time64) {
1526             if (target_to_host_timespec64(&ts, ts_addr)) {
1527                 return -TARGET_EFAULT;
1528             }
1529         } else {
1530             if (target_to_host_timespec(&ts, ts_addr)) {
1531                 return -TARGET_EFAULT;
1532             }
1533         }
1534         ts_ptr = &ts;
1535     } else {
1536         ts_ptr = NULL;
1537     }
1538 
1539     /* Extract the two packed args for the sigset */
1540     if (arg6) {
1541         sig_ptr = &sig;
1542         sig.size = SIGSET_T_SIZE;
1543 
1544         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1545         if (!arg7) {
1546             return -TARGET_EFAULT;
1547         }
1548         arg_sigset = tswapal(arg7[0]);
1549         arg_sigsize = tswapal(arg7[1]);
1550         unlock_user(arg7, arg6, 0);
1551 
1552         if (arg_sigset) {
1553             sig.set = &set;
1554             if (arg_sigsize != sizeof(*target_sigset)) {
1555                 /* Like the kernel, we enforce correct size sigsets */
1556                 return -TARGET_EINVAL;
1557             }
1558             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1559                                       sizeof(*target_sigset), 1);
1560             if (!target_sigset) {
1561                 return -TARGET_EFAULT;
1562             }
1563             target_to_host_sigset(&set, target_sigset);
1564             unlock_user(target_sigset, arg_sigset, 0);
1565         } else {
1566             sig.set = NULL;
1567         }
1568     } else {
1569         sig_ptr = NULL;
1570     }
1571 
1572     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1573                                   ts_ptr, sig_ptr));
1574 
1575     if (!is_error(ret)) {
1576         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1577             return -TARGET_EFAULT;
1578         }
1579         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1580             return -TARGET_EFAULT;
1581         }
1582         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1583             return -TARGET_EFAULT;
1584         }
1585         if (time64) {
1586             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1587                 return -TARGET_EFAULT;
1588             }
1589         } else {
1590             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1591                 return -TARGET_EFAULT;
1592             }
1593         }
1594     }
1595     return ret;
1596 }
1597 #endif
1598 
1599 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1600     defined(TARGET_NR_ppoll_time64)
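/*
 * Common helper for poll, ppoll and ppoll_time64: convert the guest pollfd
 * array and, for ppoll, the timeout timespec and sigset, call safe_ppoll()
 * and write the resulting revents fields back to guest memory.
 */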
1601 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1602                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1603 {
1604     struct target_pollfd *target_pfd;
1605     unsigned int nfds = arg2;
1606     struct pollfd *pfd;
1607     unsigned int i;
1608     abi_long ret;
1609 
1610     pfd = NULL;
1611     target_pfd = NULL;
1612     if (nfds) {
1613         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1614             return -TARGET_EINVAL;
1615         }
1616         target_pfd = lock_user(VERIFY_WRITE, arg1,
1617                                sizeof(struct target_pollfd) * nfds, 1);
1618         if (!target_pfd) {
1619             return -TARGET_EFAULT;
1620         }
1621 
1622         pfd = alloca(sizeof(struct pollfd) * nfds);
1623         for (i = 0; i < nfds; i++) {
1624             pfd[i].fd = tswap32(target_pfd[i].fd);
1625             pfd[i].events = tswap16(target_pfd[i].events);
1626         }
1627     }
1628     if (ppoll) {
1629         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1630         target_sigset_t *target_set;
1631         sigset_t _set, *set = &_set;
1632 
1633         if (arg3) {
1634             if (time64) {
1635                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1636                     unlock_user(target_pfd, arg1, 0);
1637                     return -TARGET_EFAULT;
1638                 }
1639             } else {
1640                 if (target_to_host_timespec(timeout_ts, arg3)) {
1641                     unlock_user(target_pfd, arg1, 0);
1642                     return -TARGET_EFAULT;
1643                 }
1644             }
1645         } else {
1646             timeout_ts = NULL;
1647         }
1648 
1649         if (arg4) {
1650             if (arg5 != sizeof(target_sigset_t)) {
1651                 unlock_user(target_pfd, arg1, 0);
1652                 return -TARGET_EINVAL;
1653             }
1654 
1655             target_set = lock_user(VERIFY_READ, arg4,
1656                                    sizeof(target_sigset_t), 1);
1657             if (!target_set) {
1658                 unlock_user(target_pfd, arg1, 0);
1659                 return -TARGET_EFAULT;
1660             }
1661             target_to_host_sigset(set, target_set);
1662         } else {
1663             set = NULL;
1664         }
1665 
1666         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1667                                    set, SIGSET_T_SIZE));
1668 
1669         if (!is_error(ret) && arg3) {
1670             if (time64) {
1671                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1672                     return -TARGET_EFAULT;
1673                 }
1674             } else {
1675                 if (host_to_target_timespec(arg3, timeout_ts)) {
1676                     return -TARGET_EFAULT;
1677                 }
1678             }
1679         }
1680         if (arg4) {
1681             unlock_user(target_set, arg4, 0);
1682         }
1683     } else {
1684         struct timespec ts, *pts;
1685 
1686         if (arg3 >= 0) {
1687             /* Convert ms to secs, ns */
1688             ts.tv_sec = arg3 / 1000;
1689             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1690             pts = &ts;
1691         } else {
1692             /* A negative poll() timeout means "infinite" */
1693             pts = NULL;
1694         }
1695         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1696     }
1697 
1698     if (!is_error(ret)) {
1699         for (i = 0; i < nfds; i++) {
1700             target_pfd[i].revents = tswap16(pfd[i].revents);
1701         }
1702     }
1703     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1704     return ret;
1705 }
1706 #endif
1707 
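/* Thin wrapper so that pipe2() is only called on hosts that provide it. */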
1708 static abi_long do_pipe2(int host_pipe[], int flags)
1709 {
1710 #ifdef CONFIG_PIPE2
1711     return pipe2(host_pipe, flags);
1712 #else
1713     return -ENOSYS;
1714 #endif
1715 }
1716 
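/*
 * Create a pipe and copy the two descriptors out to the guest, honouring
 * the special return-value conventions some targets use for the original
 * pipe() syscall (second fd returned in a register instead of memory).
 */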
1717 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1718                         int flags, int is_pipe2)
1719 {
1720     int host_pipe[2];
1721     abi_long ret;
1722     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1723 
1724     if (is_error(ret))
1725         return get_errno(ret);
1726 
1727     /* Several targets have special calling conventions for the original
1728        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1729     if (!is_pipe2) {
1730 #if defined(TARGET_ALPHA)
1731         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1732         return host_pipe[0];
1733 #elif defined(TARGET_MIPS)
1734         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1735         return host_pipe[0];
1736 #elif defined(TARGET_SH4)
1737         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1738         return host_pipe[0];
1739 #elif defined(TARGET_SPARC)
1740         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1741         return host_pipe[0];
1742 #endif
1743     }
1744 
1745     if (put_user_s32(host_pipe[0], pipedes)
1746         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1747         return -TARGET_EFAULT;
1748     return get_errno(ret);
1749 }
1750 
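/*
 * Convert a guest IP multicast membership request (ip_mreq or ip_mreqn)
 * to the host ip_mreqn; the interface index is only present and
 * byte-swapped when the guest passed the longer ip_mreqn layout.
 */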
1751 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1752                                               abi_ulong target_addr,
1753                                               socklen_t len)
1754 {
1755     struct target_ip_mreqn *target_smreqn;
1756 
1757     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1758     if (!target_smreqn)
1759         return -TARGET_EFAULT;
1760     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1761     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1762     if (len == sizeof(struct target_ip_mreqn))
1763         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1764     unlock_user(target_smreqn, target_addr, 0);
1765 
1766     return 0;
1767 }
1768 
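/*
 * Copy a sockaddr from guest to host memory, fixing up the address family
 * and the family-specific fields that need byte-swapping (AF_NETLINK,
 * AF_PACKET) or length repair (AF_UNIX sun_path). Per-fd translators
 * registered with fd_trans take precedence.
 */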
1769 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1770                                                abi_ulong target_addr,
1771                                                socklen_t len)
1772 {
1773     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1774     sa_family_t sa_family;
1775     struct target_sockaddr *target_saddr;
1776 
1777     if (fd_trans_target_to_host_addr(fd)) {
1778         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1779     }
1780 
1781     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1782     if (!target_saddr)
1783         return -TARGET_EFAULT;
1784 
1785     sa_family = tswap16(target_saddr->sa_family);
1786 
1787     /* Oops. The caller might send an incomplete sun_path; sun_path
1788      * must be terminated by \0 (see the manual page), but
1789      * unfortunately it is quite common to specify sockaddr_un
1790      * length as "strlen(x->sun_path)" while it should be
1791      * "strlen(...) + 1". We'll fix that here if needed.
1792      * The Linux kernel has a similar feature.
1793      */
1794 
1795     if (sa_family == AF_UNIX) {
1796         if (len < unix_maxlen && len > 0) {
1797             char *cp = (char *)target_saddr;
1798 
1799             if (cp[len - 1] && !cp[len])
1800                 len++;
1801         }
1802         if (len > unix_maxlen)
1803             len = unix_maxlen;
1804     }
1805 
1806     memcpy(addr, target_saddr, len);
1807     addr->sa_family = sa_family;
1808     if (sa_family == AF_NETLINK) {
1809         struct sockaddr_nl *nladdr;
1810 
1811         nladdr = (struct sockaddr_nl *)addr;
1812         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1813         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1814     } else if (sa_family == AF_PACKET) {
1815         struct target_sockaddr_ll *lladdr;
1816 
1817         lladdr = (struct target_sockaddr_ll *)addr;
1818         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1819         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1820     }
1821     unlock_user(target_saddr, target_addr, 0);
1822 
1823     return 0;
1824 }
1825 
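/*
 * Copy a host sockaddr back out to guest memory, byte-swapping the family
 * and the AF_NETLINK, AF_PACKET and AF_INET6 fields that need it.
 */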
1826 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1827                                                struct sockaddr *addr,
1828                                                socklen_t len)
1829 {
1830     struct target_sockaddr *target_saddr;
1831 
1832     if (len == 0) {
1833         return 0;
1834     }
1835     assert(addr);
1836 
1837     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1838     if (!target_saddr)
1839         return -TARGET_EFAULT;
1840     memcpy(target_saddr, addr, len);
1841     if (len >= offsetof(struct target_sockaddr, sa_family) +
1842         sizeof(target_saddr->sa_family)) {
1843         target_saddr->sa_family = tswap16(addr->sa_family);
1844     }
1845     if (addr->sa_family == AF_NETLINK &&
1846         len >= sizeof(struct target_sockaddr_nl)) {
1847         struct target_sockaddr_nl *target_nl =
1848                (struct target_sockaddr_nl *)target_saddr;
1849         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1850         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1851     } else if (addr->sa_family == AF_PACKET) {
1852         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1853         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1854         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1855     } else if (addr->sa_family == AF_INET6 &&
1856                len >= sizeof(struct target_sockaddr_in6)) {
1857         struct target_sockaddr_in6 *target_in6 =
1858                (struct target_sockaddr_in6 *)target_saddr;
1859         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1860     }
1861     unlock_user(target_saddr, target_addr, len);
1862 
1863     return 0;
1864 }
1865 
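/*
 * Convert the ancillary data (control messages) of a guest msghdr into
 * host format. Only SCM_RIGHTS and SCM_CREDENTIALS payloads are converted;
 * anything else is copied through unchanged with a LOG_UNIMP warning.
 */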
1866 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1867                                            struct target_msghdr *target_msgh)
1868 {
1869     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1870     abi_long msg_controllen;
1871     abi_ulong target_cmsg_addr;
1872     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1873     socklen_t space = 0;
1874 
1875     msg_controllen = tswapal(target_msgh->msg_controllen);
1876     if (msg_controllen < sizeof (struct target_cmsghdr))
1877         goto the_end;
1878     target_cmsg_addr = tswapal(target_msgh->msg_control);
1879     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1880     target_cmsg_start = target_cmsg;
1881     if (!target_cmsg)
1882         return -TARGET_EFAULT;
1883 
1884     while (cmsg && target_cmsg) {
1885         void *data = CMSG_DATA(cmsg);
1886         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1887 
1888         int len = tswapal(target_cmsg->cmsg_len)
1889             - sizeof(struct target_cmsghdr);
1890 
1891         space += CMSG_SPACE(len);
1892         if (space > msgh->msg_controllen) {
1893             space -= CMSG_SPACE(len);
1894             /* This is a QEMU bug, since we allocated the payload
1895              * area ourselves (unlike overflow in host-to-target
1896              * conversion, which is just the guest giving us a buffer
1897              * that's too small). It can't happen for the payload types
1898              * we currently support; if it becomes an issue in future
1899              * we would need to improve our allocation strategy to
1900              * something more intelligent than "twice the size of the
1901              * target buffer we're reading from".
1902              */
1903             qemu_log_mask(LOG_UNIMP,
1904                           ("Unsupported ancillary data %d/%d: "
1905                            "unhandled msg size\n"),
1906                           tswap32(target_cmsg->cmsg_level),
1907                           tswap32(target_cmsg->cmsg_type));
1908             break;
1909         }
1910 
1911         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1912             cmsg->cmsg_level = SOL_SOCKET;
1913         } else {
1914             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1915         }
1916         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1917         cmsg->cmsg_len = CMSG_LEN(len);
1918 
1919         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1920             int *fd = (int *)data;
1921             int *target_fd = (int *)target_data;
1922             int i, numfds = len / sizeof(int);
1923 
1924             for (i = 0; i < numfds; i++) {
1925                 __get_user(fd[i], target_fd + i);
1926             }
1927         } else if (cmsg->cmsg_level == SOL_SOCKET
1928                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1929             struct ucred *cred = (struct ucred *)data;
1930             struct target_ucred *target_cred =
1931                 (struct target_ucred *)target_data;
1932 
1933             __get_user(cred->pid, &target_cred->pid);
1934             __get_user(cred->uid, &target_cred->uid);
1935             __get_user(cred->gid, &target_cred->gid);
1936         } else {
1937             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1938                           cmsg->cmsg_level, cmsg->cmsg_type);
1939             memcpy(data, target_data, len);
1940         }
1941 
1942         cmsg = CMSG_NXTHDR(msgh, cmsg);
1943         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1944                                          target_cmsg_start);
1945     }
1946     unlock_user(target_cmsg, target_cmsg_addr, 0);
1947  the_end:
1948     msgh->msg_controllen = space;
1949     return 0;
1950 }
1951 
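/*
 * Convert host ancillary data back into a guest msghdr, truncating and
 * setting MSG_CTRUNC when the guest control buffer is too small. Payloads
 * handled explicitly: SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS, and the
 * IP/IPv6 TTL, hop-limit and RECVERR messages.
 */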
1952 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1953                                            struct msghdr *msgh)
1954 {
1955     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1956     abi_long msg_controllen;
1957     abi_ulong target_cmsg_addr;
1958     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1959     socklen_t space = 0;
1960 
1961     msg_controllen = tswapal(target_msgh->msg_controllen);
1962     if (msg_controllen < sizeof (struct target_cmsghdr))
1963         goto the_end;
1964     target_cmsg_addr = tswapal(target_msgh->msg_control);
1965     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1966     target_cmsg_start = target_cmsg;
1967     if (!target_cmsg)
1968         return -TARGET_EFAULT;
1969 
1970     while (cmsg && target_cmsg) {
1971         void *data = CMSG_DATA(cmsg);
1972         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1973 
1974         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1975         int tgt_len, tgt_space;
1976 
1977         /* We never copy a half-header but may copy half-data;
1978          * this is Linux's behaviour in put_cmsg(). Note that
1979          * truncation here is a guest problem (which we report
1980          * to the guest via the CTRUNC bit), unlike truncation
1981          * in target_to_host_cmsg, which is a QEMU bug.
1982          */
1983         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1984             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1985             break;
1986         }
1987 
1988         if (cmsg->cmsg_level == SOL_SOCKET) {
1989             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1990         } else {
1991             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1992         }
1993         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1994 
1995         /* Payload types which need a different size of payload on
1996          * the target must adjust tgt_len here.
1997          */
1998         tgt_len = len;
1999         switch (cmsg->cmsg_level) {
2000         case SOL_SOCKET:
2001             switch (cmsg->cmsg_type) {
2002             case SO_TIMESTAMP:
2003                 tgt_len = sizeof(struct target_timeval);
2004                 break;
2005             default:
2006                 break;
2007             }
2008             break;
2009         default:
2010             break;
2011         }
2012 
2013         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2014             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2015             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2016         }
2017 
2018         /* We must now copy-and-convert len bytes of payload
2019          * into tgt_len bytes of destination space. Bear in mind
2020          * that in both source and destination we may be dealing
2021          * with a truncated value!
2022          */
2023         switch (cmsg->cmsg_level) {
2024         case SOL_SOCKET:
2025             switch (cmsg->cmsg_type) {
2026             case SCM_RIGHTS:
2027             {
2028                 int *fd = (int *)data;
2029                 int *target_fd = (int *)target_data;
2030                 int i, numfds = tgt_len / sizeof(int);
2031 
2032                 for (i = 0; i < numfds; i++) {
2033                     __put_user(fd[i], target_fd + i);
2034                 }
2035                 break;
2036             }
2037             case SO_TIMESTAMP:
2038             {
2039                 struct timeval *tv = (struct timeval *)data;
2040                 struct target_timeval *target_tv =
2041                     (struct target_timeval *)target_data;
2042 
2043                 if (len != sizeof(struct timeval) ||
2044                     tgt_len != sizeof(struct target_timeval)) {
2045                     goto unimplemented;
2046                 }
2047 
2048                 /* copy struct timeval to target */
2049                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2050                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2051                 break;
2052             }
2053             case SCM_CREDENTIALS:
2054             {
2055                 struct ucred *cred = (struct ucred *)data;
2056                 struct target_ucred *target_cred =
2057                     (struct target_ucred *)target_data;
2058 
2059                 __put_user(cred->pid, &target_cred->pid);
2060                 __put_user(cred->uid, &target_cred->uid);
2061                 __put_user(cred->gid, &target_cred->gid);
2062                 break;
2063             }
2064             default:
2065                 goto unimplemented;
2066             }
2067             break;
2068 
2069         case SOL_IP:
2070             switch (cmsg->cmsg_type) {
2071             case IP_TTL:
2072             {
2073                 uint32_t *v = (uint32_t *)data;
2074                 uint32_t *t_int = (uint32_t *)target_data;
2075 
2076                 if (len != sizeof(uint32_t) ||
2077                     tgt_len != sizeof(uint32_t)) {
2078                     goto unimplemented;
2079                 }
2080                 __put_user(*v, t_int);
2081                 break;
2082             }
2083             case IP_RECVERR:
2084             {
2085                 struct errhdr_t {
2086                    struct sock_extended_err ee;
2087                    struct sockaddr_in offender;
2088                 };
2089                 struct errhdr_t *errh = (struct errhdr_t *)data;
2090                 struct errhdr_t *target_errh =
2091                     (struct errhdr_t *)target_data;
2092 
2093                 if (len != sizeof(struct errhdr_t) ||
2094                     tgt_len != sizeof(struct errhdr_t)) {
2095                     goto unimplemented;
2096                 }
2097                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2098                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2099                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2100                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2101                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2102                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2103                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2104                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2105                     (void *) &errh->offender, sizeof(errh->offender));
2106                 break;
2107             }
2108             default:
2109                 goto unimplemented;
2110             }
2111             break;
2112 
2113         case SOL_IPV6:
2114             switch (cmsg->cmsg_type) {
2115             case IPV6_HOPLIMIT:
2116             {
2117                 uint32_t *v = (uint32_t *)data;
2118                 uint32_t *t_int = (uint32_t *)target_data;
2119 
2120                 if (len != sizeof(uint32_t) ||
2121                     tgt_len != sizeof(uint32_t)) {
2122                     goto unimplemented;
2123                 }
2124                 __put_user(*v, t_int);
2125                 break;
2126             }
2127             case IPV6_RECVERR:
2128             {
2129                 struct errhdr6_t {
2130                    struct sock_extended_err ee;
2131                    struct sockaddr_in6 offender;
2132                 };
2133                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2134                 struct errhdr6_t *target_errh =
2135                     (struct errhdr6_t *)target_data;
2136 
2137                 if (len != sizeof(struct errhdr6_t) ||
2138                     tgt_len != sizeof(struct errhdr6_t)) {
2139                     goto unimplemented;
2140                 }
2141                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2142                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2143                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2144                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2145                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2146                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2147                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2148                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2149                     (void *) &errh->offender, sizeof(errh->offender));
2150                 break;
2151             }
2152             default:
2153                 goto unimplemented;
2154             }
2155             break;
2156 
2157         default:
2158         unimplemented:
2159             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2160                           cmsg->cmsg_level, cmsg->cmsg_type);
2161             memcpy(target_data, data, MIN(len, tgt_len));
2162             if (tgt_len > len) {
2163                 memset(target_data + len, 0, tgt_len - len);
2164             }
2165         }
2166 
2167         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2168         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2169         if (msg_controllen < tgt_space) {
2170             tgt_space = msg_controllen;
2171         }
2172         msg_controllen -= tgt_space;
2173         space += tgt_space;
2174         cmsg = CMSG_NXTHDR(msgh, cmsg);
2175         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2176                                          target_cmsg_start);
2177     }
2178     unlock_user(target_cmsg, target_cmsg_addr, space);
2179  the_end:
2180     target_msgh->msg_controllen = tswapal(space);
2181     return 0;
2182 }
2183 
2184 /* do_setsockopt() Must return target values and target errnos. */
2185 static abi_long do_setsockopt(int sockfd, int level, int optname,
2186                               abi_ulong optval_addr, socklen_t optlen)
2187 {
2188     abi_long ret;
2189     int val;
2190     struct ip_mreqn *ip_mreq;
2191     struct ip_mreq_source *ip_mreq_source;
2192 
2193     switch(level) {
2194     case SOL_TCP:
2195     case SOL_UDP:
2196         /* TCP and UDP options all take an 'int' value.  */
2197         if (optlen < sizeof(uint32_t))
2198             return -TARGET_EINVAL;
2199 
2200         if (get_user_u32(val, optval_addr))
2201             return -TARGET_EFAULT;
2202         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2203         break;
2204     case SOL_IP:
2205         switch(optname) {
2206         case IP_TOS:
2207         case IP_TTL:
2208         case IP_HDRINCL:
2209         case IP_ROUTER_ALERT:
2210         case IP_RECVOPTS:
2211         case IP_RETOPTS:
2212         case IP_PKTINFO:
2213         case IP_MTU_DISCOVER:
2214         case IP_RECVERR:
2215         case IP_RECVTTL:
2216         case IP_RECVTOS:
2217 #ifdef IP_FREEBIND
2218         case IP_FREEBIND:
2219 #endif
2220         case IP_MULTICAST_TTL:
2221         case IP_MULTICAST_LOOP:
2222             val = 0;
2223             if (optlen >= sizeof(uint32_t)) {
2224                 if (get_user_u32(val, optval_addr))
2225                     return -TARGET_EFAULT;
2226             } else if (optlen >= 1) {
2227                 if (get_user_u8(val, optval_addr))
2228                     return -TARGET_EFAULT;
2229             }
2230             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2231             break;
2232         case IP_ADD_MEMBERSHIP:
2233         case IP_DROP_MEMBERSHIP:
2234             if (optlen < sizeof (struct target_ip_mreq) ||
2235                 optlen > sizeof (struct target_ip_mreqn))
2236                 return -TARGET_EINVAL;
2237 
2238             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2239             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2240             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2241             break;
2242 
2243         case IP_BLOCK_SOURCE:
2244         case IP_UNBLOCK_SOURCE:
2245         case IP_ADD_SOURCE_MEMBERSHIP:
2246         case IP_DROP_SOURCE_MEMBERSHIP:
2247             if (optlen != sizeof (struct target_ip_mreq_source))
2248                 return -TARGET_EINVAL;
2249 
2250             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2251             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2252             unlock_user(ip_mreq_source, optval_addr, 0);
2253             break;
2254 
2255         default:
2256             goto unimplemented;
2257         }
2258         break;
2259     case SOL_IPV6:
2260         switch (optname) {
2261         case IPV6_MTU_DISCOVER:
2262         case IPV6_MTU:
2263         case IPV6_V6ONLY:
2264         case IPV6_RECVPKTINFO:
2265         case IPV6_UNICAST_HOPS:
2266         case IPV6_MULTICAST_HOPS:
2267         case IPV6_MULTICAST_LOOP:
2268         case IPV6_RECVERR:
2269         case IPV6_RECVHOPLIMIT:
2270         case IPV6_2292HOPLIMIT:
2271         case IPV6_CHECKSUM:
2272         case IPV6_ADDRFORM:
2273         case IPV6_2292PKTINFO:
2274         case IPV6_RECVTCLASS:
2275         case IPV6_RECVRTHDR:
2276         case IPV6_2292RTHDR:
2277         case IPV6_RECVHOPOPTS:
2278         case IPV6_2292HOPOPTS:
2279         case IPV6_RECVDSTOPTS:
2280         case IPV6_2292DSTOPTS:
2281         case IPV6_TCLASS:
2282         case IPV6_ADDR_PREFERENCES:
2283 #ifdef IPV6_RECVPATHMTU
2284         case IPV6_RECVPATHMTU:
2285 #endif
2286 #ifdef IPV6_TRANSPARENT
2287         case IPV6_TRANSPARENT:
2288 #endif
2289 #ifdef IPV6_FREEBIND
2290         case IPV6_FREEBIND:
2291 #endif
2292 #ifdef IPV6_RECVORIGDSTADDR
2293         case IPV6_RECVORIGDSTADDR:
2294 #endif
2295             val = 0;
2296             if (optlen < sizeof(uint32_t)) {
2297                 return -TARGET_EINVAL;
2298             }
2299             if (get_user_u32(val, optval_addr)) {
2300                 return -TARGET_EFAULT;
2301             }
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &val, sizeof(val)));
2304             break;
2305         case IPV6_PKTINFO:
2306         {
2307             struct in6_pktinfo pki;
2308 
2309             if (optlen < sizeof(pki)) {
2310                 return -TARGET_EINVAL;
2311             }
2312 
2313             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2314                 return -TARGET_EFAULT;
2315             }
2316 
2317             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2318 
2319             ret = get_errno(setsockopt(sockfd, level, optname,
2320                                        &pki, sizeof(pki)));
2321             break;
2322         }
2323         case IPV6_ADD_MEMBERSHIP:
2324         case IPV6_DROP_MEMBERSHIP:
2325         {
2326             struct ipv6_mreq ipv6mreq;
2327 
2328             if (optlen < sizeof(ipv6mreq)) {
2329                 return -TARGET_EINVAL;
2330             }
2331 
2332             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2333                 return -TARGET_EFAULT;
2334             }
2335 
2336             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2337 
2338             ret = get_errno(setsockopt(sockfd, level, optname,
2339                                        &ipv6mreq, sizeof(ipv6mreq)));
2340             break;
2341         }
2342         default:
2343             goto unimplemented;
2344         }
2345         break;
2346     case SOL_ICMPV6:
2347         switch (optname) {
2348         case ICMPV6_FILTER:
2349         {
2350             struct icmp6_filter icmp6f;
2351 
2352             if (optlen > sizeof(icmp6f)) {
2353                 optlen = sizeof(icmp6f);
2354             }
2355 
2356             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2357                 return -TARGET_EFAULT;
2358             }
2359 
2360             for (val = 0; val < 8; val++) {
2361                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2362             }
2363 
2364             ret = get_errno(setsockopt(sockfd, level, optname,
2365                                        &icmp6f, optlen));
2366             break;
2367         }
2368         default:
2369             goto unimplemented;
2370         }
2371         break;
2372     case SOL_RAW:
2373         switch (optname) {
2374         case ICMP_FILTER:
2375         case IPV6_CHECKSUM:
2376             /* these take a u32 value */
2377             if (optlen < sizeof(uint32_t)) {
2378                 return -TARGET_EINVAL;
2379             }
2380 
2381             if (get_user_u32(val, optval_addr)) {
2382                 return -TARGET_EFAULT;
2383             }
2384             ret = get_errno(setsockopt(sockfd, level, optname,
2385                                        &val, sizeof(val)));
2386             break;
2387 
2388         default:
2389             goto unimplemented;
2390         }
2391         break;
2392 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2393     case SOL_ALG:
2394         switch (optname) {
2395         case ALG_SET_KEY:
2396         {
2397             char *alg_key = g_malloc(optlen);
2398 
2399             if (!alg_key) {
2400                 return -TARGET_ENOMEM;
2401             }
2402             if (copy_from_user(alg_key, optval_addr, optlen)) {
2403                 g_free(alg_key);
2404                 return -TARGET_EFAULT;
2405             }
2406             ret = get_errno(setsockopt(sockfd, level, optname,
2407                                        alg_key, optlen));
2408             g_free(alg_key);
2409             break;
2410         }
2411         case ALG_SET_AEAD_AUTHSIZE:
2412         {
2413             ret = get_errno(setsockopt(sockfd, level, optname,
2414                                        NULL, optlen));
2415             break;
2416         }
2417         default:
2418             goto unimplemented;
2419         }
2420         break;
2421 #endif
2422     case TARGET_SOL_SOCKET:
2423         switch (optname) {
2424         case TARGET_SO_RCVTIMEO:
2425         {
2426                 struct timeval tv;
2427 
2428                 optname = SO_RCVTIMEO;
2429 
2430 set_timeout:
2431                 if (optlen != sizeof(struct target_timeval)) {
2432                     return -TARGET_EINVAL;
2433                 }
2434 
2435                 if (copy_from_user_timeval(&tv, optval_addr)) {
2436                     return -TARGET_EFAULT;
2437                 }
2438 
2439                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2440                                 &tv, sizeof(tv)));
2441                 return ret;
2442         }
2443         case TARGET_SO_SNDTIMEO:
2444                 optname = SO_SNDTIMEO;
2445                 goto set_timeout;
2446         case TARGET_SO_ATTACH_FILTER:
2447         {
2448                 struct target_sock_fprog *tfprog;
2449                 struct target_sock_filter *tfilter;
2450                 struct sock_fprog fprog;
2451                 struct sock_filter *filter;
2452                 int i;
2453 
2454                 if (optlen != sizeof(*tfprog)) {
2455                     return -TARGET_EINVAL;
2456                 }
2457                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2458                     return -TARGET_EFAULT;
2459                 }
2460                 if (!lock_user_struct(VERIFY_READ, tfilter,
2461                                       tswapal(tfprog->filter), 0)) {
2462                     unlock_user_struct(tfprog, optval_addr, 1);
2463                     return -TARGET_EFAULT;
2464                 }
2465 
2466                 fprog.len = tswap16(tfprog->len);
2467                 filter = g_try_new(struct sock_filter, fprog.len);
2468                 if (filter == NULL) {
2469                     unlock_user_struct(tfilter, tfprog->filter, 1);
2470                     unlock_user_struct(tfprog, optval_addr, 1);
2471                     return -TARGET_ENOMEM;
2472                 }
2473                 for (i = 0; i < fprog.len; i++) {
2474                     filter[i].code = tswap16(tfilter[i].code);
2475                     filter[i].jt = tfilter[i].jt;
2476                     filter[i].jf = tfilter[i].jf;
2477                     filter[i].k = tswap32(tfilter[i].k);
2478                 }
2479                 fprog.filter = filter;
2480 
2481                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2482                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2483                 g_free(filter);
2484 
2485                 unlock_user_struct(tfilter, tfprog->filter, 1);
2486                 unlock_user_struct(tfprog, optval_addr, 1);
2487                 return ret;
2488         }
2489         case TARGET_SO_BINDTODEVICE:
2490         {
2491                 char *dev_ifname, *addr_ifname;
2492 
2493                 if (optlen > IFNAMSIZ - 1) {
2494                     optlen = IFNAMSIZ - 1;
2495                 }
2496                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2497                 if (!dev_ifname) {
2498                     return -TARGET_EFAULT;
2499                 }
2500                 optname = SO_BINDTODEVICE;
2501                 addr_ifname = alloca(IFNAMSIZ);
2502                 memcpy(addr_ifname, dev_ifname, optlen);
2503                 addr_ifname[optlen] = 0;
2504                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2505                                            addr_ifname, optlen));
2506                 unlock_user(dev_ifname, optval_addr, 0);
2507                 return ret;
2508         }
2509         case TARGET_SO_LINGER:
2510         {
2511                 struct linger lg;
2512                 struct target_linger *tlg;
2513 
2514                 if (optlen != sizeof(struct target_linger)) {
2515                     return -TARGET_EINVAL;
2516                 }
2517                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2518                     return -TARGET_EFAULT;
2519                 }
2520                 __get_user(lg.l_onoff, &tlg->l_onoff);
2521                 __get_user(lg.l_linger, &tlg->l_linger);
2522                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2523                                 &lg, sizeof(lg)));
2524                 unlock_user_struct(tlg, optval_addr, 0);
2525                 return ret;
2526         }
2527             /* Options with 'int' argument.  */
2528         case TARGET_SO_DEBUG:
2529                 optname = SO_DEBUG;
2530                 break;
2531         case TARGET_SO_REUSEADDR:
2532                 optname = SO_REUSEADDR;
2533                 break;
2534 #ifdef SO_REUSEPORT
2535         case TARGET_SO_REUSEPORT:
2536                 optname = SO_REUSEPORT;
2537                 break;
2538 #endif
2539         case TARGET_SO_TYPE:
2540                 optname = SO_TYPE;
2541                 break;
2542         case TARGET_SO_ERROR:
2543                 optname = SO_ERROR;
2544                 break;
2545         case TARGET_SO_DONTROUTE:
2546                 optname = SO_DONTROUTE;
2547                 break;
2548         case TARGET_SO_BROADCAST:
2549                 optname = SO_BROADCAST;
2550                 break;
2551         case TARGET_SO_SNDBUF:
2552                 optname = SO_SNDBUF;
2553                 break;
2554         case TARGET_SO_SNDBUFFORCE:
2555                 optname = SO_SNDBUFFORCE;
2556                 break;
2557         case TARGET_SO_RCVBUF:
2558                 optname = SO_RCVBUF;
2559                 break;
2560         case TARGET_SO_RCVBUFFORCE:
2561                 optname = SO_RCVBUFFORCE;
2562                 break;
2563         case TARGET_SO_KEEPALIVE:
2564                 optname = SO_KEEPALIVE;
2565                 break;
2566         case TARGET_SO_OOBINLINE:
2567                 optname = SO_OOBINLINE;
2568                 break;
2569         case TARGET_SO_NO_CHECK:
2570                 optname = SO_NO_CHECK;
2571                 break;
2572         case TARGET_SO_PRIORITY:
2573                 optname = SO_PRIORITY;
2574                 break;
2575 #ifdef SO_BSDCOMPAT
2576         case TARGET_SO_BSDCOMPAT:
2577                 optname = SO_BSDCOMPAT;
2578                 break;
2579 #endif
2580         case TARGET_SO_PASSCRED:
2581                 optname = SO_PASSCRED;
2582                 break;
2583         case TARGET_SO_PASSSEC:
2584                 optname = SO_PASSSEC;
2585                 break;
2586         case TARGET_SO_TIMESTAMP:
2587                 optname = SO_TIMESTAMP;
2588                 break;
2589         case TARGET_SO_RCVLOWAT:
2590                 optname = SO_RCVLOWAT;
2591                 break;
2592         default:
2593             goto unimplemented;
2594         }
2595         if (optlen < sizeof(uint32_t))
2596             return -TARGET_EINVAL;
2597 
2598         if (get_user_u32(val, optval_addr))
2599             return -TARGET_EFAULT;
2600         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2601         break;
2602 #ifdef SOL_NETLINK
2603     case SOL_NETLINK:
2604         switch (optname) {
2605         case NETLINK_PKTINFO:
2606         case NETLINK_ADD_MEMBERSHIP:
2607         case NETLINK_DROP_MEMBERSHIP:
2608         case NETLINK_BROADCAST_ERROR:
2609         case NETLINK_NO_ENOBUFS:
2610 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2611         case NETLINK_LISTEN_ALL_NSID:
2612         case NETLINK_CAP_ACK:
2613 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2614 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2615         case NETLINK_EXT_ACK:
2616 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2618         case NETLINK_GET_STRICT_CHK:
2619 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2620             break;
2621         default:
2622             goto unimplemented;
2623         }
2624         val = 0;
2625         if (optlen < sizeof(uint32_t)) {
2626             return -TARGET_EINVAL;
2627         }
2628         if (get_user_u32(val, optval_addr)) {
2629             return -TARGET_EFAULT;
2630         }
2631         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2632                                    sizeof(val)));
2633         break;
2634 #endif /* SOL_NETLINK */
2635     default:
2636     unimplemented:
2637         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2638                       level, optname);
2639         ret = -TARGET_ENOPROTOOPT;
2640     }
2641     return ret;
2642 }
2643 
2644 /* do_getsockopt() Must return target values and target errnos. */
2645 static abi_long do_getsockopt(int sockfd, int level, int optname,
2646                               abi_ulong optval_addr, abi_ulong optlen)
2647 {
2648     abi_long ret;
2649     int len, val;
2650     socklen_t lv;
2651 
2652     switch(level) {
2653     case TARGET_SOL_SOCKET:
2654         level = SOL_SOCKET;
2655         switch (optname) {
2656         /* These don't just return a single integer */
2657         case TARGET_SO_PEERNAME:
2658             goto unimplemented;
2659         case TARGET_SO_RCVTIMEO: {
2660             struct timeval tv;
2661             socklen_t tvlen;
2662 
2663             optname = SO_RCVTIMEO;
2664 
2665 get_timeout:
2666             if (get_user_u32(len, optlen)) {
2667                 return -TARGET_EFAULT;
2668             }
2669             if (len < 0) {
2670                 return -TARGET_EINVAL;
2671             }
2672 
2673             tvlen = sizeof(tv);
2674             ret = get_errno(getsockopt(sockfd, level, optname,
2675                                        &tv, &tvlen));
2676             if (ret < 0) {
2677                 return ret;
2678             }
2679             if (len > sizeof(struct target_timeval)) {
2680                 len = sizeof(struct target_timeval);
2681             }
2682             if (copy_to_user_timeval(optval_addr, &tv)) {
2683                 return -TARGET_EFAULT;
2684             }
2685             if (put_user_u32(len, optlen)) {
2686                 return -TARGET_EFAULT;
2687             }
2688             break;
2689         }
2690         case TARGET_SO_SNDTIMEO:
2691             optname = SO_SNDTIMEO;
2692             goto get_timeout;
2693         case TARGET_SO_PEERCRED: {
2694             struct ucred cr;
2695             socklen_t crlen;
2696             struct target_ucred *tcr;
2697 
2698             if (get_user_u32(len, optlen)) {
2699                 return -TARGET_EFAULT;
2700             }
2701             if (len < 0) {
2702                 return -TARGET_EINVAL;
2703             }
2704 
2705             crlen = sizeof(cr);
2706             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2707                                        &cr, &crlen));
2708             if (ret < 0) {
2709                 return ret;
2710             }
2711             if (len > crlen) {
2712                 len = crlen;
2713             }
2714             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2715                 return -TARGET_EFAULT;
2716             }
2717             __put_user(cr.pid, &tcr->pid);
2718             __put_user(cr.uid, &tcr->uid);
2719             __put_user(cr.gid, &tcr->gid);
2720             unlock_user_struct(tcr, optval_addr, 1);
2721             if (put_user_u32(len, optlen)) {
2722                 return -TARGET_EFAULT;
2723             }
2724             break;
2725         }
2726         case TARGET_SO_PEERSEC: {
2727             char *name;
2728 
2729             if (get_user_u32(len, optlen)) {
2730                 return -TARGET_EFAULT;
2731             }
2732             if (len < 0) {
2733                 return -TARGET_EINVAL;
2734             }
2735             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2736             if (!name) {
2737                 return -TARGET_EFAULT;
2738             }
2739             lv = len;
2740             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2741                                        name, &lv));
2742             if (put_user_u32(lv, optlen)) {
2743                 ret = -TARGET_EFAULT;
2744             }
2745             unlock_user(name, optval_addr, lv);
2746             break;
2747         }
2748         case TARGET_SO_LINGER:
2749         {
2750             struct linger lg;
2751             socklen_t lglen;
2752             struct target_linger *tlg;
2753 
2754             if (get_user_u32(len, optlen)) {
2755                 return -TARGET_EFAULT;
2756             }
2757             if (len < 0) {
2758                 return -TARGET_EINVAL;
2759             }
2760 
2761             lglen = sizeof(lg);
2762             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2763                                        &lg, &lglen));
2764             if (ret < 0) {
2765                 return ret;
2766             }
2767             if (len > lglen) {
2768                 len = lglen;
2769             }
2770             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2771                 return -TARGET_EFAULT;
2772             }
2773             __put_user(lg.l_onoff, &tlg->l_onoff);
2774             __put_user(lg.l_linger, &tlg->l_linger);
2775             unlock_user_struct(tlg, optval_addr, 1);
2776             if (put_user_u32(len, optlen)) {
2777                 return -TARGET_EFAULT;
2778             }
2779             break;
2780         }
2781         /* Options with 'int' argument.  */
2782         case TARGET_SO_DEBUG:
2783             optname = SO_DEBUG;
2784             goto int_case;
2785         case TARGET_SO_REUSEADDR:
2786             optname = SO_REUSEADDR;
2787             goto int_case;
2788 #ifdef SO_REUSEPORT
2789         case TARGET_SO_REUSEPORT:
2790             optname = SO_REUSEPORT;
2791             goto int_case;
2792 #endif
2793         case TARGET_SO_TYPE:
2794             optname = SO_TYPE;
2795             goto int_case;
2796         case TARGET_SO_ERROR:
2797             optname = SO_ERROR;
2798             goto int_case;
2799         case TARGET_SO_DONTROUTE:
2800             optname = SO_DONTROUTE;
2801             goto int_case;
2802         case TARGET_SO_BROADCAST:
2803             optname = SO_BROADCAST;
2804             goto int_case;
2805         case TARGET_SO_SNDBUF:
2806             optname = SO_SNDBUF;
2807             goto int_case;
2808         case TARGET_SO_RCVBUF:
2809             optname = SO_RCVBUF;
2810             goto int_case;
2811         case TARGET_SO_KEEPALIVE:
2812             optname = SO_KEEPALIVE;
2813             goto int_case;
2814         case TARGET_SO_OOBINLINE:
2815             optname = SO_OOBINLINE;
2816             goto int_case;
2817         case TARGET_SO_NO_CHECK:
2818             optname = SO_NO_CHECK;
2819             goto int_case;
2820         case TARGET_SO_PRIORITY:
2821             optname = SO_PRIORITY;
2822             goto int_case;
2823 #ifdef SO_BSDCOMPAT
2824         case TARGET_SO_BSDCOMPAT:
2825             optname = SO_BSDCOMPAT;
2826             goto int_case;
2827 #endif
2828         case TARGET_SO_PASSCRED:
2829             optname = SO_PASSCRED;
2830             goto int_case;
2831         case TARGET_SO_TIMESTAMP:
2832             optname = SO_TIMESTAMP;
2833             goto int_case;
2834         case TARGET_SO_RCVLOWAT:
2835             optname = SO_RCVLOWAT;
2836             goto int_case;
2837         case TARGET_SO_ACCEPTCONN:
2838             optname = SO_ACCEPTCONN;
2839             goto int_case;
2840         default:
2841             goto int_case;
2842         }
2843         break;
2844     case SOL_TCP:
2845     case SOL_UDP:
2846         /* TCP and UDP options all take an 'int' value.  */
2847     int_case:
2848         if (get_user_u32(len, optlen))
2849             return -TARGET_EFAULT;
2850         if (len < 0)
2851             return -TARGET_EINVAL;
2852         lv = sizeof(lv);
2853         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2854         if (ret < 0)
2855             return ret;
2856         if (optname == SO_TYPE) {
2857             val = host_to_target_sock_type(val);
2858         }
2859         if (len > lv)
2860             len = lv;
2861         if (len == 4) {
2862             if (put_user_u32(val, optval_addr))
2863                 return -TARGET_EFAULT;
2864         } else {
2865             if (put_user_u8(val, optval_addr))
2866                 return -TARGET_EFAULT;
2867         }
2868         if (put_user_u32(len, optlen))
2869             return -TARGET_EFAULT;
2870         break;
2871     case SOL_IP:
2872         switch(optname) {
2873         case IP_TOS:
2874         case IP_TTL:
2875         case IP_HDRINCL:
2876         case IP_ROUTER_ALERT:
2877         case IP_RECVOPTS:
2878         case IP_RETOPTS:
2879         case IP_PKTINFO:
2880         case IP_MTU_DISCOVER:
2881         case IP_RECVERR:
2882         case IP_RECVTOS:
2883 #ifdef IP_FREEBIND
2884         case IP_FREEBIND:
2885 #endif
2886         case IP_MULTICAST_TTL:
2887         case IP_MULTICAST_LOOP:
2888             if (get_user_u32(len, optlen))
2889                 return -TARGET_EFAULT;
2890             if (len < 0)
2891                 return -TARGET_EINVAL;
2892             lv = sizeof(lv);
2893             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2894             if (ret < 0)
2895                 return ret;
2896             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2897                 len = 1;
2898                 if (put_user_u32(len, optlen)
2899                     || put_user_u8(val, optval_addr))
2900                     return -TARGET_EFAULT;
2901             } else {
2902                 if (len > sizeof(int))
2903                     len = sizeof(int);
2904                 if (put_user_u32(len, optlen)
2905                     || put_user_u32(val, optval_addr))
2906                     return -TARGET_EFAULT;
2907             }
2908             break;
2909         default:
2910             ret = -TARGET_ENOPROTOOPT;
2911             break;
2912         }
2913         break;
2914     case SOL_IPV6:
2915         switch (optname) {
2916         case IPV6_MTU_DISCOVER:
2917         case IPV6_MTU:
2918         case IPV6_V6ONLY:
2919         case IPV6_RECVPKTINFO:
2920         case IPV6_UNICAST_HOPS:
2921         case IPV6_MULTICAST_HOPS:
2922         case IPV6_MULTICAST_LOOP:
2923         case IPV6_RECVERR:
2924         case IPV6_RECVHOPLIMIT:
2925         case IPV6_2292HOPLIMIT:
2926         case IPV6_CHECKSUM:
2927         case IPV6_ADDRFORM:
2928         case IPV6_2292PKTINFO:
2929         case IPV6_RECVTCLASS:
2930         case IPV6_RECVRTHDR:
2931         case IPV6_2292RTHDR:
2932         case IPV6_RECVHOPOPTS:
2933         case IPV6_2292HOPOPTS:
2934         case IPV6_RECVDSTOPTS:
2935         case IPV6_2292DSTOPTS:
2936         case IPV6_TCLASS:
2937         case IPV6_ADDR_PREFERENCES:
2938 #ifdef IPV6_RECVPATHMTU
2939         case IPV6_RECVPATHMTU:
2940 #endif
2941 #ifdef IPV6_TRANSPARENT
2942         case IPV6_TRANSPARENT:
2943 #endif
2944 #ifdef IPV6_FREEBIND
2945         case IPV6_FREEBIND:
2946 #endif
2947 #ifdef IPV6_RECVORIGDSTADDR
2948         case IPV6_RECVORIGDSTADDR:
2949 #endif
2950             if (get_user_u32(len, optlen))
2951                 return -TARGET_EFAULT;
2952             if (len < 0)
2953                 return -TARGET_EINVAL;
2954             lv = sizeof(lv);
2955             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2956             if (ret < 0)
2957                 return ret;
2958             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2959                 len = 1;
2960                 if (put_user_u32(len, optlen)
2961                     || put_user_u8(val, optval_addr))
2962                     return -TARGET_EFAULT;
2963             } else {
2964                 if (len > sizeof(int))
2965                     len = sizeof(int);
2966                 if (put_user_u32(len, optlen)
2967                     || put_user_u32(val, optval_addr))
2968                     return -TARGET_EFAULT;
2969             }
2970             break;
2971         default:
2972             ret = -TARGET_ENOPROTOOPT;
2973             break;
2974         }
2975         break;
2976 #ifdef SOL_NETLINK
2977     case SOL_NETLINK:
2978         switch (optname) {
2979         case NETLINK_PKTINFO:
2980         case NETLINK_BROADCAST_ERROR:
2981         case NETLINK_NO_ENOBUFS:
2982 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2983         case NETLINK_LISTEN_ALL_NSID:
2984         case NETLINK_CAP_ACK:
2985 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2986 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2987         case NETLINK_EXT_ACK:
2988 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2989 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2990         case NETLINK_GET_STRICT_CHK:
2991 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2992             if (get_user_u32(len, optlen)) {
2993                 return -TARGET_EFAULT;
2994             }
2995             if (len != sizeof(val)) {
2996                 return -TARGET_EINVAL;
2997             }
2998             lv = len;
2999             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3000             if (ret < 0) {
3001                 return ret;
3002             }
3003             if (put_user_u32(lv, optlen)
3004                 || put_user_u32(val, optval_addr)) {
3005                 return -TARGET_EFAULT;
3006             }
3007             break;
3008 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3009         case NETLINK_LIST_MEMBERSHIPS:
3010         {
3011             uint32_t *results;
3012             int i;
3013             if (get_user_u32(len, optlen)) {
3014                 return -TARGET_EFAULT;
3015             }
3016             if (len < 0) {
3017                 return -TARGET_EINVAL;
3018             }
3019             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3020             if (!results) {
3021                 return -TARGET_EFAULT;
3022             }
3023             lv = len;
3024             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3025             if (ret < 0) {
3026                 unlock_user(results, optval_addr, 0);
3027                 return ret;
3028             }
3029             /* swap host endianness to target endianness. */
3030             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3031                 results[i] = tswap32(results[i]);
3032             }
3033             if (put_user_u32(lv, optlen)) {
3034                 return -TARGET_EFAULT;
3035             }
3036             unlock_user(results, optval_addr, 0);
3037             break;
3038         }
3039 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3040         default:
3041             goto unimplemented;
3042         }
3043         break;
3044 #endif /* SOL_NETLINK */
3045     default:
3046     unimplemented:
3047         qemu_log_mask(LOG_UNIMP,
3048                       "getsockopt level=%d optname=%d not yet supported\n",
3049                       level, optname);
3050         ret = -TARGET_EOPNOTSUPP;
3051         break;
3052     }
3053     return ret;
3054 }
3055 
3056 /* Convert a target low/high pair representing a file offset into the host
3057  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3058  * as the kernel doesn't handle them either.
3059  */
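/*
 * Worked example (illustrative, not part of the original source): for a
 * 32-bit target on a 64-bit host, tlow = 0x89abcdef and thigh = 0x01234567
 * combine into off = 0x0123456789abcdef, so *hlow = off and *hhigh = 0.
 * The shifts are split in two so that the shift count never equals the
 * operand width, which would be undefined behaviour in C.
 */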
3060 static void target_to_host_low_high(abi_ulong tlow,
3061                                     abi_ulong thigh,
3062                                     unsigned long *hlow,
3063                                     unsigned long *hhigh)
3064 {
3065     uint64_t off = tlow |
3066         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3067         TARGET_LONG_BITS / 2;
3068 
3069     *hlow = off;
3070     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3071 }
3072 
3073 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3074                                 abi_ulong count, int copy)
3075 {
3076     struct target_iovec *target_vec;
3077     struct iovec *vec;
3078     abi_ulong total_len, max_len;
3079     int i;
3080     int err = 0;
3081     bool bad_address = false;
3082 
3083     if (count == 0) {
3084         errno = 0;
3085         return NULL;
3086     }
3087     if (count > IOV_MAX) {
3088         errno = EINVAL;
3089         return NULL;
3090     }
3091 
3092     vec = g_try_new0(struct iovec, count);
3093     if (vec == NULL) {
3094         errno = ENOMEM;
3095         return NULL;
3096     }
3097 
3098     target_vec = lock_user(VERIFY_READ, target_addr,
3099                            count * sizeof(struct target_iovec), 1);
3100     if (target_vec == NULL) {
3101         err = EFAULT;
3102         goto fail2;
3103     }
3104 
3105     /* ??? If host page size > target page size, this will result in a
3106        value larger than what we can actually support.  */
3107     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3108     total_len = 0;
3109 
3110     for (i = 0; i < count; i++) {
3111         abi_ulong base = tswapal(target_vec[i].iov_base);
3112         abi_long len = tswapal(target_vec[i].iov_len);
3113 
3114         if (len < 0) {
3115             err = EINVAL;
3116             goto fail;
3117         } else if (len == 0) {
3118             /* Zero length pointer is ignored.  */
3119             vec[i].iov_base = 0;
3120         } else {
3121             vec[i].iov_base = lock_user(type, base, len, copy);
3122             /* If the first buffer pointer is bad, this is a fault.  But
3123              * subsequent bad buffers will result in a partial write; this
3124              * is realized by filling the vector with null pointers and
3125              * zero lengths. */
3126             if (!vec[i].iov_base) {
3127                 if (i == 0) {
3128                     err = EFAULT;
3129                     goto fail;
3130                 } else {
3131                     bad_address = true;
3132                 }
3133             }
3134             if (bad_address) {
3135                 len = 0;
3136             }
3137             if (len > max_len - total_len) {
3138                 len = max_len - total_len;
3139             }
3140         }
3141         vec[i].iov_len = len;
3142         total_len += len;
3143     }
3144 
3145     unlock_user(target_vec, target_addr, 0);
3146     return vec;
3147 
3148  fail:
3149     while (--i >= 0) {
3150         if (tswapal(target_vec[i].iov_len) > 0) {
3151             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3152         }
3153     }
3154     unlock_user(target_vec, target_addr, 0);
3155  fail2:
3156     g_free(vec);
3157     errno = err;
3158     return NULL;
3159 }
3160 
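/*
 * Release an iovec previously set up by lock_iovec().  When 'copy' is
 * non-zero (read-style operations), the locked host buffers are copied
 * back to guest memory before being unlocked.
 */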
3161 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3162                          abi_ulong count, int copy)
3163 {
3164     struct target_iovec *target_vec;
3165     int i;
3166 
3167     target_vec = lock_user(VERIFY_READ, target_addr,
3168                            count * sizeof(struct target_iovec), 1);
3169     if (target_vec) {
3170         for (i = 0; i < count; i++) {
3171             abi_ulong base = tswapal(target_vec[i].iov_base);
3172             abi_long len = tswapal(target_vec[i].iov_len);
3173             if (len < 0) {
3174                 break;
3175             }
3176             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3177         }
3178         unlock_user(target_vec, target_addr, 0);
3179     }
3180 
3181     g_free(vec);
3182 }
3183 
3184 static inline int target_to_host_sock_type(int *type)
3185 {
3186     int host_type = 0;
3187     int target_type = *type;
3188 
3189     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3190     case TARGET_SOCK_DGRAM:
3191         host_type = SOCK_DGRAM;
3192         break;
3193     case TARGET_SOCK_STREAM:
3194         host_type = SOCK_STREAM;
3195         break;
3196     default:
3197         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3198         break;
3199     }
3200     if (target_type & TARGET_SOCK_CLOEXEC) {
3201 #if defined(SOCK_CLOEXEC)
3202         host_type |= SOCK_CLOEXEC;
3203 #else
3204         return -TARGET_EINVAL;
3205 #endif
3206     }
3207     if (target_type & TARGET_SOCK_NONBLOCK) {
3208 #if defined(SOCK_NONBLOCK)
3209         host_type |= SOCK_NONBLOCK;
3210 #elif !defined(O_NONBLOCK)
3211         return -TARGET_EINVAL;
3212 #endif
3213     }
3214     *type = host_type;
3215     return 0;
3216 }
3217 
3218 /* Try to emulate socket type flags after socket creation.  */
3219 static int sock_flags_fixup(int fd, int target_type)
3220 {
3221 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3222     if (target_type & TARGET_SOCK_NONBLOCK) {
3223         int flags = fcntl(fd, F_GETFL);
3224         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3225             close(fd);
3226             return -TARGET_EINVAL;
3227         }
3228     }
3229 #endif
3230     return fd;
3231 }
3232 
3233 /* do_socket() Must return target values and target errnos. */
3234 static abi_long do_socket(int domain, int type, int protocol)
3235 {
3236     int target_type = type;
3237     int ret;
3238 
3239     ret = target_to_host_sock_type(&type);
3240     if (ret) {
3241         return ret;
3242     }
3243 
3244     if (domain == PF_NETLINK && !(
3245 #ifdef CONFIG_RTNETLINK
3246          protocol == NETLINK_ROUTE ||
3247 #endif
3248          protocol == NETLINK_KOBJECT_UEVENT ||
3249          protocol == NETLINK_AUDIT)) {
3250         return -TARGET_EPROTONOSUPPORT;
3251     }
3252 
3253     if (domain == AF_PACKET ||
3254         (domain == AF_INET && type == SOCK_PACKET)) {
3255         protocol = tswap16(protocol);
3256     }
3257 
3258     ret = get_errno(socket(domain, type, protocol));
3259     if (ret >= 0) {
3260         ret = sock_flags_fixup(ret, target_type);
3261         if (type == SOCK_PACKET) {
3262             /* Handle an obsolete case:
3263              * if the socket type is SOCK_PACKET, bind by name.
3264              */
3265             fd_trans_register(ret, &target_packet_trans);
3266         } else if (domain == PF_NETLINK) {
3267             switch (protocol) {
3268 #ifdef CONFIG_RTNETLINK
3269             case NETLINK_ROUTE:
3270                 fd_trans_register(ret, &target_netlink_route_trans);
3271                 break;
3272 #endif
3273             case NETLINK_KOBJECT_UEVENT:
3274                 /* nothing to do: messages are strings */
3275                 break;
3276             case NETLINK_AUDIT:
3277                 fd_trans_register(ret, &target_netlink_audit_trans);
3278                 break;
3279             default:
3280                 g_assert_not_reached();
3281             }
3282         }
3283     }
3284     return ret;
3285 }
3286 
3287 /* do_bind() Must return target values and target errnos. */
3288 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3289                         socklen_t addrlen)
3290 {
3291     void *addr;
3292     abi_long ret;
3293 
3294     if ((int)addrlen < 0) {
3295         return -TARGET_EINVAL;
3296     }
3297 
3298     addr = alloca(addrlen+1);
3299 
3300     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3301     if (ret)
3302         return ret;
3303 
3304     return get_errno(bind(sockfd, addr, addrlen));
3305 }
3306 
3307 /* do_connect() Must return target values and target errnos. */
3308 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3309                            socklen_t addrlen)
3310 {
3311     void *addr;
3312     abi_long ret;
3313 
3314     if ((int)addrlen < 0) {
3315         return -TARGET_EINVAL;
3316     }
3317 
3318     addr = alloca(addrlen+1);
3319 
3320     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3321     if (ret)
3322         return ret;
3323 
3324     return get_errno(safe_connect(sockfd, addr, addrlen));
3325 }
3326 
3327 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3328 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3329                                       int flags, int send)
3330 {
3331     abi_long ret, len;
3332     struct msghdr msg;
3333     abi_ulong count;
3334     struct iovec *vec;
3335     abi_ulong target_vec;
3336 
3337     if (msgp->msg_name) {
3338         msg.msg_namelen = tswap32(msgp->msg_namelen);
3339         msg.msg_name = alloca(msg.msg_namelen+1);
3340         ret = target_to_host_sockaddr(fd, msg.msg_name,
3341                                       tswapal(msgp->msg_name),
3342                                       msg.msg_namelen);
3343         if (ret == -TARGET_EFAULT) {
3344             /* For connected sockets msg_name and msg_namelen must
3345              * be ignored, so returning EFAULT immediately is wrong.
3346              * Instead, pass a bad msg_name to the host kernel, and
3347              * let it decide whether to return EFAULT or not.
3348              */
3349             msg.msg_name = (void *)-1;
3350         } else if (ret) {
3351             goto out2;
3352         }
3353     } else {
3354         msg.msg_name = NULL;
3355         msg.msg_namelen = 0;
3356     }
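    /*
     * Host control-message headers and alignment can be larger than the
     * target's (e.g. a 64-bit host running a 32-bit target), so the host
     * buffer is sized at twice the target length to leave room for the
     * converted messages.
     */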
3357     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3358     msg.msg_control = alloca(msg.msg_controllen);
3359     memset(msg.msg_control, 0, msg.msg_controllen);
3360 
3361     msg.msg_flags = tswap32(msgp->msg_flags);
3362 
3363     count = tswapal(msgp->msg_iovlen);
3364     target_vec = tswapal(msgp->msg_iov);
3365 
3366     if (count > IOV_MAX) {
3367         /* sendmsg/recvmsg return a different errno for this condition than
3368          * readv/writev, so we must catch it here before lock_iovec() does.
3369          */
3370         ret = -TARGET_EMSGSIZE;
3371         goto out2;
3372     }
3373 
3374     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3375                      target_vec, count, send);
3376     if (vec == NULL) {
3377         ret = -host_to_target_errno(errno);
3378         goto out2;
3379     }
3380     msg.msg_iovlen = count;
3381     msg.msg_iov = vec;
3382 
3383     if (send) {
3384         if (fd_trans_target_to_host_data(fd)) {
3385             void *host_msg;
3386 
3387             host_msg = g_malloc(msg.msg_iov->iov_len);
3388             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3389             ret = fd_trans_target_to_host_data(fd)(host_msg,
3390                                                    msg.msg_iov->iov_len);
3391             if (ret >= 0) {
3392                 msg.msg_iov->iov_base = host_msg;
3393                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3394             }
3395             g_free(host_msg);
3396         } else {
3397             ret = target_to_host_cmsg(&msg, msgp);
3398             if (ret == 0) {
3399                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3400             }
3401         }
3402     } else {
3403         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3404         if (!is_error(ret)) {
3405             len = ret;
3406             if (fd_trans_host_to_target_data(fd)) {
3407                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3408                                                MIN(msg.msg_iov->iov_len, len));
3409             } else {
3410                 ret = host_to_target_cmsg(msgp, &msg);
3411             }
3412             if (!is_error(ret)) {
3413                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3414                 msgp->msg_flags = tswap32(msg.msg_flags);
3415                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3416                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3417                                     msg.msg_name, msg.msg_namelen);
3418                     if (ret) {
3419                         goto out;
3420                     }
3421                 }
3422 
3423                 ret = len;
3424             }
3425         }
3426     }
3427 
3428 out:
3429     unlock_iovec(vec, target_vec, count, !send);
3430 out2:
3431     return ret;
3432 }
3433 
3434 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3435                                int flags, int send)
3436 {
3437     abi_long ret;
3438     struct target_msghdr *msgp;
3439 
3440     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3441                           msgp,
3442                           target_msg,
3443                           send ? 1 : 0)) {
3444         return -TARGET_EFAULT;
3445     }
3446     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3447     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3448     return ret;
3449 }
3450 
3451 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3452  * so it might not have this *mmsg-specific flag either.
3453  */
3454 #ifndef MSG_WAITFORONE
3455 #define MSG_WAITFORONE 0x10000
3456 #endif
3457 
3458 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3459                                 unsigned int vlen, unsigned int flags,
3460                                 int send)
3461 {
3462     struct target_mmsghdr *mmsgp;
3463     abi_long ret = 0;
3464     int i;
3465 
3466     if (vlen > UIO_MAXIOV) {
3467         vlen = UIO_MAXIOV;
3468     }
3469 
3470     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3471     if (!mmsgp) {
3472         return -TARGET_EFAULT;
3473     }
3474 
3475     for (i = 0; i < vlen; i++) {
3476         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3477         if (is_error(ret)) {
3478             break;
3479         }
3480         mmsgp[i].msg_len = tswap32(ret);
3481         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3482         if (flags & MSG_WAITFORONE) {
3483             flags |= MSG_DONTWAIT;
3484         }
3485     }
3486 
3487     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3488 
3489     /* Return number of datagrams sent if we sent any at all;
3490      * otherwise return the error.
3491      */
3492     if (i) {
3493         return i;
3494     }
3495     return ret;
3496 }
3497 
3498 /* do_accept4() Must return target values and target errnos. */
3499 static abi_long do_accept4(int fd, abi_ulong target_addr,
3500                            abi_ulong target_addrlen_addr, int flags)
3501 {
3502     socklen_t addrlen, ret_addrlen;
3503     void *addr;
3504     abi_long ret;
3505     int host_flags;
3506 
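    /*
     * On Linux the accept4() flags SOCK_NONBLOCK and SOCK_CLOEXEC have the
     * same values as O_NONBLOCK and O_CLOEXEC, which is why the fcntl flag
     * table can be reused for the conversion here.
     */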
3507     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3508 
3509     if (target_addr == 0) {
3510         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3511     }
3512 
3513     /* Linux returns EFAULT if the addrlen pointer is invalid */
3514     if (get_user_u32(addrlen, target_addrlen_addr))
3515         return -TARGET_EFAULT;
3516 
3517     if ((int)addrlen < 0) {
3518         return -TARGET_EINVAL;
3519     }
3520 
3521     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3522         return -TARGET_EFAULT;
3523 
3524     addr = alloca(addrlen);
3525 
3526     ret_addrlen = addrlen;
3527     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3528     if (!is_error(ret)) {
3529         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3530         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3531             ret = -TARGET_EFAULT;
3532         }
3533     }
3534     return ret;
3535 }
3536 
3537 /* do_getpeername() Must return target values and target errnos. */
3538 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3539                                abi_ulong target_addrlen_addr)
3540 {
3541     socklen_t addrlen, ret_addrlen;
3542     void *addr;
3543     abi_long ret;
3544 
3545     if (get_user_u32(addrlen, target_addrlen_addr))
3546         return -TARGET_EFAULT;
3547 
3548     if ((int)addrlen < 0) {
3549         return -TARGET_EINVAL;
3550     }
3551 
3552     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3553         return -TARGET_EFAULT;
3554 
3555     addr = alloca(addrlen);
3556 
3557     ret_addrlen = addrlen;
3558     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3559     if (!is_error(ret)) {
3560         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3561         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3562             ret = -TARGET_EFAULT;
3563         }
3564     }
3565     return ret;
3566 }
3567 
3568 /* do_getsockname() Must return target values and target errnos. */
3569 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3570                                abi_ulong target_addrlen_addr)
3571 {
3572     socklen_t addrlen, ret_addrlen;
3573     void *addr;
3574     abi_long ret;
3575 
3576     if (get_user_u32(addrlen, target_addrlen_addr))
3577         return -TARGET_EFAULT;
3578 
3579     if ((int)addrlen < 0) {
3580         return -TARGET_EINVAL;
3581     }
3582 
3583     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3584         return -TARGET_EFAULT;
3585 
3586     addr = alloca(addrlen);
3587 
3588     ret_addrlen = addrlen;
3589     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3590     if (!is_error(ret)) {
3591         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3592         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3593             ret = -TARGET_EFAULT;
3594         }
3595     }
3596     return ret;
3597 }
3598 
3599 /* do_socketpair() Must return target values and target errnos. */
3600 static abi_long do_socketpair(int domain, int type, int protocol,
3601                               abi_ulong target_tab_addr)
3602 {
3603     int tab[2];
3604     abi_long ret;
3605 
3606     target_to_host_sock_type(&type);
3607 
3608     ret = get_errno(socketpair(domain, type, protocol, tab));
3609     if (!is_error(ret)) {
3610         if (put_user_s32(tab[0], target_tab_addr)
3611             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3612             ret = -TARGET_EFAULT;
3613     }
3614     return ret;
3615 }
3616 
3617 /* do_sendto() Must return target values and target errnos. */
3618 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3619                           abi_ulong target_addr, socklen_t addrlen)
3620 {
3621     void *addr;
3622     void *host_msg;
3623     void *copy_msg = NULL;
3624     abi_long ret;
3625 
3626     if ((int)addrlen < 0) {
3627         return -TARGET_EINVAL;
3628     }
3629 
3630     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3631     if (!host_msg)
3632         return -TARGET_EFAULT;
3633     if (fd_trans_target_to_host_data(fd)) {
3634         copy_msg = host_msg;
3635         host_msg = g_malloc(len);
3636         memcpy(host_msg, copy_msg, len);
3637         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3638         if (ret < 0) {
3639             goto fail;
3640         }
3641     }
3642     if (target_addr) {
3643         addr = alloca(addrlen+1);
3644         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3645         if (ret) {
3646             goto fail;
3647         }
3648         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3649     } else {
3650         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3651     }
3652 fail:
3653     if (copy_msg) {
3654         g_free(host_msg);
3655         host_msg = copy_msg;
3656     }
3657     unlock_user(host_msg, msg, 0);
3658     return ret;
3659 }
3660 
3661 /* do_recvfrom() Must return target values and target errnos. */
3662 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3663                             abi_ulong target_addr,
3664                             abi_ulong target_addrlen)
3665 {
3666     socklen_t addrlen, ret_addrlen;
3667     void *addr;
3668     void *host_msg;
3669     abi_long ret;
3670 
3671     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3672     if (!host_msg)
3673         return -TARGET_EFAULT;
3674     if (target_addr) {
3675         if (get_user_u32(addrlen, target_addrlen)) {
3676             ret = -TARGET_EFAULT;
3677             goto fail;
3678         }
3679         if ((int)addrlen < 0) {
3680             ret = -TARGET_EINVAL;
3681             goto fail;
3682         }
3683         addr = alloca(addrlen);
3684         ret_addrlen = addrlen;
3685         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3686                                       addr, &ret_addrlen));
3687     } else {
3688         addr = NULL; /* To keep compiler quiet.  */
3689         addrlen = 0; /* To keep compiler quiet.  */
3690         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3691     }
3692     if (!is_error(ret)) {
3693         if (fd_trans_host_to_target_data(fd)) {
3694             abi_long trans;
3695             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3696             if (is_error(trans)) {
3697                 ret = trans;
3698                 goto fail;
3699             }
3700         }
3701         if (target_addr) {
3702             host_to_target_sockaddr(target_addr, addr,
3703                                     MIN(addrlen, ret_addrlen));
3704             if (put_user_u32(ret_addrlen, target_addrlen)) {
3705                 ret = -TARGET_EFAULT;
3706                 goto fail;
3707             }
3708         }
3709         unlock_user(host_msg, msg, len);
3710     } else {
3711 fail:
3712         unlock_user(host_msg, msg, 0);
3713     }
3714     return ret;
3715 }
3716 
3717 #ifdef TARGET_NR_socketcall
3718 /* do_socketcall() must return target values and target errnos. */
3719 static abi_long do_socketcall(int num, abi_ulong vptr)
3720 {
3721     static const unsigned nargs[] = { /* number of arguments per operation */
3722         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3723         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3724         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3725         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3726         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3727         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3728         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3729         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3730         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3731         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3732         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3733         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3734         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3735         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3736         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3737         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3738         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3739         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3740         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3741         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3742     };
3743     abi_long a[6]; /* max 6 args */
3744     unsigned i;
3745 
3746     /* check the range of the first argument num */
3747     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3748     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3749         return -TARGET_EINVAL;
3750     }
3751     /* ensure we have space for args */
3752     if (nargs[num] > ARRAY_SIZE(a)) {
3753         return -TARGET_EINVAL;
3754     }
3755     /* collect the arguments in a[] according to nargs[] */
3756     for (i = 0; i < nargs[num]; ++i) {
3757         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3758             return -TARGET_EFAULT;
3759         }
3760     }
3761     /* now that we have the args, invoke the appropriate underlying function */
3762     switch (num) {
3763     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3764         return do_socket(a[0], a[1], a[2]);
3765     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3766         return do_bind(a[0], a[1], a[2]);
3767     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3768         return do_connect(a[0], a[1], a[2]);
3769     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3770         return get_errno(listen(a[0], a[1]));
3771     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3772         return do_accept4(a[0], a[1], a[2], 0);
3773     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3774         return do_getsockname(a[0], a[1], a[2]);
3775     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3776         return do_getpeername(a[0], a[1], a[2]);
3777     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3778         return do_socketpair(a[0], a[1], a[2], a[3]);
3779     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3780         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3781     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3782         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3783     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3784         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3785     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3786         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3787     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3788         return get_errno(shutdown(a[0], a[1]));
3789     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3790         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3791     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3792         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3793     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3794         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3795     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3796         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3797     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3798         return do_accept4(a[0], a[1], a[2], a[3]);
3799     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3800         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3801     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3802         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3803     default:
3804         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3805         return -TARGET_EINVAL;
3806     }
3807 }
3808 #endif
3809 
3810 #define N_SHM_REGIONS	32
3811 
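/*
 * Track guest shmat() mappings so that do_shmdt() can find the segment
 * size again and clear the page flags for the whole mapped range.
 */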
3812 static struct shm_region {
3813     abi_ulong start;
3814     abi_ulong size;
3815     bool in_use;
3816 } shm_regions[N_SHM_REGIONS];
3817 
3818 #ifndef TARGET_SEMID64_DS
3819 /* asm-generic version of this struct */
3820 struct target_semid64_ds
3821 {
3822   struct target_ipc_perm sem_perm;
3823   abi_ulong sem_otime;
3824 #if TARGET_ABI_BITS == 32
3825   abi_ulong __unused1;
3826 #endif
3827   abi_ulong sem_ctime;
3828 #if TARGET_ABI_BITS == 32
3829   abi_ulong __unused2;
3830 #endif
3831   abi_ulong sem_nsems;
3832   abi_ulong __unused3;
3833   abi_ulong __unused4;
3834 };
3835 #endif
3836 
3837 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3838                                                abi_ulong target_addr)
3839 {
3840     struct target_ipc_perm *target_ip;
3841     struct target_semid64_ds *target_sd;
3842 
3843     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3844         return -TARGET_EFAULT;
3845     target_ip = &(target_sd->sem_perm);
3846     host_ip->__key = tswap32(target_ip->__key);
3847     host_ip->uid = tswap32(target_ip->uid);
3848     host_ip->gid = tswap32(target_ip->gid);
3849     host_ip->cuid = tswap32(target_ip->cuid);
3850     host_ip->cgid = tswap32(target_ip->cgid);
3851 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3852     host_ip->mode = tswap32(target_ip->mode);
3853 #else
3854     host_ip->mode = tswap16(target_ip->mode);
3855 #endif
3856 #if defined(TARGET_PPC)
3857     host_ip->__seq = tswap32(target_ip->__seq);
3858 #else
3859     host_ip->__seq = tswap16(target_ip->__seq);
3860 #endif
3861     unlock_user_struct(target_sd, target_addr, 0);
3862     return 0;
3863 }
3864 
3865 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3866                                                struct ipc_perm *host_ip)
3867 {
3868     struct target_ipc_perm *target_ip;
3869     struct target_semid64_ds *target_sd;
3870 
3871     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3872         return -TARGET_EFAULT;
3873     target_ip = &(target_sd->sem_perm);
3874     target_ip->__key = tswap32(host_ip->__key);
3875     target_ip->uid = tswap32(host_ip->uid);
3876     target_ip->gid = tswap32(host_ip->gid);
3877     target_ip->cuid = tswap32(host_ip->cuid);
3878     target_ip->cgid = tswap32(host_ip->cgid);
3879 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3880     target_ip->mode = tswap32(host_ip->mode);
3881 #else
3882     target_ip->mode = tswap16(host_ip->mode);
3883 #endif
3884 #if defined(TARGET_PPC)
3885     target_ip->__seq = tswap32(host_ip->__seq);
3886 #else
3887     target_ip->__seq = tswap16(host_ip->__seq);
3888 #endif
3889     unlock_user_struct(target_sd, target_addr, 1);
3890     return 0;
3891 }
3892 
3893 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3894                                                abi_ulong target_addr)
3895 {
3896     struct target_semid64_ds *target_sd;
3897 
3898     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3899         return -TARGET_EFAULT;
3900     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3901         return -TARGET_EFAULT;
3902     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3903     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3904     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3905     unlock_user_struct(target_sd, target_addr, 0);
3906     return 0;
3907 }
3908 
3909 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3910                                                struct semid_ds *host_sd)
3911 {
3912     struct target_semid64_ds *target_sd;
3913 
3914     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3915         return -TARGET_EFAULT;
3916     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3917         return -TARGET_EFAULT;
3918     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3919     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3920     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3921     unlock_user_struct(target_sd, target_addr, 1);
3922     return 0;
3923 }
3924 
3925 struct target_seminfo {
3926     int semmap;
3927     int semmni;
3928     int semmns;
3929     int semmnu;
3930     int semmsl;
3931     int semopm;
3932     int semume;
3933     int semusz;
3934     int semvmx;
3935     int semaem;
3936 };
3937 
3938 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3939                                               struct seminfo *host_seminfo)
3940 {
3941     struct target_seminfo *target_seminfo;
3942     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3943         return -TARGET_EFAULT;
3944     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3945     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3946     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3947     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3948     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3949     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3950     __put_user(host_seminfo->semume, &target_seminfo->semume);
3951     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3952     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3953     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3954     unlock_user_struct(target_seminfo, target_addr, 1);
3955     return 0;
3956 }
3957 
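/*
 * The caller must define union semun itself; it is not provided by glibc
 * (see semctl(2)), hence the local host and target variants below.
 */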
3958 union semun {
3959 	int val;
3960 	struct semid_ds *buf;
3961 	unsigned short *array;
3962 	struct seminfo *__buf;
3963 };
3964 
3965 union target_semun {
3966 	int val;
3967 	abi_ulong buf;
3968 	abi_ulong array;
3969 	abi_ulong __buf;
3970 };
3971 
3972 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
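/*
 * Copy a GETALL/SETALL semaphore value array from the guest.  The number
 * of semaphores is obtained with IPC_STAT first; the host array is
 * allocated here and freed again by host_to_target_semarray().
 */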
3973                                                abi_ulong target_addr)
3974 {
3975     int nsems;
3976     unsigned short *array;
3977     union semun semun;
3978     struct semid_ds semid_ds;
3979     int i, ret;
3980 
3981     semun.buf = &semid_ds;
3982 
3983     ret = semctl(semid, 0, IPC_STAT, semun);
3984     if (ret == -1)
3985         return get_errno(ret);
3986 
3987     nsems = semid_ds.sem_nsems;
3988 
3989     *host_array = g_try_new(unsigned short, nsems);
3990     if (!*host_array) {
3991         return -TARGET_ENOMEM;
3992     }
3993     array = lock_user(VERIFY_READ, target_addr,
3994                       nsems*sizeof(unsigned short), 1);
3995     if (!array) {
3996         g_free(*host_array);
3997         return -TARGET_EFAULT;
3998     }
3999 
4000     for(i=0; i<nsems; i++) {
4001         __get_user((*host_array)[i], &array[i]);
4002     }
4003     unlock_user(array, target_addr, 0);
4004 
4005     return 0;
4006 }
4007 
4008 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4009                                                unsigned short **host_array)
4010 {
4011     int nsems;
4012     unsigned short *array;
4013     union semun semun;
4014     struct semid_ds semid_ds;
4015     int i, ret;
4016 
4017     semun.buf = &semid_ds;
4018 
4019     ret = semctl(semid, 0, IPC_STAT, semun);
4020     if (ret == -1)
4021         return get_errno(ret);
4022 
4023     nsems = semid_ds.sem_nsems;
4024 
4025     array = lock_user(VERIFY_WRITE, target_addr,
4026                       nsems*sizeof(unsigned short), 0);
4027     if (!array)
4028         return -TARGET_EFAULT;
4029 
4030     for(i=0; i<nsems; i++) {
4031         __put_user((*host_array)[i], &array[i]);
4032     }
4033     g_free(*host_array);
4034     unlock_user(array, target_addr, 1);
4035 
4036     return 0;
4037 }
4038 
4039 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4040                                  abi_ulong target_arg)
4041 {
4042     union target_semun target_su = { .buf = target_arg };
4043     union semun arg;
4044     struct semid_ds dsarg;
4045     unsigned short *array = NULL;
4046     struct seminfo seminfo;
4047     abi_long ret = -TARGET_EINVAL;
4048     abi_long err;
4049     cmd &= 0xff;
4050 
4051     switch( cmd ) {
4052 	case GETVAL:
4053 	case SETVAL:
4054             /* In 64-bit cross-endian situations, we will erroneously pick up
4055              * the wrong half of the union for the "val" element.  To rectify
4056              * this, the entire 8-byte structure is byteswapped, followed by
4057              * a swap of the 4-byte val field.  In other cases, the data is
4058              * already in proper host byte order. */
4059 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4060 		target_su.buf = tswapal(target_su.buf);
4061 		arg.val = tswap32(target_su.val);
4062 	    } else {
4063 		arg.val = target_su.val;
4064 	    }
4065             ret = get_errno(semctl(semid, semnum, cmd, arg));
4066             break;
4067 	case GETALL:
4068 	case SETALL:
4069             err = target_to_host_semarray(semid, &array, target_su.array);
4070             if (err)
4071                 return err;
4072             arg.array = array;
4073             ret = get_errno(semctl(semid, semnum, cmd, arg));
4074             err = host_to_target_semarray(semid, target_su.array, &array);
4075             if (err)
4076                 return err;
4077             break;
4078 	case IPC_STAT:
4079 	case IPC_SET:
4080 	case SEM_STAT:
4081             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4082             if (err)
4083                 return err;
4084             arg.buf = &dsarg;
4085             ret = get_errno(semctl(semid, semnum, cmd, arg));
4086             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4087             if (err)
4088                 return err;
4089             break;
4090 	case IPC_INFO:
4091 	case SEM_INFO:
4092             arg.__buf = &seminfo;
4093             ret = get_errno(semctl(semid, semnum, cmd, arg));
4094             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4095             if (err)
4096                 return err;
4097             break;
4098 	case IPC_RMID:
4099 	case GETPID:
4100 	case GETNCNT:
4101 	case GETZCNT:
4102             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4103             break;
4104     }
4105 
4106     return ret;
4107 }
4108 
4109 struct target_sembuf {
4110     unsigned short sem_num;
4111     short sem_op;
4112     short sem_flg;
4113 };
4114 
4115 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4116                                              abi_ulong target_addr,
4117                                              unsigned nsops)
4118 {
4119     struct target_sembuf *target_sembuf;
4120     int i;
4121 
4122     target_sembuf = lock_user(VERIFY_READ, target_addr,
4123                               nsops*sizeof(struct target_sembuf), 1);
4124     if (!target_sembuf)
4125         return -TARGET_EFAULT;
4126 
4127     for(i=0; i<nsops; i++) {
4128         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4129         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4130         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4131     }
4132 
4133     unlock_user(target_sembuf, target_addr, 0);
4134 
4135     return 0;
4136 }
4137 
4138 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4139     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4140 
4141 /*
4142  * This macro is required to handle the s390 variants, which pass the
4143  * arguments in a different order than the default.
4144  */
4145 #ifdef __s390x__
4146 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4147   (__nsops), (__timeout), (__sops)
4148 #else
4149 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4150   (__nsops), 0, (__sops), (__timeout)
4151 #endif
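/*
 * Example expansion (for illustration): in the safe_ipc() call below,
 *   safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(nsops, sops, ts))
 * becomes
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, ts, sops)       on s390x
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, ts)    elsewhere
 */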
4152 
4153 static inline abi_long do_semtimedop(int semid,
4154                                      abi_long ptr,
4155                                      unsigned nsops,
4156                                      abi_long timeout, bool time64)
4157 {
4158     struct sembuf *sops;
4159     struct timespec ts, *pts = NULL;
4160     abi_long ret;
4161 
4162     if (timeout) {
4163         pts = &ts;
4164         if (time64) {
4165             if (target_to_host_timespec64(pts, timeout)) {
4166                 return -TARGET_EFAULT;
4167             }
4168         } else {
4169             if (target_to_host_timespec(pts, timeout)) {
4170                 return -TARGET_EFAULT;
4171             }
4172         }
4173     }
4174 
4175     if (nsops > TARGET_SEMOPM) {
4176         return -TARGET_E2BIG;
4177     }
4178 
4179     sops = g_new(struct sembuf, nsops);
4180 
4181     if (target_to_host_sembuf(sops, ptr, nsops)) {
4182         g_free(sops);
4183         return -TARGET_EFAULT;
4184     }
4185 
4186     ret = -TARGET_ENOSYS;
4187 #ifdef __NR_semtimedop
4188     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4189 #endif
4190 #ifdef __NR_ipc
4191     if (ret == -TARGET_ENOSYS) {
4192         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4193                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4194     }
4195 #endif
4196     g_free(sops);
4197     return ret;
4198 }
4199 #endif
4200 
4201 struct target_msqid_ds
4202 {
4203     struct target_ipc_perm msg_perm;
4204     abi_ulong msg_stime;
4205 #if TARGET_ABI_BITS == 32
4206     abi_ulong __unused1;
4207 #endif
4208     abi_ulong msg_rtime;
4209 #if TARGET_ABI_BITS == 32
4210     abi_ulong __unused2;
4211 #endif
4212     abi_ulong msg_ctime;
4213 #if TARGET_ABI_BITS == 32
4214     abi_ulong __unused3;
4215 #endif
4216     abi_ulong __msg_cbytes;
4217     abi_ulong msg_qnum;
4218     abi_ulong msg_qbytes;
4219     abi_ulong msg_lspid;
4220     abi_ulong msg_lrpid;
4221     abi_ulong __unused4;
4222     abi_ulong __unused5;
4223 };
4224 
4225 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4226                                                abi_ulong target_addr)
4227 {
4228     struct target_msqid_ds *target_md;
4229 
4230     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4231         return -TARGET_EFAULT;
4232     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4233         return -TARGET_EFAULT;
4234     host_md->msg_stime = tswapal(target_md->msg_stime);
4235     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4236     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4237     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4238     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4239     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4240     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4241     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4242     unlock_user_struct(target_md, target_addr, 0);
4243     return 0;
4244 }
4245 
4246 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4247                                                struct msqid_ds *host_md)
4248 {
4249     struct target_msqid_ds *target_md;
4250 
4251     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4252         return -TARGET_EFAULT;
4253     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4254         return -TARGET_EFAULT;
4255     target_md->msg_stime = tswapal(host_md->msg_stime);
4256     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4257     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4258     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4259     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4260     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4261     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4262     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4263     unlock_user_struct(target_md, target_addr, 1);
4264     return 0;
4265 }
4266 
4267 struct target_msginfo {
4268     int msgpool;
4269     int msgmap;
4270     int msgmax;
4271     int msgmnb;
4272     int msgmni;
4273     int msgssz;
4274     int msgtql;
4275     unsigned short int msgseg;
4276 };
4277 
4278 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4279                                               struct msginfo *host_msginfo)
4280 {
4281     struct target_msginfo *target_msginfo;
4282     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4283         return -TARGET_EFAULT;
4284     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4285     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4286     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4287     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4288     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4289     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4290     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4291     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4292     unlock_user_struct(target_msginfo, target_addr, 1);
4293     return 0;
4294 }
4295 
4296 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4297 {
4298     struct msqid_ds dsarg;
4299     struct msginfo msginfo;
4300     abi_long ret = -TARGET_EINVAL;
4301 
4302     cmd &= 0xff;
4303 
4304     switch (cmd) {
4305     case IPC_STAT:
4306     case IPC_SET:
4307     case MSG_STAT:
4308         if (target_to_host_msqid_ds(&dsarg,ptr))
4309             return -TARGET_EFAULT;
4310         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4311         if (host_to_target_msqid_ds(ptr,&dsarg))
4312             return -TARGET_EFAULT;
4313         break;
4314     case IPC_RMID:
4315         ret = get_errno(msgctl(msgid, cmd, NULL));
4316         break;
4317     case IPC_INFO:
4318     case MSG_INFO:
4319         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4320         if (host_to_target_msginfo(ptr, &msginfo))
4321             return -TARGET_EFAULT;
4322         break;
4323     }
4324 
4325     return ret;
4326 }
4327 
4328 struct target_msgbuf {
4329     abi_long mtype;
4330     char	mtext[1];
4331 };
4332 
4333 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4334                                  ssize_t msgsz, int msgflg)
4335 {
4336     struct target_msgbuf *target_mb;
4337     struct msgbuf *host_mb;
4338     abi_long ret = 0;
4339 
4340     if (msgsz < 0) {
4341         return -TARGET_EINVAL;
4342     }
4343 
4344     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4345         return -TARGET_EFAULT;
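    /* sizeof(long) covers the host 'mtype' field that precedes mtext
     * in struct msgbuf. */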
4346     host_mb = g_try_malloc(msgsz + sizeof(long));
4347     if (!host_mb) {
4348         unlock_user_struct(target_mb, msgp, 0);
4349         return -TARGET_ENOMEM;
4350     }
4351     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4352     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4353     ret = -TARGET_ENOSYS;
4354 #ifdef __NR_msgsnd
4355     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4356 #endif
4357 #ifdef __NR_ipc
4358     if (ret == -TARGET_ENOSYS) {
4359 #ifdef __s390x__
4360         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4361                                  host_mb));
4362 #else
4363         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4364                                  host_mb, 0));
4365 #endif
4366     }
4367 #endif
4368     g_free(host_mb);
4369     unlock_user_struct(target_mb, msgp, 0);
4370 
4371     return ret;
4372 }
4373 
4374 #ifdef __NR_ipc
4375 #if defined(__sparc__)
4376 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4377 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4378 #elif defined(__s390x__)
4379 /* The s390 sys_ipc variant has only five parameters.  */
4380 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4381     ((long int[]){(long int)__msgp, __msgtyp})
4382 #else
4383 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4384     ((long int[]){(long int)__msgp, __msgtyp}), 0
4385 #endif
4386 #endif
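/*
 * Example expansion (for illustration): MSGRCV_ARGS(host_mb, msgtyp) in the
 * safe_ipc() call below becomes
 *   host_mb, msgtyp                                  on SPARC
 *   ((long int[]){(long int)host_mb, msgtyp})        on s390x
 *   ((long int[]){(long int)host_mb, msgtyp}), 0     elsewhere
 */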
4387 
4388 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4389                                  ssize_t msgsz, abi_long msgtyp,
4390                                  int msgflg)
4391 {
4392     struct target_msgbuf *target_mb;
4393     char *target_mtext;
4394     struct msgbuf *host_mb;
4395     abi_long ret = 0;
4396 
4397     if (msgsz < 0) {
4398         return -TARGET_EINVAL;
4399     }
4400 
4401     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4402         return -TARGET_EFAULT;
4403 
4404     host_mb = g_try_malloc(msgsz + sizeof(long));
4405     if (!host_mb) {
4406         ret = -TARGET_ENOMEM;
4407         goto end;
4408     }
4409     ret = -TARGET_ENOSYS;
4410 #ifdef __NR_msgrcv
4411     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4412 #endif
4413 #ifdef __NR_ipc
4414     if (ret == -TARGET_ENOSYS) {
4415         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4416                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4417     }
4418 #endif
4419 
4420     if (ret > 0) {
4421         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4422         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4423         if (!target_mtext) {
4424             ret = -TARGET_EFAULT;
4425             goto end;
4426         }
4427         memcpy(target_mb->mtext, host_mb->mtext, ret);
4428         unlock_user(target_mtext, target_mtext_addr, ret);
4429     }
4430 
4431     target_mb->mtype = tswapal(host_mb->mtype);
4432 
4433 end:
4434     if (target_mb)
4435         unlock_user_struct(target_mb, msgp, 1);
4436     g_free(host_mb);
4437     return ret;
4438 }
4439 
4440 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4441                                                abi_ulong target_addr)
4442 {
4443     struct target_shmid_ds *target_sd;
4444 
4445     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4446         return -TARGET_EFAULT;
4447     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4448         return -TARGET_EFAULT;
4449     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4450     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4451     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4452     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4453     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4454     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4455     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4456     unlock_user_struct(target_sd, target_addr, 0);
4457     return 0;
4458 }
4459 
4460 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4461                                                struct shmid_ds *host_sd)
4462 {
4463     struct target_shmid_ds *target_sd;
4464 
4465     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4466         return -TARGET_EFAULT;
4467     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4468         return -TARGET_EFAULT;
4469     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4470     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4471     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4472     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4473     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4474     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4475     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4476     unlock_user_struct(target_sd, target_addr, 1);
4477     return 0;
4478 }
4479 
4480 struct  target_shminfo {
4481     abi_ulong shmmax;
4482     abi_ulong shmmin;
4483     abi_ulong shmmni;
4484     abi_ulong shmseg;
4485     abi_ulong shmall;
4486 };
4487 
4488 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4489                                               struct shminfo *host_shminfo)
4490 {
4491     struct target_shminfo *target_shminfo;
4492     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4493         return -TARGET_EFAULT;
4494     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4495     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4496     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4497     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4498     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4499     unlock_user_struct(target_shminfo, target_addr, 1);
4500     return 0;
4501 }
4502 
4503 struct target_shm_info {
4504     int used_ids;
4505     abi_ulong shm_tot;
4506     abi_ulong shm_rss;
4507     abi_ulong shm_swp;
4508     abi_ulong swap_attempts;
4509     abi_ulong swap_successes;
4510 };
4511 
4512 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4513                                                struct shm_info *host_shm_info)
4514 {
4515     struct target_shm_info *target_shm_info;
4516     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4517         return -TARGET_EFAULT;
4518     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4519     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4520     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4521     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4522     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4523     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4524     unlock_user_struct(target_shm_info, target_addr, 1);
4525     return 0;
4526 }
4527 
4528 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4529 {
4530     struct shmid_ds dsarg;
4531     struct shminfo shminfo;
4532     struct shm_info shm_info;
4533     abi_long ret = -TARGET_EINVAL;
4534 
4535     cmd &= 0xff;
4536 
4537     switch(cmd) {
4538     case IPC_STAT:
4539     case IPC_SET:
4540     case SHM_STAT:
4541         if (target_to_host_shmid_ds(&dsarg, buf))
4542             return -TARGET_EFAULT;
4543         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4544         if (host_to_target_shmid_ds(buf, &dsarg))
4545             return -TARGET_EFAULT;
4546         break;
4547     case IPC_INFO:
4548         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4549         if (host_to_target_shminfo(buf, &shminfo))
4550             return -TARGET_EFAULT;
4551         break;
4552     case SHM_INFO:
4553         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4554         if (host_to_target_shm_info(buf, &shm_info))
4555             return -TARGET_EFAULT;
4556         break;
4557     case IPC_RMID:
4558     case SHM_LOCK:
4559     case SHM_UNLOCK:
4560         ret = get_errno(shmctl(shmid, cmd, NULL));
4561         break;
4562     }
4563 
4564     return ret;
4565 }
4566 
4567 #ifndef TARGET_FORCE_SHMLBA
4568 /* For most architectures, SHMLBA is the same as the page size;
4569  * some architectures have larger values, in which case they should
4570  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4571  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4572  * and defining its own value for SHMLBA.
4573  *
4574  * The kernel also permits SHMLBA to be set by the architecture to a
4575  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4576  * this means that addresses are rounded to the large size if
4577  * SHM_RND is set but addresses not aligned to that size are not rejected
4578  * as long as they are at least page-aligned. Since the only architecture
4579  * which uses this is ia64 this code doesn't provide for that oddity.
4580  */
4581 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4582 {
4583     return TARGET_PAGE_SIZE;
4584 }
4585 #endif
4586 
4587 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4588                                  int shmid, abi_ulong shmaddr, int shmflg)
4589 {
4590     abi_long raddr;
4591     void *host_raddr;
4592     struct shmid_ds shm_info;
4593     int i,ret;
4594     abi_ulong shmlba;
4595 
4596     /* find out the length of the shared memory segment */
4597     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4598     if (is_error(ret)) {
4599         /* can't get length, bail out */
4600         return ret;
4601     }
4602 
4603     shmlba = target_shmlba(cpu_env);
4604 
4605     if (shmaddr & (shmlba - 1)) {
4606         if (shmflg & SHM_RND) {
4607             shmaddr &= ~(shmlba - 1);
4608         } else {
4609             return -TARGET_EINVAL;
4610         }
4611     }
4612     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4613         return -TARGET_EINVAL;
4614     }
4615 
4616     mmap_lock();
4617 
4618     if (shmaddr)
4619         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4620     else {
4621         abi_ulong mmap_start;
4622 
4623         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4624         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4625 
4626         if (mmap_start == -1) {
4627             errno = ENOMEM;
4628             host_raddr = (void *)-1;
4629         } else
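                 /*
                  * SHM_REMAP lets shmat() replace any existing host mapping
                  * at the address mmap_find_vma() picked.
                  */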
4630             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4631     }
4632 
4633     if (host_raddr == (void *)-1) {
4634         mmap_unlock();
4635         return get_errno((long)host_raddr);
4636     }
4637     raddr = h2g((unsigned long)host_raddr);
4638 
4639     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4640                    PAGE_VALID | PAGE_READ |
4641                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4642 
4643     for (i = 0; i < N_SHM_REGIONS; i++) {
4644         if (!shm_regions[i].in_use) {
4645             shm_regions[i].in_use = true;
4646             shm_regions[i].start = raddr;
4647             shm_regions[i].size = shm_info.shm_segsz;
4648             break;
4649         }
4650     }
4651 
4652     mmap_unlock();
4653     return raddr;
4654 
4655 }
4656 
4657 static inline abi_long do_shmdt(abi_ulong shmaddr)
4658 {
4659     int i;
4660     abi_long rv;
4661 
4662     mmap_lock();
4663 
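         /* Find our record of this attachment and clear its guest page flags. */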
4664     for (i = 0; i < N_SHM_REGIONS; ++i) {
4665         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4666             shm_regions[i].in_use = false;
4667             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4668             break;
4669         }
4670     }
4671     rv = get_errno(shmdt(g2h(shmaddr)));
4672 
4673     mmap_unlock();
4674 
4675     return rv;
4676 }
4677 
4678 #ifdef TARGET_NR_ipc
4679 /* ??? This only works with linear mappings.  */
4680 /* do_ipc() must return target values and target errnos. */
4681 static abi_long do_ipc(CPUArchState *cpu_env,
4682                        unsigned int call, abi_long first,
4683                        abi_long second, abi_long third,
4684                        abi_long ptr, abi_long fifth)
4685 {
4686     int version;
4687     abi_long ret = 0;
4688 
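         /*
          * The ipc() multiplexer encodes a version number in the high
          * 16 bits of the call number.
          */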
4689     version = call >> 16;
4690     call &= 0xffff;
4691 
4692     switch (call) {
4693     case IPCOP_semop:
4694         ret = do_semtimedop(first, ptr, second, 0, false);
4695         break;
4696     case IPCOP_semtimedop:
4697     /*
4698      * The s390 sys_ipc variant has only five parameters instead of six
4699      * (as in the default variant); the only difference is the handling of
4700      * SEMTIMEDOP, where on s390 the third parameter is a pointer to a
4701      * struct timespec, whereas the generic variant uses the fifth parameter.
4702      */
4703 #if defined(TARGET_S390X)
4704         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4705 #else
4706         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4707 #endif
4708         break;
4709 
4710     case IPCOP_semget:
4711         ret = get_errno(semget(first, second, third));
4712         break;
4713 
4714     case IPCOP_semctl: {
4715         /* The semun argument to semctl is passed by value, so dereference the
4716          * ptr argument. */
4717         abi_ulong atptr;
4718         get_user_ual(atptr, ptr);
4719         ret = do_semctl(first, second, third, atptr);
4720         break;
4721     }
4722 
4723     case IPCOP_msgget:
4724         ret = get_errno(msgget(first, second));
4725         break;
4726 
4727     case IPCOP_msgsnd:
4728         ret = do_msgsnd(first, ptr, second, third);
4729         break;
4730 
4731     case IPCOP_msgctl:
4732         ret = do_msgctl(first, second, ptr);
4733         break;
4734 
4735     case IPCOP_msgrcv:
4736         switch (version) {
4737         case 0:
4738             {
4739                 struct target_ipc_kludge {
4740                     abi_long msgp;
4741                     abi_long msgtyp;
4742                 } *tmp;
4743 
4744                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4745                     ret = -TARGET_EFAULT;
4746                     break;
4747                 }
4748 
4749                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4750 
4751                 unlock_user_struct(tmp, ptr, 0);
4752                 break;
4753             }
4754         default:
4755             ret = do_msgrcv(first, ptr, second, fifth, third);
4756         }
4757         break;
4758 
4759     case IPCOP_shmat:
4760         switch (version) {
4761         default:
4762         {
4763             abi_ulong raddr;
4764             raddr = do_shmat(cpu_env, first, ptr, second);
4765             if (is_error(raddr))
4766                 return get_errno(raddr);
4767             if (put_user_ual(raddr, third))
4768                 return -TARGET_EFAULT;
4769             break;
4770         }
4771         case 1:
4772             ret = -TARGET_EINVAL;
4773             break;
4774         }
4775         break;
4776     case IPCOP_shmdt:
4777         ret = do_shmdt(ptr);
4778         break;
4779 
4780     case IPCOP_shmget:
4781         /* IPC_* flag values are the same on all Linux platforms */
4782         ret = get_errno(shmget(first, second, third));
4783         break;
4784 
4785     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4786     case IPCOP_shmctl:
4787         ret = do_shmctl(first, second, ptr);
4788         break;
4789     default:
4790         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4791                       call, version);
4792         ret = -TARGET_ENOSYS;
4793         break;
4794     }
4795     return ret;
4796 }
4797 #endif
4798 
4799 /* kernel structure types definitions */
4800 
4801 #define STRUCT(name, ...) STRUCT_ ## name,
4802 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4803 enum {
4804 #include "syscall_types.h"
4805 STRUCT_MAX
4806 };
4807 #undef STRUCT
4808 #undef STRUCT_SPECIAL
4809 
4810 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4811 #define STRUCT_SPECIAL(name)
4812 #include "syscall_types.h"
4813 #undef STRUCT
4814 #undef STRUCT_SPECIAL
4815 
4816 #define MAX_STRUCT_SIZE 4096
4817 
4818 #ifdef CONFIG_FIEMAP
4819 /* So fiemap access checks don't overflow on 32 bit systems.
4820  * This is very slightly smaller than the limit imposed by
4821  * the underlying kernel.
4822  */
4823 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4824                             / sizeof(struct fiemap_extent))
4825 
4826 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4827                                        int fd, int cmd, abi_long arg)
4828 {
4829     /* The parameter for this ioctl is a struct fiemap followed
4830      * by an array of struct fiemap_extent whose size is set
4831      * in fiemap->fm_extent_count. The array is filled in by the
4832      * ioctl.
4833      */
4834     int target_size_in, target_size_out;
4835     struct fiemap *fm;
4836     const argtype *arg_type = ie->arg_type;
4837     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4838     void *argptr, *p;
4839     abi_long ret;
4840     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4841     uint32_t outbufsz;
4842     int free_fm = 0;
4843 
4844     assert(arg_type[0] == TYPE_PTR);
4845     assert(ie->access == IOC_RW);
4846     arg_type++;
4847     target_size_in = thunk_type_size(arg_type, 0);
4848     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4849     if (!argptr) {
4850         return -TARGET_EFAULT;
4851     }
4852     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4853     unlock_user(argptr, arg, 0);
4854     fm = (struct fiemap *)buf_temp;
4855     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4856         return -TARGET_EINVAL;
4857     }
4858 
4859     outbufsz = sizeof (*fm) +
4860         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4861 
4862     if (outbufsz > MAX_STRUCT_SIZE) {
4863         /* We can't fit all the extents into the fixed size buffer.
4864          * Allocate one that is large enough and use it instead.
4865          */
4866         fm = g_try_malloc(outbufsz);
4867         if (!fm) {
4868             return -TARGET_ENOMEM;
4869         }
4870         memcpy(fm, buf_temp, sizeof(struct fiemap));
4871         free_fm = 1;
4872     }
4873     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4874     if (!is_error(ret)) {
4875         target_size_out = target_size_in;
4876         /* An extent_count of 0 means we were only counting the extents
4877          * so there are no structs to copy
4878          */
4879         if (fm->fm_extent_count != 0) {
4880             target_size_out += fm->fm_mapped_extents * extent_size;
4881         }
4882         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4883         if (!argptr) {
4884             ret = -TARGET_EFAULT;
4885         } else {
4886             /* Convert the struct fiemap */
4887             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4888             if (fm->fm_extent_count != 0) {
4889                 p = argptr + target_size_in;
4890                 /* ...and then all the struct fiemap_extents */
4891                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4892                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4893                                   THUNK_TARGET);
4894                     p += extent_size;
4895                 }
4896             }
4897             unlock_user(argptr, arg, target_size_out);
4898         }
4899     }
4900     if (free_fm) {
4901         g_free(fm);
4902     }
4903     return ret;
4904 }
4905 #endif
4906 
4907 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4908                                 int fd, int cmd, abi_long arg)
4909 {
4910     const argtype *arg_type = ie->arg_type;
4911     int target_size;
4912     void *argptr;
4913     int ret;
4914     struct ifconf *host_ifconf;
4915     uint32_t outbufsz;
4916     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4917     int target_ifreq_size;
4918     int nb_ifreq;
4919     int free_buf = 0;
4920     int i;
4921     int target_ifc_len;
4922     abi_long target_ifc_buf;
4923     int host_ifc_len;
4924     char *host_ifc_buf;
4925 
4926     assert(arg_type[0] == TYPE_PTR);
4927     assert(ie->access == IOC_RW);
4928 
4929     arg_type++;
4930     target_size = thunk_type_size(arg_type, 0);
4931 
4932     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4933     if (!argptr)
4934         return -TARGET_EFAULT;
4935     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4936     unlock_user(argptr, arg, 0);
4937 
4938     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4939     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4940     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4941 
4942     if (target_ifc_buf != 0) {
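             /*
              * The host and target struct ifreq sizes may differ, so scale
              * the buffer length by the number of entries.
              */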
4943         target_ifc_len = host_ifconf->ifc_len;
4944         nb_ifreq = target_ifc_len / target_ifreq_size;
4945         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4946 
4947         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4948         if (outbufsz > MAX_STRUCT_SIZE) {
4949             /*
4950              * We can't fit all the ifreq entries into the fixed size buffer.
4951              * Allocate one that is large enough and use it instead.
4952              */
4953             host_ifconf = malloc(outbufsz);
4954             if (!host_ifconf) {
4955                 return -TARGET_ENOMEM;
4956             }
4957             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4958             free_buf = 1;
4959         }
4960         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4961 
4962         host_ifconf->ifc_len = host_ifc_len;
4963     } else {
4964         host_ifc_buf = NULL;
4965     }
4966     host_ifconf->ifc_buf = host_ifc_buf;
4967 
4968     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4969     if (!is_error(ret)) {
4970         /* convert host ifc_len to target ifc_len */
4971 
4972         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4973         target_ifc_len = nb_ifreq * target_ifreq_size;
4974         host_ifconf->ifc_len = target_ifc_len;
4975 
4976         /* restore target ifc_buf */
4977 
4978         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4979 
4980         /* copy struct ifconf to target user */
4981 
4982         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4983         if (!argptr)
4984             return -TARGET_EFAULT;
4985         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4986         unlock_user(argptr, arg, target_size);
4987 
4988         if (target_ifc_buf != 0) {
4989             /* copy ifreq[] to target user */
4990             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4991             for (i = 0; i < nb_ifreq ; i++) {
4992                 thunk_convert(argptr + i * target_ifreq_size,
4993                               host_ifc_buf + i * sizeof(struct ifreq),
4994                               ifreq_arg_type, THUNK_TARGET);
4995             }
4996             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4997         }
4998     }
4999 
5000     if (free_buf) {
5001         free(host_ifconf);
5002     }
5003 
5004     return ret;
5005 }
5006 
5007 #if defined(CONFIG_USBFS)
5008 #if HOST_LONG_BITS > 64
5009 #error USBDEVFS thunks do not support >64 bit hosts yet.
5010 #endif
5011 struct live_urb {
5012     uint64_t target_urb_adr;
5013     uint64_t target_buf_adr;
5014     char *target_buf_ptr;
5015     struct usbdevfs_urb host_urb;
5016 };
5017 
5018 static GHashTable *usbdevfs_urb_hashtable(void)
5019 {
5020     static GHashTable *urb_hashtable;
5021 
5022     if (!urb_hashtable) {
5023         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5024     }
5025     return urb_hashtable;
5026 }
5027 
5028 static void urb_hashtable_insert(struct live_urb *urb)
5029 {
5030     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5031     g_hash_table_insert(urb_hashtable, urb, urb);
5032 }
5033 
5034 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5035 {
5036     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5037     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5038 }
5039 
5040 static void urb_hashtable_remove(struct live_urb *urb)
5041 {
5042     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5043     g_hash_table_remove(urb_hashtable, urb);
5044 }
5045 
5046 static abi_long
5047 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5048                           int fd, int cmd, abi_long arg)
5049 {
5050     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5051     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5052     struct live_urb *lurb;
5053     void *argptr;
5054     uint64_t hurb;
5055     int target_size;
5056     uintptr_t target_urb_adr;
5057     abi_long ret;
5058 
5059     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5060 
5061     memset(buf_temp, 0, sizeof(uint64_t));
5062     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5063     if (is_error(ret)) {
5064         return ret;
5065     }
5066 
5067     memcpy(&hurb, buf_temp, sizeof(uint64_t));
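         /*
          * The kernel returned the address of our embedded host_urb;
          * step back to the enclosing live_urb to recover the guest metadata.
          */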
5068     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5069     if (!lurb->target_urb_adr) {
5070         return -TARGET_EFAULT;
5071     }
5072     urb_hashtable_remove(lurb);
5073     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5074         lurb->host_urb.buffer_length);
5075     lurb->target_buf_ptr = NULL;
5076 
5077     /* restore the guest buffer pointer */
5078     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5079 
5080     /* update the guest urb struct */
5081     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5082     if (!argptr) {
5083         g_free(lurb);
5084         return -TARGET_EFAULT;
5085     }
5086     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5087     unlock_user(argptr, lurb->target_urb_adr, target_size);
5088 
5089     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5090     /* write back the urb handle */
5091     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5092     if (!argptr) {
5093         g_free(lurb);
5094         return -TARGET_EFAULT;
5095     }
5096 
5097     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5098     target_urb_adr = lurb->target_urb_adr;
5099     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5100     unlock_user(argptr, arg, target_size);
5101 
5102     g_free(lurb);
5103     return ret;
5104 }
5105 
5106 static abi_long
5107 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5108                              uint8_t *buf_temp __attribute__((unused)),
5109                              int fd, int cmd, abi_long arg)
5110 {
5111     struct live_urb *lurb;
5112 
5113     /* map target address back to host URB with metadata. */
5114     lurb = urb_hashtable_lookup(arg);
5115     if (!lurb) {
5116         return -TARGET_EFAULT;
5117     }
5118     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5119 }
5120 
5121 static abi_long
5122 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5123                             int fd, int cmd, abi_long arg)
5124 {
5125     const argtype *arg_type = ie->arg_type;
5126     int target_size;
5127     abi_long ret;
5128     void *argptr;
5129     int rw_dir;
5130     struct live_urb *lurb;
5131 
5132     /*
5133      * Each submitted URB needs to map to a unique ID for the
5134      * kernel, and that unique ID needs to be a pointer to
5135      * host memory.  Hence, we need to malloc for each URB.
5136      * Isochronous transfers have a variable-length struct.
5137      */
5138     arg_type++;
5139     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5140 
5141     /* construct host copy of urb and metadata */
5142     lurb = g_try_malloc0(sizeof(struct live_urb));
5143     if (!lurb) {
5144         return -TARGET_ENOMEM;
5145     }
5146 
5147     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5148     if (!argptr) {
5149         g_free(lurb);
5150         return -TARGET_EFAULT;
5151     }
5152     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5153     unlock_user(argptr, arg, 0);
5154 
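         /* Remember the guest addresses so reapurb can write results back. */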
5155     lurb->target_urb_adr = arg;
5156     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5157 
5158     /* buffer space used depends on endpoint type so lock the entire buffer */
5159     /* control type urbs should check the buffer contents for true direction */
5160     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5161     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5162         lurb->host_urb.buffer_length, 1);
5163     if (lurb->target_buf_ptr == NULL) {
5164         g_free(lurb);
5165         return -TARGET_EFAULT;
5166     }
5167 
5168     /* update buffer pointer in host copy */
5169     lurb->host_urb.buffer = lurb->target_buf_ptr;
5170 
5171     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5172     if (is_error(ret)) {
5173         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5174         g_free(lurb);
5175     } else {
5176         urb_hashtable_insert(lurb);
5177     }
5178 
5179     return ret;
5180 }
5181 #endif /* CONFIG_USBFS */
5182 
5183 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5184                             int cmd, abi_long arg)
5185 {
5186     void *argptr;
5187     struct dm_ioctl *host_dm;
5188     abi_long guest_data;
5189     uint32_t guest_data_size;
5190     int target_size;
5191     const argtype *arg_type = ie->arg_type;
5192     abi_long ret;
5193     void *big_buf = NULL;
5194     char *host_data;
5195 
5196     arg_type++;
5197     target_size = thunk_type_size(arg_type, 0);
5198     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5199     if (!argptr) {
5200         ret = -TARGET_EFAULT;
5201         goto out;
5202     }
5203     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5204     unlock_user(argptr, arg, 0);
5205 
5206     /* buf_temp is too small, so fetch things into a bigger buffer */
5207     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5208     memcpy(big_buf, buf_temp, target_size);
5209     buf_temp = big_buf;
5210     host_dm = big_buf;
5211 
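         /* The dm payload starts data_start bytes into the guest buffer. */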
5212     guest_data = arg + host_dm->data_start;
5213     if ((guest_data - arg) < 0) {
5214         ret = -TARGET_EINVAL;
5215         goto out;
5216     }
5217     guest_data_size = host_dm->data_size - host_dm->data_start;
5218     host_data = (char*)host_dm + host_dm->data_start;
5219 
5220     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5221     if (!argptr) {
5222         ret = -TARGET_EFAULT;
5223         goto out;
5224     }
5225 
5226     switch (ie->host_cmd) {
5227     case DM_REMOVE_ALL:
5228     case DM_LIST_DEVICES:
5229     case DM_DEV_CREATE:
5230     case DM_DEV_REMOVE:
5231     case DM_DEV_SUSPEND:
5232     case DM_DEV_STATUS:
5233     case DM_DEV_WAIT:
5234     case DM_TABLE_STATUS:
5235     case DM_TABLE_CLEAR:
5236     case DM_TABLE_DEPS:
5237     case DM_LIST_VERSIONS:
5238         /* no input data */
5239         break;
5240     case DM_DEV_RENAME:
5241     case DM_DEV_SET_GEOMETRY:
5242         /* data contains only strings */
5243         memcpy(host_data, argptr, guest_data_size);
5244         break;
5245     case DM_TARGET_MSG:
5246         memcpy(host_data, argptr, guest_data_size);
5247         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5248         break;
5249     case DM_TABLE_LOAD:
5250     {
5251         void *gspec = argptr;
5252         void *cur_data = host_data;
5253         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5254         int spec_size = thunk_type_size(arg_type, 0);
5255         int i;
5256 
5257         for (i = 0; i < host_dm->target_count; i++) {
5258             struct dm_target_spec *spec = cur_data;
5259             uint32_t next;
5260             int slen;
5261 
5262             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5263             slen = strlen((char*)gspec + spec_size) + 1;
5264             next = spec->next;
5265             spec->next = sizeof(*spec) + slen;
5266             strcpy((char*)&spec[1], gspec + spec_size);
5267             gspec += next;
5268             cur_data += spec->next;
5269         }
5270         break;
5271     }
5272     default:
5273         ret = -TARGET_EINVAL;
5274         unlock_user(argptr, guest_data, 0);
5275         goto out;
5276     }
5277     unlock_user(argptr, guest_data, 0);
5278 
5279     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5280     if (!is_error(ret)) {
5281         guest_data = arg + host_dm->data_start;
5282         guest_data_size = host_dm->data_size - host_dm->data_start;
5283         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5284         switch (ie->host_cmd) {
5285         case DM_REMOVE_ALL:
5286         case DM_DEV_CREATE:
5287         case DM_DEV_REMOVE:
5288         case DM_DEV_RENAME:
5289         case DM_DEV_SUSPEND:
5290         case DM_DEV_STATUS:
5291         case DM_TABLE_LOAD:
5292         case DM_TABLE_CLEAR:
5293         case DM_TARGET_MSG:
5294         case DM_DEV_SET_GEOMETRY:
5295             /* no return data */
5296             break;
5297         case DM_LIST_DEVICES:
5298         {
5299             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5300             uint32_t remaining_data = guest_data_size;
5301             void *cur_data = argptr;
5302             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5303             int nl_size = 12; /* can't use thunk_size due to alignment */
5304 
5305             while (1) {
5306                 uint32_t next = nl->next;
5307                 if (next) {
5308                     nl->next = nl_size + (strlen(nl->name) + 1);
5309                 }
5310                 if (remaining_data < nl->next) {
5311                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5312                     break;
5313                 }
5314                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5315                 strcpy(cur_data + nl_size, nl->name);
5316                 cur_data += nl->next;
5317                 remaining_data -= nl->next;
5318                 if (!next) {
5319                     break;
5320                 }
5321                 nl = (void*)nl + next;
5322             }
5323             break;
5324         }
5325         case DM_DEV_WAIT:
5326         case DM_TABLE_STATUS:
5327         {
5328             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5329             void *cur_data = argptr;
5330             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5331             int spec_size = thunk_type_size(arg_type, 0);
5332             int i;
5333 
5334             for (i = 0; i < host_dm->target_count; i++) {
5335                 uint32_t next = spec->next;
5336                 int slen = strlen((char*)&spec[1]) + 1;
5337                 spec->next = (cur_data - argptr) + spec_size + slen;
5338                 if (guest_data_size < spec->next) {
5339                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5340                     break;
5341                 }
5342                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5343                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5344                 cur_data = argptr + spec->next;
5345                 spec = (void*)host_dm + host_dm->data_start + next;
5346             }
5347             break;
5348         }
5349         case DM_TABLE_DEPS:
5350         {
5351             void *hdata = (void*)host_dm + host_dm->data_start;
5352             int count = *(uint32_t*)hdata;
5353             uint64_t *hdev = hdata + 8;
5354             uint64_t *gdev = argptr + 8;
5355             int i;
5356 
5357             *(uint32_t*)argptr = tswap32(count);
5358             for (i = 0; i < count; i++) {
5359                 *gdev = tswap64(*hdev);
5360                 gdev++;
5361                 hdev++;
5362             }
5363             break;
5364         }
5365         case DM_LIST_VERSIONS:
5366         {
5367             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5368             uint32_t remaining_data = guest_data_size;
5369             void *cur_data = argptr;
5370             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5371             int vers_size = thunk_type_size(arg_type, 0);
5372 
5373             while (1) {
5374                 uint32_t next = vers->next;
5375                 if (next) {
5376                     vers->next = vers_size + (strlen(vers->name) + 1);
5377                 }
5378                 if (remaining_data < vers->next) {
5379                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5380                     break;
5381                 }
5382                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5383                 strcpy(cur_data + vers_size, vers->name);
5384                 cur_data += vers->next;
5385                 remaining_data -= vers->next;
5386                 if (!next) {
5387                     break;
5388                 }
5389                 vers = (void*)vers + next;
5390             }
5391             break;
5392         }
5393         default:
5394             unlock_user(argptr, guest_data, 0);
5395             ret = -TARGET_EINVAL;
5396             goto out;
5397         }
5398         unlock_user(argptr, guest_data, guest_data_size);
5399 
5400         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5401         if (!argptr) {
5402             ret = -TARGET_EFAULT;
5403             goto out;
5404         }
5405         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5406         unlock_user(argptr, arg, target_size);
5407     }
5408 out:
5409     g_free(big_buf);
5410     return ret;
5411 }
5412 
5413 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5414                                int cmd, abi_long arg)
5415 {
5416     void *argptr;
5417     int target_size;
5418     const argtype *arg_type = ie->arg_type;
5419     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5420     abi_long ret;
5421 
5422     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5423     struct blkpg_partition host_part;
5424 
5425     /* Read and convert blkpg */
5426     arg_type++;
5427     target_size = thunk_type_size(arg_type, 0);
5428     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5429     if (!argptr) {
5430         ret = -TARGET_EFAULT;
5431         goto out;
5432     }
5433     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5434     unlock_user(argptr, arg, 0);
5435 
5436     switch (host_blkpg->op) {
5437     case BLKPG_ADD_PARTITION:
5438     case BLKPG_DEL_PARTITION:
5439         /* payload is struct blkpg_partition */
5440         break;
5441     default:
5442         /* Unknown opcode */
5443         ret = -TARGET_EINVAL;
5444         goto out;
5445     }
5446 
5447     /* Read and convert blkpg->data */
5448     arg = (abi_long)(uintptr_t)host_blkpg->data;
5449     target_size = thunk_type_size(part_arg_type, 0);
5450     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5451     if (!argptr) {
5452         ret = -TARGET_EFAULT;
5453         goto out;
5454     }
5455     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5456     unlock_user(argptr, arg, 0);
5457 
5458     /* Swizzle the data pointer to our local copy and call! */
5459     host_blkpg->data = &host_part;
5460     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5461 
5462 out:
5463     return ret;
5464 }
5465 
5466 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5467                                 int fd, int cmd, abi_long arg)
5468 {
5469     const argtype *arg_type = ie->arg_type;
5470     const StructEntry *se;
5471     const argtype *field_types;
5472     const int *dst_offsets, *src_offsets;
5473     int target_size;
5474     void *argptr;
5475     abi_ulong *target_rt_dev_ptr = NULL;
5476     unsigned long *host_rt_dev_ptr = NULL;
5477     abi_long ret;
5478     int i;
5479 
5480     assert(ie->access == IOC_W);
5481     assert(*arg_type == TYPE_PTR);
5482     arg_type++;
5483     assert(*arg_type == TYPE_STRUCT);
5484     target_size = thunk_type_size(arg_type, 0);
5485     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5486     if (!argptr) {
5487         return -TARGET_EFAULT;
5488     }
5489     arg_type++;
5490     assert(*arg_type == (int)STRUCT_rtentry);
5491     se = struct_entries + *arg_type++;
5492     assert(se->convert[0] == NULL);
5493     /* convert the struct field by field here to catch the rt_dev string */
5494     field_types = se->field_types;
5495     dst_offsets = se->field_offsets[THUNK_HOST];
5496     src_offsets = se->field_offsets[THUNK_TARGET];
5497     for (i = 0; i < se->nb_fields; i++) {
5498         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5499             assert(*field_types == TYPE_PTRVOID);
5500             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5501             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5502             if (*target_rt_dev_ptr != 0) {
5503                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5504                                                   tswapal(*target_rt_dev_ptr));
5505                 if (!*host_rt_dev_ptr) {
5506                     unlock_user(argptr, arg, 0);
5507                     return -TARGET_EFAULT;
5508                 }
5509             } else {
5510                 *host_rt_dev_ptr = 0;
5511             }
5512             field_types++;
5513             continue;
5514         }
5515         field_types = thunk_convert(buf_temp + dst_offsets[i],
5516                                     argptr + src_offsets[i],
5517                                     field_types, THUNK_HOST);
5518     }
5519     unlock_user(argptr, arg, 0);
5520 
5521     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5522 
5523     assert(host_rt_dev_ptr != NULL);
5524     assert(target_rt_dev_ptr != NULL);
5525     if (*host_rt_dev_ptr != 0) {
5526         unlock_user((void *)*host_rt_dev_ptr,
5527                     *target_rt_dev_ptr, 0);
5528     }
5529     return ret;
5530 }
5531 
5532 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5533                                      int fd, int cmd, abi_long arg)
5534 {
5535     int sig = target_to_host_signal(arg);
5536     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5537 }
5538 
5539 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5540                                     int fd, int cmd, abi_long arg)
5541 {
5542     struct timeval tv;
5543     abi_long ret;
5544 
5545     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5546     if (is_error(ret)) {
5547         return ret;
5548     }
5549 
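         /* _OLD uses the legacy timeval layout, otherwise a 64-bit timeval. */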
5550     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5551         if (copy_to_user_timeval(arg, &tv)) {
5552             return -TARGET_EFAULT;
5553         }
5554     } else {
5555         if (copy_to_user_timeval64(arg, &tv)) {
5556             return -TARGET_EFAULT;
5557         }
5558     }
5559 
5560     return ret;
5561 }
5562 
5563 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5564                                       int fd, int cmd, abi_long arg)
5565 {
5566     struct timespec ts;
5567     abi_long ret;
5568 
5569     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5570     if (is_error(ret)) {
5571         return ret;
5572     }
5573 
5574     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5575         if (host_to_target_timespec(arg, &ts)) {
5576             return -TARGET_EFAULT;
5577         }
5578     } else {
5579         if (host_to_target_timespec64(arg, &ts)) {
5580             return -TARGET_EFAULT;
5581         }
5582     }
5583 
5584     return ret;
5585 }
5586 
5587 #ifdef TIOCGPTPEER
5588 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5589                                      int fd, int cmd, abi_long arg)
5590 {
5591     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5592     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5593 }
5594 #endif
5595 
5596 #ifdef HAVE_DRM_H
5597 
5598 static void unlock_drm_version(struct drm_version *host_ver,
5599                                struct target_drm_version *target_ver,
5600                                bool copy)
5601 {
5602     unlock_user(host_ver->name, target_ver->name,
5603                                 copy ? host_ver->name_len : 0);
5604     unlock_user(host_ver->date, target_ver->date,
5605                                 copy ? host_ver->date_len : 0);
5606     unlock_user(host_ver->desc, target_ver->desc,
5607                                 copy ? host_ver->desc_len : 0);
5608 }
5609 
5610 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5611                                           struct target_drm_version *target_ver)
5612 {
5613     memset(host_ver, 0, sizeof(*host_ver));
5614 
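         /*
          * Lock the guest name/date/desc buffers so the host ioctl can
          * fill them in place.
          */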
5615     __get_user(host_ver->name_len, &target_ver->name_len);
5616     if (host_ver->name_len) {
5617         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5618                                    target_ver->name_len, 0);
5619         if (!host_ver->name) {
5620             return -EFAULT;
5621         }
5622     }
5623 
5624     __get_user(host_ver->date_len, &target_ver->date_len);
5625     if (host_ver->date_len) {
5626         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5627                                    target_ver->date_len, 0);
5628         if (!host_ver->date) {
5629             goto err;
5630         }
5631     }
5632 
5633     __get_user(host_ver->desc_len, &target_ver->desc_len);
5634     if (host_ver->desc_len) {
5635         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5636                                    target_ver->desc_len, 0);
5637         if (!host_ver->desc) {
5638             goto err;
5639         }
5640     }
5641 
5642     return 0;
5643 err:
5644     unlock_drm_version(host_ver, target_ver, false);
5645     return -EFAULT;
5646 }
5647 
5648 static inline void host_to_target_drmversion(
5649                                           struct target_drm_version *target_ver,
5650                                           struct drm_version *host_ver)
5651 {
5652     __put_user(host_ver->version_major, &target_ver->version_major);
5653     __put_user(host_ver->version_minor, &target_ver->version_minor);
5654     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5655     __put_user(host_ver->name_len, &target_ver->name_len);
5656     __put_user(host_ver->date_len, &target_ver->date_len);
5657     __put_user(host_ver->desc_len, &target_ver->desc_len);
5658     unlock_drm_version(host_ver, target_ver, true);
5659 }
5660 
5661 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5662                              int fd, int cmd, abi_long arg)
5663 {
5664     struct drm_version *ver;
5665     struct target_drm_version *target_ver;
5666     abi_long ret;
5667 
5668     switch (ie->host_cmd) {
5669     case DRM_IOCTL_VERSION:
5670         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5671             return -TARGET_EFAULT;
5672         }
5673         ver = (struct drm_version *)buf_temp;
5674         ret = target_to_host_drmversion(ver, target_ver);
5675         if (!is_error(ret)) {
5676             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5677             if (is_error(ret)) {
5678                 unlock_drm_version(ver, target_ver, false);
5679             } else {
5680                 host_to_target_drmversion(target_ver, ver);
5681             }
5682         }
5683         unlock_user_struct(target_ver, arg, 0);
5684         return ret;
5685     }
5686     return -TARGET_ENOSYS;
5687 }
5688 
5689 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5690                                            struct drm_i915_getparam *gparam,
5691                                            int fd, abi_long arg)
5692 {
5693     abi_long ret;
5694     int value;
5695     struct target_drm_i915_getparam *target_gparam;
5696 
5697     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5698         return -TARGET_EFAULT;
5699     }
5700 
5701     __get_user(gparam->param, &target_gparam->param);
5702     gparam->value = &value;
5703     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5704     put_user_s32(value, target_gparam->value);
5705 
5706     unlock_user_struct(target_gparam, arg, 0);
5707     return ret;
5708 }
5709 
5710 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5711                                   int fd, int cmd, abi_long arg)
5712 {
5713     switch (ie->host_cmd) {
5714     case DRM_IOCTL_I915_GETPARAM:
5715         return do_ioctl_drm_i915_getparam(ie,
5716                                           (struct drm_i915_getparam *)buf_temp,
5717                                           fd, arg);
5718     default:
5719         return -TARGET_ENOSYS;
5720     }
5721 }
5722 
5723 #endif
5724 
5725 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5726                                         int fd, int cmd, abi_long arg)
5727 {
5728     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5729     struct tun_filter *target_filter;
5730     char *target_addr;
5731 
5732     assert(ie->access == IOC_W);
5733 
5734     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5735     if (!target_filter) {
5736         return -TARGET_EFAULT;
5737     }
5738     filter->flags = tswap16(target_filter->flags);
5739     filter->count = tswap16(target_filter->count);
5740     unlock_user(target_filter, arg, 0);
5741 
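         /*
          * addr[] is a flexible array member; make sure the MAC list fits
          * in buf_temp before copying it from the guest.
          */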
5742     if (filter->count) {
5743         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5744             MAX_STRUCT_SIZE) {
5745             return -TARGET_EFAULT;
5746         }
5747 
5748         target_addr = lock_user(VERIFY_READ,
5749                                 arg + offsetof(struct tun_filter, addr),
5750                                 filter->count * ETH_ALEN, 1);
5751         if (!target_addr) {
5752             return -TARGET_EFAULT;
5753         }
5754         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5755         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5756     }
5757 
5758     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5759 }
5760 
5761 IOCTLEntry ioctl_entries[] = {
5762 #define IOCTL(cmd, access, ...) \
5763     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5764 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5765     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5766 #define IOCTL_IGNORE(cmd) \
5767     { TARGET_ ## cmd, 0, #cmd },
5768 #include "ioctls.h"
5769     { 0, 0, },
5770 };
5771 
5772 /* ??? Implement proper locking for ioctls.  */
5773 /* do_ioctl() must return target values and target errnos. */
5774 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5775 {
5776     const IOCTLEntry *ie;
5777     const argtype *arg_type;
5778     abi_long ret;
5779     uint8_t buf_temp[MAX_STRUCT_SIZE];
5780     int target_size;
5781     void *argptr;
5782 
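         /* Linear search; the table ends with a target_cmd == 0 entry. */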
5783     ie = ioctl_entries;
5784     for(;;) {
5785         if (ie->target_cmd == 0) {
5786             qemu_log_mask(
5787                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5788             return -TARGET_ENOSYS;
5789         }
5790         if (ie->target_cmd == cmd)
5791             break;
5792         ie++;
5793     }
5794     arg_type = ie->arg_type;
5795     if (ie->do_ioctl) {
5796         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5797     } else if (!ie->host_cmd) {
5798         /* Some architectures define BSD ioctls in their headers
5799            that are not implemented in Linux.  */
5800         return -TARGET_ENOSYS;
5801     }
5802 
5803     switch(arg_type[0]) {
5804     case TYPE_NULL:
5805         /* no argument */
5806         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5807         break;
5808     case TYPE_PTRVOID:
5809     case TYPE_INT:
5810     case TYPE_LONG:
5811     case TYPE_ULONG:
5812         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5813         break;
5814     case TYPE_PTR:
5815         arg_type++;
5816         target_size = thunk_type_size(arg_type, 0);
5817         switch(ie->access) {
5818         case IOC_R:
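                 /* host -> guest: run the ioctl, then convert the result out */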
5819             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5820             if (!is_error(ret)) {
5821                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5822                 if (!argptr)
5823                     return -TARGET_EFAULT;
5824                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5825                 unlock_user(argptr, arg, target_size);
5826             }
5827             break;
5828         case IOC_W:
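                 /* guest -> host: convert the argument into buf_temp first */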
5829             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5830             if (!argptr)
5831                 return -TARGET_EFAULT;
5832             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5833             unlock_user(argptr, arg, 0);
5834             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5835             break;
5836         default:
5837         case IOC_RW:
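                 /* convert in, call the host, then convert back out */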
5838             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5839             if (!argptr)
5840                 return -TARGET_EFAULT;
5841             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5842             unlock_user(argptr, arg, 0);
5843             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5844             if (!is_error(ret)) {
5845                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5846                 if (!argptr)
5847                     return -TARGET_EFAULT;
5848                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5849                 unlock_user(argptr, arg, target_size);
5850             }
5851             break;
5852         }
5853         break;
5854     default:
5855         qemu_log_mask(LOG_UNIMP,
5856                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5857                       (long)cmd, arg_type[0]);
5858         ret = -TARGET_ENOSYS;
5859         break;
5860     }
5861     return ret;
5862 }
5863 
5864 static const bitmask_transtbl iflag_tbl[] = {
5865         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5866         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5867         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5868         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5869         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5870         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5871         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5872         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5873         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5874         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5875         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5876         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5877         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5878         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5879         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5880         { 0, 0, 0, 0 }
5881 };
5882 
5883 static const bitmask_transtbl oflag_tbl[] = {
5884 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5885 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5886 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5887 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5888 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5889 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5890 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5891 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5892 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5893 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5894 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5895 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5896 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5897 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5898 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5899 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5900 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5901 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5902 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5903 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5904 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5905 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5906 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5907 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5908 	{ 0, 0, 0, 0 }
5909 };
5910 
5911 static const bitmask_transtbl cflag_tbl[] = {
5912 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5913 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5914 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5915 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5916 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5917 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5918 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5919 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5920 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5921 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5922 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5923 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5924 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5925 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5926 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5927 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5928 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5929 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5930 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5931 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5932 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5933 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5934 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5935 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5936 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5937 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5938 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5939 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5940 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5941 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5942 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5943 	{ 0, 0, 0, 0 }
5944 };
5945 
5946 static const bitmask_transtbl lflag_tbl[] = {
5947   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5948   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5949   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5950   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5951   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5952   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5953   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5954   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5955   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5956   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5957   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5958   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5959   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5960   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5961   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5962   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5963   { 0, 0, 0, 0 }
5964 };
5965 
5966 static void target_to_host_termios (void *dst, const void *src)
5967 {
5968     struct host_termios *host = dst;
5969     const struct target_termios *target = src;
5970 
5971     host->c_iflag =
5972         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5973     host->c_oflag =
5974         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5975     host->c_cflag =
5976         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5977     host->c_lflag =
5978         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5979     host->c_line = target->c_line;
5980 
5981     memset(host->c_cc, 0, sizeof(host->c_cc));
5982     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5983     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5984     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5985     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5986     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5987     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5988     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5989     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5990     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5991     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5992     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5993     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5994     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5995     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5996     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5997     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5998     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5999 }
6000 
6001 static void host_to_target_termios (void *dst, const void *src)
6002 {
6003     struct target_termios *target = dst;
6004     const struct host_termios *host = src;
6005 
6006     target->c_iflag =
6007         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6008     target->c_oflag =
6009         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6010     target->c_cflag =
6011         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6012     target->c_lflag =
6013         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6014     target->c_line = host->c_line;
6015 
6016     memset(target->c_cc, 0, sizeof(target->c_cc));
6017     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6018     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6019     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6020     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6021     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6022     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6023     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6024     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6025     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6026     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6027     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6028     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6029     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6030     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6031     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6032     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6033     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6034 }
6035 
6036 static const StructEntry struct_termios_def = {
6037     .convert = { host_to_target_termios, target_to_host_termios },
6038     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6039     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6040     .print = print_termios,
6041 };
6042 
6043 static bitmask_transtbl mmap_flags_tbl[] = {
6044     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6045     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6046     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6047     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6048       MAP_ANONYMOUS, MAP_ANONYMOUS },
6049     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6050       MAP_GROWSDOWN, MAP_GROWSDOWN },
6051     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6052       MAP_DENYWRITE, MAP_DENYWRITE },
6053     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6054       MAP_EXECUTABLE, MAP_EXECUTABLE },
6055     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6056     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6057       MAP_NORESERVE, MAP_NORESERVE },
6058     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6059     /* MAP_STACK has been ignored by the kernel for quite some time.
6060        Recognize it for the target insofar as we do not want to pass
6061        it through to the host.  */
6062     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6063     { 0, 0, 0, 0 }
6064 };
6065 
6066 /*
6067  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6068  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
6069  */
6070 #if defined(TARGET_I386)
6071 
6072 /* NOTE: there is really only one LDT shared by all the threads */
6073 static uint8_t *ldt_table;
6074 
6075 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6076 {
6077     int size;
6078     void *p;
6079 
6080     if (!ldt_table)
6081         return 0;
6082     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6083     if (size > bytecount)
6084         size = bytecount;
6085     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6086     if (!p)
6087         return -TARGET_EFAULT;
6088     /* ??? Should this be byteswapped?  */
6089     memcpy(p, ldt_table, size);
6090     unlock_user(p, ptr, size);
6091     return size;
6092 }
6093 
6094 /* XXX: add locking support */
6095 static abi_long write_ldt(CPUX86State *env,
6096                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6097 {
6098     struct target_modify_ldt_ldt_s ldt_info;
6099     struct target_modify_ldt_ldt_s *target_ldt_info;
6100     int seg_32bit, contents, read_exec_only, limit_in_pages;
6101     int seg_not_present, useable, lm;
6102     uint32_t *lp, entry_1, entry_2;
6103 
6104     if (bytecount != sizeof(ldt_info))
6105         return -TARGET_EINVAL;
6106     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6107         return -TARGET_EFAULT;
6108     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6109     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6110     ldt_info.limit = tswap32(target_ldt_info->limit);
6111     ldt_info.flags = tswap32(target_ldt_info->flags);
6112     unlock_user_struct(target_ldt_info, ptr, 0);
6113 
6114     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6115         return -TARGET_EINVAL;
6116     seg_32bit = ldt_info.flags & 1;
6117     contents = (ldt_info.flags >> 1) & 3;
6118     read_exec_only = (ldt_info.flags >> 3) & 1;
6119     limit_in_pages = (ldt_info.flags >> 4) & 1;
6120     seg_not_present = (ldt_info.flags >> 5) & 1;
6121     useable = (ldt_info.flags >> 6) & 1;
6122 #ifdef TARGET_ABI32
6123     lm = 0;
6124 #else
6125     lm = (ldt_info.flags >> 7) & 1;
6126 #endif
6127     if (contents == 3) {
6128         if (oldmode)
6129             return -TARGET_EINVAL;
6130         if (seg_not_present == 0)
6131             return -TARGET_EINVAL;
6132     }
6133     /* allocate the LDT */
6134     if (!ldt_table) {
6135         env->ldt.base = target_mmap(0,
6136                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6137                                     PROT_READ|PROT_WRITE,
6138                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6139         if (env->ldt.base == -1)
6140             return -TARGET_ENOMEM;
6141         memset(g2h(env->ldt.base), 0,
6142                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6143         env->ldt.limit = 0xffff;
6144         ldt_table = g2h(env->ldt.base);
6145     }
6146 
6147     /* NOTE: same code as Linux kernel */
6148     /* Allow LDTs to be cleared by the user. */
6149     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6150         if (oldmode ||
6151             (contents == 0		&&
6152              read_exec_only == 1	&&
6153              seg_32bit == 0		&&
6154              limit_in_pages == 0	&&
6155              seg_not_present == 1	&&
6156              useable == 0 )) {
6157             entry_1 = 0;
6158             entry_2 = 0;
6159             goto install;
6160         }
6161     }
6162 
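    /*
     * Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor: entry_1 (the low word) holds base[15:0] and limit[15:0];
     * entry_2 (the high word) holds base[31:24] and base[23:16],
     * limit[19:16], the access bits derived from the flags decoded above,
     * and 0x7000 (DPL = 3, S = 1).
     */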
6163     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6164         (ldt_info.limit & 0x0ffff);
6165     entry_2 = (ldt_info.base_addr & 0xff000000) |
6166         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6167         (ldt_info.limit & 0xf0000) |
6168         ((read_exec_only ^ 1) << 9) |
6169         (contents << 10) |
6170         ((seg_not_present ^ 1) << 15) |
6171         (seg_32bit << 22) |
6172         (limit_in_pages << 23) |
6173         (lm << 21) |
6174         0x7000;
6175     if (!oldmode)
6176         entry_2 |= (useable << 20);
6177 
6178     /* Install the new entry ...  */
6179 install:
6180     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6181     lp[0] = tswap32(entry_1);
6182     lp[1] = tswap32(entry_2);
6183     return 0;
6184 }
6185 
6186 /* specific and weird i386 syscalls */
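/*
 * modify_ldt(2) dispatch: func 0 reads the LDT, func 1 writes an entry in
 * the legacy format (oldmode, where the 'useable' bit is not honoured),
 * and func 0x11 writes in the current format; anything else is -ENOSYS.
 */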
6187 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6188                               unsigned long bytecount)
6189 {
6190     abi_long ret;
6191 
6192     switch (func) {
6193     case 0:
6194         ret = read_ldt(ptr, bytecount);
6195         break;
6196     case 1:
6197         ret = write_ldt(env, ptr, bytecount, 1);
6198         break;
6199     case 0x11:
6200         ret = write_ldt(env, ptr, bytecount, 0);
6201         break;
6202     default:
6203         ret = -TARGET_ENOSYS;
6204         break;
6205     }
6206     return ret;
6207 }
6208 
6209 #if defined(TARGET_ABI32)
6210 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6211 {
6212     uint64_t *gdt_table = g2h(env->gdt.base);
6213     struct target_modify_ldt_ldt_s ldt_info;
6214     struct target_modify_ldt_ldt_s *target_ldt_info;
6215     int seg_32bit, contents, read_exec_only, limit_in_pages;
6216     int seg_not_present, useable, lm;
6217     uint32_t *lp, entry_1, entry_2;
6218     int i;
6219 
6220     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6221     if (!target_ldt_info)
6222         return -TARGET_EFAULT;
6223     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6224     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6225     ldt_info.limit = tswap32(target_ldt_info->limit);
6226     ldt_info.flags = tswap32(target_ldt_info->flags);
6227     if (ldt_info.entry_number == -1) {
6228         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6229             if (gdt_table[i] == 0) {
6230                 ldt_info.entry_number = i;
6231                 target_ldt_info->entry_number = tswap32(i);
6232                 break;
6233             }
6234         }
6235     }
6236     unlock_user_struct(target_ldt_info, ptr, 1);
6237 
6238     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6239         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6240            return -TARGET_EINVAL;
6241     seg_32bit = ldt_info.flags & 1;
6242     contents = (ldt_info.flags >> 1) & 3;
6243     read_exec_only = (ldt_info.flags >> 3) & 1;
6244     limit_in_pages = (ldt_info.flags >> 4) & 1;
6245     seg_not_present = (ldt_info.flags >> 5) & 1;
6246     useable = (ldt_info.flags >> 6) & 1;
6247 #ifdef TARGET_ABI32
6248     lm = 0;
6249 #else
6250     lm = (ldt_info.flags >> 7) & 1;
6251 #endif
6252 
6253     if (contents == 3) {
6254         if (seg_not_present == 0)
6255             return -TARGET_EINVAL;
6256     }
6257 
6258     /* NOTE: same code as Linux kernel */
6259     /* Allow LDTs to be cleared by the user. */
6260     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6261         if ((contents == 0             &&
6262              read_exec_only == 1       &&
6263              seg_32bit == 0            &&
6264              limit_in_pages == 0       &&
6265              seg_not_present == 1      &&
6266              useable == 0 )) {
6267             entry_1 = 0;
6268             entry_2 = 0;
6269             goto install;
6270         }
6271     }
6272 
6273     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6274         (ldt_info.limit & 0x0ffff);
6275     entry_2 = (ldt_info.base_addr & 0xff000000) |
6276         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6277         (ldt_info.limit & 0xf0000) |
6278         ((read_exec_only ^ 1) << 9) |
6279         (contents << 10) |
6280         ((seg_not_present ^ 1) << 15) |
6281         (seg_32bit << 22) |
6282         (limit_in_pages << 23) |
6283         (useable << 20) |
6284         (lm << 21) |
6285         0x7000;
6286 
6287     /* Install the new entry ...  */
6288 install:
6289     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6290     lp[0] = tswap32(entry_1);
6291     lp[1] = tswap32(entry_2);
6292     return 0;
6293 }
6294 
6295 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6296 {
6297     struct target_modify_ldt_ldt_s *target_ldt_info;
6298     uint64_t *gdt_table = g2h(env->gdt.base);
6299     uint32_t base_addr, limit, flags;
6300     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6301     int seg_not_present, useable, lm;
6302     uint32_t *lp, entry_1, entry_2;
6303 
6304     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6305     if (!target_ldt_info)
6306         return -TARGET_EFAULT;
6307     idx = tswap32(target_ldt_info->entry_number);
6308     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6309         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6310         unlock_user_struct(target_ldt_info, ptr, 1);
6311         return -TARGET_EINVAL;
6312     }
6313     lp = (uint32_t *)(gdt_table + idx);
6314     entry_1 = tswap32(lp[0]);
6315     entry_2 = tswap32(lp[1]);
6316 
6317     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6318     contents = (entry_2 >> 10) & 3;
6319     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6320     seg_32bit = (entry_2 >> 22) & 1;
6321     limit_in_pages = (entry_2 >> 23) & 1;
6322     useable = (entry_2 >> 20) & 1;
6323 #ifdef TARGET_ABI32
6324     lm = 0;
6325 #else
6326     lm = (entry_2 >> 21) & 1;
6327 #endif
6328     flags = (seg_32bit << 0) | (contents << 1) |
6329         (read_exec_only << 3) | (limit_in_pages << 4) |
6330         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6331     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6332     base_addr = (entry_1 >> 16) |
6333         (entry_2 & 0xff000000) |
6334         ((entry_2 & 0xff) << 16);
6335     target_ldt_info->base_addr = tswapal(base_addr);
6336     target_ldt_info->limit = tswap32(limit);
6337     target_ldt_info->flags = tswap32(flags);
6338     unlock_user_struct(target_ldt_info, ptr, 1);
6339     return 0;
6340 }
6341 
6342 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6343 {
6344     return -TARGET_ENOSYS;
6345 }
6346 #else
6347 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6348 {
6349     abi_long ret = 0;
6350     abi_ulong val;
6351     int idx;
6352 
6353     switch(code) {
6354     case TARGET_ARCH_SET_GS:
6355     case TARGET_ARCH_SET_FS:
6356         if (code == TARGET_ARCH_SET_GS)
6357             idx = R_GS;
6358         else
6359             idx = R_FS;
6360         cpu_x86_load_seg(env, idx, 0);
6361         env->segs[idx].base = addr;
6362         break;
6363     case TARGET_ARCH_GET_GS:
6364     case TARGET_ARCH_GET_FS:
6365         if (code == TARGET_ARCH_GET_GS)
6366             idx = R_GS;
6367         else
6368             idx = R_FS;
6369         val = env->segs[idx].base;
6370         if (put_user(val, addr, abi_ulong))
6371             ret = -TARGET_EFAULT;
6372         break;
6373     default:
6374         ret = -TARGET_EINVAL;
6375         break;
6376     }
6377     return ret;
6378 }
6379 #endif /* defined(TARGET_ABI32) */
6380 
6381 #endif /* defined(TARGET_I386) */
6382 
6383 #define NEW_STACK_SIZE 0x40000
6384 
6385 
6386 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6387 typedef struct {
6388     CPUArchState *env;
6389     pthread_mutex_t mutex;
6390     pthread_cond_t cond;
6391     pthread_t thread;
6392     uint32_t tid;
6393     abi_ulong child_tidptr;
6394     abi_ulong parent_tidptr;
6395     sigset_t sigmask;
6396 } new_thread_info;
6397 
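/*
 * Start routine for threads created via clone(CLONE_VM): register the new
 * thread with RCU and TCG, publish its TID to the requested locations,
 * signal the parent on info->cond that setup is done, then wait for the
 * parent to drop clone_lock before entering the guest cpu_loop().
 */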
6398 static void *clone_func(void *arg)
6399 {
6400     new_thread_info *info = arg;
6401     CPUArchState *env;
6402     CPUState *cpu;
6403     TaskState *ts;
6404 
6405     rcu_register_thread();
6406     tcg_register_thread();
6407     env = info->env;
6408     cpu = env_cpu(env);
6409     thread_cpu = cpu;
6410     ts = (TaskState *)cpu->opaque;
6411     info->tid = sys_gettid();
6412     task_settid(ts);
6413     if (info->child_tidptr)
6414         put_user_u32(info->tid, info->child_tidptr);
6415     if (info->parent_tidptr)
6416         put_user_u32(info->tid, info->parent_tidptr);
6417     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6418     /* Enable signals.  */
6419     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6420     /* Signal to the parent that we're ready.  */
6421     pthread_mutex_lock(&info->mutex);
6422     pthread_cond_broadcast(&info->cond);
6423     pthread_mutex_unlock(&info->mutex);
6424     /* Wait until the parent has finished initializing the tls state.  */
6425     pthread_mutex_lock(&clone_lock);
6426     pthread_mutex_unlock(&clone_lock);
6427     cpu_loop(env);
6428     /* never exits */
6429     return NULL;
6430 }
6431 
6432 /* do_fork() must return host values and target errnos (unlike most
6433    do_*() functions). */
6434 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6435                    abi_ulong parent_tidptr, target_ulong newtls,
6436                    abi_ulong child_tidptr)
6437 {
6438     CPUState *cpu = env_cpu(env);
6439     int ret;
6440     TaskState *ts;
6441     CPUState *new_cpu;
6442     CPUArchState *new_env;
6443     sigset_t sigmask;
6444 
6445     flags &= ~CLONE_IGNORED_FLAGS;
6446 
6447     /* Emulate vfork() with fork() */
6448     if (flags & CLONE_VFORK)
6449         flags &= ~(CLONE_VFORK | CLONE_VM);
6450 
6451     if (flags & CLONE_VM) {
6452         TaskState *parent_ts = (TaskState *)cpu->opaque;
6453         new_thread_info info;
6454         pthread_attr_t attr;
6455 
6456         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6457             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6458             return -TARGET_EINVAL;
6459         }
6460 
6461         ts = g_new0(TaskState, 1);
6462         init_task_state(ts);
6463 
6464         /* Grab a mutex so that thread setup appears atomic.  */
6465         pthread_mutex_lock(&clone_lock);
6466 
6467         /* we create a new CPU instance. */
6468         new_env = cpu_copy(env);
6469         /* Init regs that differ from the parent.  */
6470         cpu_clone_regs_child(new_env, newsp, flags);
6471         cpu_clone_regs_parent(env, flags);
6472         new_cpu = env_cpu(new_env);
6473         new_cpu->opaque = ts;
6474         ts->bprm = parent_ts->bprm;
6475         ts->info = parent_ts->info;
6476         ts->signal_mask = parent_ts->signal_mask;
6477 
6478         if (flags & CLONE_CHILD_CLEARTID) {
6479             ts->child_tidptr = child_tidptr;
6480         }
6481 
6482         if (flags & CLONE_SETTLS) {
6483             cpu_set_tls (new_env, newtls);
6484         }
6485 
6486         memset(&info, 0, sizeof(info));
6487         pthread_mutex_init(&info.mutex, NULL);
6488         pthread_mutex_lock(&info.mutex);
6489         pthread_cond_init(&info.cond, NULL);
6490         info.env = new_env;
6491         if (flags & CLONE_CHILD_SETTID) {
6492             info.child_tidptr = child_tidptr;
6493         }
6494         if (flags & CLONE_PARENT_SETTID) {
6495             info.parent_tidptr = parent_tidptr;
6496         }
6497 
6498         ret = pthread_attr_init(&attr);
6499         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6500         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6501         /* It is not safe to deliver signals until the child has finished
6502            initializing, so temporarily block all signals.  */
6503         sigfillset(&sigmask);
6504         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6505         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6506 
6507         /* If this is our first additional thread, we need to ensure we
6508          * generate code for parallel execution and flush old translations.
6509          */
6510         if (!parallel_cpus) {
6511             parallel_cpus = true;
6512             tb_flush(cpu);
6513         }
6514 
6515         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6516         /* TODO: Free new CPU state if thread creation failed.  */
6517 
6518         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6519         pthread_attr_destroy(&attr);
6520         if (ret == 0) {
6521             /* Wait for the child to initialize.  */
6522             pthread_cond_wait(&info.cond, &info.mutex);
6523             ret = info.tid;
6524         } else {
6525             ret = -1;
6526         }
6527         pthread_mutex_unlock(&info.mutex);
6528         pthread_cond_destroy(&info.cond);
6529         pthread_mutex_destroy(&info.mutex);
6530         pthread_mutex_unlock(&clone_lock);
6531     } else {
6532         /* if no CLONE_VM, we consider it a fork */
6533         if (flags & CLONE_INVALID_FORK_FLAGS) {
6534             return -TARGET_EINVAL;
6535         }
6536 
6537         /* We can't support custom termination signals */
6538         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6539             return -TARGET_EINVAL;
6540         }
6541 
6542         if (block_signals()) {
6543             return -TARGET_ERESTARTSYS;
6544         }
6545 
6546         fork_start();
6547         ret = fork();
6548         if (ret == 0) {
6549             /* Child Process.  */
6550             cpu_clone_regs_child(env, newsp, flags);
6551             fork_end(1);
6552             /* There is a race condition here.  The parent process could
6553                theoretically read the TID in the child process before the child
6554                tid is set.  This would require using either ptrace
6555                (not implemented) or having *_tidptr point at a shared memory
6556                mapping.  We can't repeat the spinlock hack used above because
6557                the child process gets its own copy of the lock.  */
6558             if (flags & CLONE_CHILD_SETTID)
6559                 put_user_u32(sys_gettid(), child_tidptr);
6560             if (flags & CLONE_PARENT_SETTID)
6561                 put_user_u32(sys_gettid(), parent_tidptr);
6562             ts = (TaskState *)cpu->opaque;
6563             if (flags & CLONE_SETTLS)
6564                 cpu_set_tls (env, newtls);
6565             if (flags & CLONE_CHILD_CLEARTID)
6566                 ts->child_tidptr = child_tidptr;
6567         } else {
6568             cpu_clone_regs_parent(env, flags);
6569             fork_end(0);
6570         }
6571     }
6572     return ret;
6573 }
6574 
6575 /* warning: doesn't handle Linux-specific flags... */
6576 static int target_to_host_fcntl_cmd(int cmd)
6577 {
6578     int ret;
6579 
6580     switch(cmd) {
6581     case TARGET_F_DUPFD:
6582     case TARGET_F_GETFD:
6583     case TARGET_F_SETFD:
6584     case TARGET_F_GETFL:
6585     case TARGET_F_SETFL:
6586     case TARGET_F_OFD_GETLK:
6587     case TARGET_F_OFD_SETLK:
6588     case TARGET_F_OFD_SETLKW:
6589         ret = cmd;
6590         break;
6591     case TARGET_F_GETLK:
6592         ret = F_GETLK64;
6593         break;
6594     case TARGET_F_SETLK:
6595         ret = F_SETLK64;
6596         break;
6597     case TARGET_F_SETLKW:
6598         ret = F_SETLKW64;
6599         break;
6600     case TARGET_F_GETOWN:
6601         ret = F_GETOWN;
6602         break;
6603     case TARGET_F_SETOWN:
6604         ret = F_SETOWN;
6605         break;
6606     case TARGET_F_GETSIG:
6607         ret = F_GETSIG;
6608         break;
6609     case TARGET_F_SETSIG:
6610         ret = F_SETSIG;
6611         break;
6612 #if TARGET_ABI_BITS == 32
6613     case TARGET_F_GETLK64:
6614         ret = F_GETLK64;
6615         break;
6616     case TARGET_F_SETLK64:
6617         ret = F_SETLK64;
6618         break;
6619     case TARGET_F_SETLKW64:
6620         ret = F_SETLKW64;
6621         break;
6622 #endif
6623     case TARGET_F_SETLEASE:
6624         ret = F_SETLEASE;
6625         break;
6626     case TARGET_F_GETLEASE:
6627         ret = F_GETLEASE;
6628         break;
6629 #ifdef F_DUPFD_CLOEXEC
6630     case TARGET_F_DUPFD_CLOEXEC:
6631         ret = F_DUPFD_CLOEXEC;
6632         break;
6633 #endif
6634     case TARGET_F_NOTIFY:
6635         ret = F_NOTIFY;
6636         break;
6637 #ifdef F_GETOWN_EX
6638     case TARGET_F_GETOWN_EX:
6639         ret = F_GETOWN_EX;
6640         break;
6641 #endif
6642 #ifdef F_SETOWN_EX
6643     case TARGET_F_SETOWN_EX:
6644         ret = F_SETOWN_EX;
6645         break;
6646 #endif
6647 #ifdef F_SETPIPE_SZ
6648     case TARGET_F_SETPIPE_SZ:
6649         ret = F_SETPIPE_SZ;
6650         break;
6651     case TARGET_F_GETPIPE_SZ:
6652         ret = F_GETPIPE_SZ;
6653         break;
6654 #endif
6655 #ifdef F_ADD_SEALS
6656     case TARGET_F_ADD_SEALS:
6657         ret = F_ADD_SEALS;
6658         break;
6659     case TARGET_F_GET_SEALS:
6660         ret = F_GET_SEALS;
6661         break;
6662 #endif
6663     default:
6664         ret = -TARGET_EINVAL;
6665         break;
6666     }
6667 
6668 #if defined(__powerpc64__)
6669     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6670      * are not supported by the kernel. The glibc fcntl call actually adjusts
6671      * them to 5, 6 and 7 before making the syscall(). Since we make the
6672      * syscall directly, adjust to what the kernel supports.
6673      */
6674     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6675         ret -= F_GETLK64 - 5;
6676     }
6677 #endif
6678 
6679     return ret;
6680 }
6681 
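/*
 * FLOCK_TRANSTBL is expanded twice below with different definitions of
 * TRANSTBL_CONVERT: once mapping target lock types to host values and once
 * for the reverse direction.
 */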
6682 #define FLOCK_TRANSTBL \
6683     switch (type) { \
6684     TRANSTBL_CONVERT(F_RDLCK); \
6685     TRANSTBL_CONVERT(F_WRLCK); \
6686     TRANSTBL_CONVERT(F_UNLCK); \
6687     }
6688 
6689 static int target_to_host_flock(int type)
6690 {
6691 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6692     FLOCK_TRANSTBL
6693 #undef  TRANSTBL_CONVERT
6694     return -TARGET_EINVAL;
6695 }
6696 
6697 static int host_to_target_flock(int type)
6698 {
6699 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6700     FLOCK_TRANSTBL
6701 #undef  TRANSTBL_CONVERT
6702     /* if we don't know how to convert the value coming
6703      * from the host, we copy it to the target field as-is
6704      */
6705     return type;
6706 }
6707 
6708 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6709                                             abi_ulong target_flock_addr)
6710 {
6711     struct target_flock *target_fl;
6712     int l_type;
6713 
6714     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6715         return -TARGET_EFAULT;
6716     }
6717 
6718     __get_user(l_type, &target_fl->l_type);
6719     l_type = target_to_host_flock(l_type);
6720     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
6721         return l_type;
6722     }
6723     fl->l_type = l_type;
6724     __get_user(fl->l_whence, &target_fl->l_whence);
6725     __get_user(fl->l_start, &target_fl->l_start);
6726     __get_user(fl->l_len, &target_fl->l_len);
6727     __get_user(fl->l_pid, &target_fl->l_pid);
6728     unlock_user_struct(target_fl, target_flock_addr, 0);
6729     return 0;
6730 }
6731 
6732 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6733                                           const struct flock64 *fl)
6734 {
6735     struct target_flock *target_fl;
6736     short l_type;
6737 
6738     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6739         return -TARGET_EFAULT;
6740     }
6741 
6742     l_type = host_to_target_flock(fl->l_type);
6743     __put_user(l_type, &target_fl->l_type);
6744     __put_user(fl->l_whence, &target_fl->l_whence);
6745     __put_user(fl->l_start, &target_fl->l_start);
6746     __put_user(fl->l_len, &target_fl->l_len);
6747     __put_user(fl->l_pid, &target_fl->l_pid);
6748     unlock_user_struct(target_fl, target_flock_addr, 1);
6749     return 0;
6750 }
6751 
6752 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6753 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6754 
6755 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6756 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6757                                                    abi_ulong target_flock_addr)
6758 {
6759     struct target_oabi_flock64 *target_fl;
6760     int l_type;
6761 
6762     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6763         return -TARGET_EFAULT;
6764     }
6765 
6766     __get_user(l_type, &target_fl->l_type);
6767     l_type = target_to_host_flock(l_type);
6768     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
6769         return l_type;
6770     }
6771     fl->l_type = l_type;
6772     __get_user(fl->l_whence, &target_fl->l_whence);
6773     __get_user(fl->l_start, &target_fl->l_start);
6774     __get_user(fl->l_len, &target_fl->l_len);
6775     __get_user(fl->l_pid, &target_fl->l_pid);
6776     unlock_user_struct(target_fl, target_flock_addr, 0);
6777     return 0;
6778 }
6779 
6780 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6781                                                  const struct flock64 *fl)
6782 {
6783     struct target_oabi_flock64 *target_fl;
6784     short l_type;
6785 
6786     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6787         return -TARGET_EFAULT;
6788     }
6789 
6790     l_type = host_to_target_flock(fl->l_type);
6791     __put_user(l_type, &target_fl->l_type);
6792     __put_user(fl->l_whence, &target_fl->l_whence);
6793     __put_user(fl->l_start, &target_fl->l_start);
6794     __put_user(fl->l_len, &target_fl->l_len);
6795     __put_user(fl->l_pid, &target_fl->l_pid);
6796     unlock_user_struct(target_fl, target_flock_addr, 1);
6797     return 0;
6798 }
6799 #endif
6800 
6801 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6802                                               abi_ulong target_flock_addr)
6803 {
6804     struct target_flock64 *target_fl;
6805     int l_type;
6806 
6807     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6808         return -TARGET_EFAULT;
6809     }
6810 
6811     __get_user(l_type, &target_fl->l_type);
6812     l_type = target_to_host_flock(l_type);
6813     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
6814         return l_type;
6815     }
6816     fl->l_type = l_type;
6817     __get_user(fl->l_whence, &target_fl->l_whence);
6818     __get_user(fl->l_start, &target_fl->l_start);
6819     __get_user(fl->l_len, &target_fl->l_len);
6820     __get_user(fl->l_pid, &target_fl->l_pid);
6821     unlock_user_struct(target_fl, target_flock_addr, 0);
6822     return 0;
6823 }
6824 
6825 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6826                                             const struct flock64 *fl)
6827 {
6828     struct target_flock64 *target_fl;
6829     short l_type;
6830 
6831     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6832         return -TARGET_EFAULT;
6833     }
6834 
6835     l_type = host_to_target_flock(fl->l_type);
6836     __put_user(l_type, &target_fl->l_type);
6837     __put_user(fl->l_whence, &target_fl->l_whence);
6838     __put_user(fl->l_start, &target_fl->l_start);
6839     __put_user(fl->l_len, &target_fl->l_len);
6840     __put_user(fl->l_pid, &target_fl->l_pid);
6841     unlock_user_struct(target_fl, target_flock_addr, 1);
6842     return 0;
6843 }
6844 
6845 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6846 {
6847     struct flock64 fl64;
6848 #ifdef F_GETOWN_EX
6849     struct f_owner_ex fox;
6850     struct target_f_owner_ex *target_fox;
6851 #endif
6852     abi_long ret;
6853     int host_cmd = target_to_host_fcntl_cmd(cmd);
6854 
6855     if (host_cmd == -TARGET_EINVAL)
6856         return host_cmd;
6857 
6858     switch(cmd) {
6859     case TARGET_F_GETLK:
6860         ret = copy_from_user_flock(&fl64, arg);
6861         if (ret) {
6862             return ret;
6863         }
6864         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6865         if (ret == 0) {
6866             ret = copy_to_user_flock(arg, &fl64);
6867         }
6868         break;
6869 
6870     case TARGET_F_SETLK:
6871     case TARGET_F_SETLKW:
6872         ret = copy_from_user_flock(&fl64, arg);
6873         if (ret) {
6874             return ret;
6875         }
6876         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6877         break;
6878 
6879     case TARGET_F_GETLK64:
6880     case TARGET_F_OFD_GETLK:
6881         ret = copy_from_user_flock64(&fl64, arg);
6882         if (ret) {
6883             return ret;
6884         }
6885         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6886         if (ret == 0) {
6887             ret = copy_to_user_flock64(arg, &fl64);
6888         }
6889         break;
6890     case TARGET_F_SETLK64:
6891     case TARGET_F_SETLKW64:
6892     case TARGET_F_OFD_SETLK:
6893     case TARGET_F_OFD_SETLKW:
6894         ret = copy_from_user_flock64(&fl64, arg);
6895         if (ret) {
6896             return ret;
6897         }
6898         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6899         break;
6900 
6901     case TARGET_F_GETFL:
6902         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6903         if (ret >= 0) {
6904             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6905         }
6906         break;
6907 
6908     case TARGET_F_SETFL:
6909         ret = get_errno(safe_fcntl(fd, host_cmd,
6910                                    target_to_host_bitmask(arg,
6911                                                           fcntl_flags_tbl)));
6912         break;
6913 
6914 #ifdef F_GETOWN_EX
6915     case TARGET_F_GETOWN_EX:
6916         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6917         if (ret >= 0) {
6918             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6919                 return -TARGET_EFAULT;
6920             target_fox->type = tswap32(fox.type);
6921             target_fox->pid = tswap32(fox.pid);
6922             unlock_user_struct(target_fox, arg, 1);
6923         }
6924         break;
6925 #endif
6926 
6927 #ifdef F_SETOWN_EX
6928     case TARGET_F_SETOWN_EX:
6929         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6930             return -TARGET_EFAULT;
6931         fox.type = tswap32(target_fox->type);
6932         fox.pid = tswap32(target_fox->pid);
6933         unlock_user_struct(target_fox, arg, 0);
6934         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6935         break;
6936 #endif
6937 
6938     case TARGET_F_SETSIG:
6939         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6940         break;
6941 
6942     case TARGET_F_GETSIG:
6943         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6944         break;
6945 
6946     case TARGET_F_SETOWN:
6947     case TARGET_F_GETOWN:
6948     case TARGET_F_SETLEASE:
6949     case TARGET_F_GETLEASE:
6950     case TARGET_F_SETPIPE_SZ:
6951     case TARGET_F_GETPIPE_SZ:
6952     case TARGET_F_ADD_SEALS:
6953     case TARGET_F_GET_SEALS:
6954         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6955         break;
6956 
6957     default:
6958         ret = get_errno(safe_fcntl(fd, cmd, arg));
6959         break;
6960     }
6961     return ret;
6962 }
6963 
6964 #ifdef USE_UID16
6965 
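/*
 * With 16-bit UID/GID syscalls, IDs above 65535 are reported back to the
 * guest as 65534 (the conventional overflow ID), while a 16-bit -1 is
 * widened so that the "leave unchanged" semantics of calls such as
 * setresuid() are preserved.
 */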
6966 static inline int high2lowuid(int uid)
6967 {
6968     if (uid > 65535)
6969         return 65534;
6970     else
6971         return uid;
6972 }
6973 
6974 static inline int high2lowgid(int gid)
6975 {
6976     if (gid > 65535)
6977         return 65534;
6978     else
6979         return gid;
6980 }
6981 
6982 static inline int low2highuid(int uid)
6983 {
6984     if ((int16_t)uid == -1)
6985         return -1;
6986     else
6987         return uid;
6988 }
6989 
6990 static inline int low2highgid(int gid)
6991 {
6992     if ((int16_t)gid == -1)
6993         return -1;
6994     else
6995         return gid;
6996 }
6997 static inline int tswapid(int id)
6998 {
6999     return tswap16(id);
7000 }
7001 
7002 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7003 
7004 #else /* !USE_UID16 */
7005 static inline int high2lowuid(int uid)
7006 {
7007     return uid;
7008 }
7009 static inline int high2lowgid(int gid)
7010 {
7011     return gid;
7012 }
7013 static inline int low2highuid(int uid)
7014 {
7015     return uid;
7016 }
7017 static inline int low2highgid(int gid)
7018 {
7019     return gid;
7020 }
7021 static inline int tswapid(int id)
7022 {
7023     return tswap32(id);
7024 }
7025 
7026 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7027 
7028 #endif /* USE_UID16 */
7029 
7030 /* We must do direct syscalls for setting UID/GID, because we want to
7031  * implement the Linux system call semantics of "change only for this thread",
7032  * not the libc/POSIX semantics of "change for all threads in process".
7033  * (See http://ewontfix.com/17/ for more details.)
7034  * We use the 32-bit version of the syscalls if present; if it is not
7035  * then either the host architecture supports 32-bit UIDs natively with
7036  * the standard syscall, or the 16-bit UID is the best we can do.
7037  */
7038 #ifdef __NR_setuid32
7039 #define __NR_sys_setuid __NR_setuid32
7040 #else
7041 #define __NR_sys_setuid __NR_setuid
7042 #endif
7043 #ifdef __NR_setgid32
7044 #define __NR_sys_setgid __NR_setgid32
7045 #else
7046 #define __NR_sys_setgid __NR_setgid
7047 #endif
7048 #ifdef __NR_setresuid32
7049 #define __NR_sys_setresuid __NR_setresuid32
7050 #else
7051 #define __NR_sys_setresuid __NR_setresuid
7052 #endif
7053 #ifdef __NR_setresgid32
7054 #define __NR_sys_setresgid __NR_setresgid32
7055 #else
7056 #define __NR_sys_setresgid __NR_setresgid
7057 #endif
7058 
7059 _syscall1(int, sys_setuid, uid_t, uid)
7060 _syscall1(int, sys_setgid, gid_t, gid)
7061 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7062 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
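/*
 * The syscall dispatch later in this file is expected to route the various
 * set*uid/set*gid numbers through these wrappers, e.g. something along the
 * lines of get_errno(sys_setuid(low2highuid(arg1))), so that the change
 * applies only to the calling thread as described above.
 */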
7063 
7064 void syscall_init(void)
7065 {
7066     IOCTLEntry *ie;
7067     const argtype *arg_type;
7068     int size;
7069     int i;
7070 
7071     thunk_init(STRUCT_MAX);
7072 
7073 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7074 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7075 #include "syscall_types.h"
7076 #undef STRUCT
7077 #undef STRUCT_SPECIAL
7078 
7079     /* Build target_to_host_errno_table[] from
7080      * host_to_target_errno_table[]. */
7081     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7082         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7083     }
7084 
7085     /* We patch the ioctl size if necessary.  We rely on the fact that
7086        no ioctl has all the bits set to '1' in the size field. */
7087     ie = ioctl_entries;
7088     while (ie->target_cmd != 0) {
7089         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7090             TARGET_IOC_SIZEMASK) {
7091             arg_type = ie->arg_type;
7092             if (arg_type[0] != TYPE_PTR) {
7093                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7094                         ie->target_cmd);
7095                 exit(1);
7096             }
7097             arg_type++;
7098             size = thunk_type_size(arg_type, 0);
7099             ie->target_cmd = (ie->target_cmd &
7100                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7101                 (size << TARGET_IOC_SIZESHIFT);
7102         }
7103 
7104         /* automatic consistency check if same arch */
7105 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7106     (defined(__x86_64__) && defined(TARGET_X86_64))
7107         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7108             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7109                     ie->name, ie->target_cmd, ie->host_cmd);
7110         }
7111 #endif
7112         ie++;
7113     }
7114 }
7115 
7116 #ifdef TARGET_NR_truncate64
7117 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7118                                          abi_long arg2,
7119                                          abi_long arg3,
7120                                          abi_long arg4)
7121 {
7122     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7123         arg2 = arg3;
7124         arg3 = arg4;
7125     }
7126     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7127 }
7128 #endif
7129 
7130 #ifdef TARGET_NR_ftruncate64
7131 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7132                                           abi_long arg2,
7133                                           abi_long arg3,
7134                                           abi_long arg4)
7135 {
7136     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7137         arg2 = arg3;
7138         arg3 = arg4;
7139     }
7140     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7141 }
7142 #endif
7143 
7144 #if defined(TARGET_NR_timer_settime) || \
7145     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7146 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7147                                                  abi_ulong target_addr)
7148 {
7149     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7150                                 offsetof(struct target_itimerspec,
7151                                          it_interval)) ||
7152         target_to_host_timespec(&host_its->it_value, target_addr +
7153                                 offsetof(struct target_itimerspec,
7154                                          it_value))) {
7155         return -TARGET_EFAULT;
7156     }
7157 
7158     return 0;
7159 }
7160 #endif
7161 
7162 #if defined(TARGET_NR_timer_settime64) || \
7163     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7164 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7165                                                    abi_ulong target_addr)
7166 {
7167     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7168                                   offsetof(struct target__kernel_itimerspec,
7169                                            it_interval)) ||
7170         target_to_host_timespec64(&host_its->it_value, target_addr +
7171                                   offsetof(struct target__kernel_itimerspec,
7172                                            it_value))) {
7173         return -TARGET_EFAULT;
7174     }
7175 
7176     return 0;
7177 }
7178 #endif
7179 
7180 #if ((defined(TARGET_NR_timerfd_gettime) || \
7181       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7182       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7183 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7184                                                  struct itimerspec *host_its)
7185 {
7186     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7187                                                        it_interval),
7188                                 &host_its->it_interval) ||
7189         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7190                                                        it_value),
7191                                 &host_its->it_value)) {
7192         return -TARGET_EFAULT;
7193     }
7194     return 0;
7195 }
7196 #endif
7197 
7198 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7199       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7200       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7201 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7202                                                    struct itimerspec *host_its)
7203 {
7204     if (host_to_target_timespec64(target_addr +
7205                                   offsetof(struct target__kernel_itimerspec,
7206                                            it_interval),
7207                                   &host_its->it_interval) ||
7208         host_to_target_timespec64(target_addr +
7209                                   offsetof(struct target__kernel_itimerspec,
7210                                            it_value),
7211                                   &host_its->it_value)) {
7212         return -TARGET_EFAULT;
7213     }
7214     return 0;
7215 }
7216 #endif
7217 
7218 #if defined(TARGET_NR_adjtimex) || \
7219     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7220 static inline abi_long target_to_host_timex(struct timex *host_tx,
7221                                             abi_long target_addr)
7222 {
7223     struct target_timex *target_tx;
7224 
7225     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7226         return -TARGET_EFAULT;
7227     }
7228 
7229     __get_user(host_tx->modes, &target_tx->modes);
7230     __get_user(host_tx->offset, &target_tx->offset);
7231     __get_user(host_tx->freq, &target_tx->freq);
7232     __get_user(host_tx->maxerror, &target_tx->maxerror);
7233     __get_user(host_tx->esterror, &target_tx->esterror);
7234     __get_user(host_tx->status, &target_tx->status);
7235     __get_user(host_tx->constant, &target_tx->constant);
7236     __get_user(host_tx->precision, &target_tx->precision);
7237     __get_user(host_tx->tolerance, &target_tx->tolerance);
7238     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7239     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7240     __get_user(host_tx->tick, &target_tx->tick);
7241     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7242     __get_user(host_tx->jitter, &target_tx->jitter);
7243     __get_user(host_tx->shift, &target_tx->shift);
7244     __get_user(host_tx->stabil, &target_tx->stabil);
7245     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7246     __get_user(host_tx->calcnt, &target_tx->calcnt);
7247     __get_user(host_tx->errcnt, &target_tx->errcnt);
7248     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7249     __get_user(host_tx->tai, &target_tx->tai);
7250 
7251     unlock_user_struct(target_tx, target_addr, 0);
7252     return 0;
7253 }
7254 
7255 static inline abi_long host_to_target_timex(abi_long target_addr,
7256                                             struct timex *host_tx)
7257 {
7258     struct target_timex *target_tx;
7259 
7260     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7261         return -TARGET_EFAULT;
7262     }
7263 
7264     __put_user(host_tx->modes, &target_tx->modes);
7265     __put_user(host_tx->offset, &target_tx->offset);
7266     __put_user(host_tx->freq, &target_tx->freq);
7267     __put_user(host_tx->maxerror, &target_tx->maxerror);
7268     __put_user(host_tx->esterror, &target_tx->esterror);
7269     __put_user(host_tx->status, &target_tx->status);
7270     __put_user(host_tx->constant, &target_tx->constant);
7271     __put_user(host_tx->precision, &target_tx->precision);
7272     __put_user(host_tx->tolerance, &target_tx->tolerance);
7273     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7274     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7275     __put_user(host_tx->tick, &target_tx->tick);
7276     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7277     __put_user(host_tx->jitter, &target_tx->jitter);
7278     __put_user(host_tx->shift, &target_tx->shift);
7279     __put_user(host_tx->stabil, &target_tx->stabil);
7280     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7281     __put_user(host_tx->calcnt, &target_tx->calcnt);
7282     __put_user(host_tx->errcnt, &target_tx->errcnt);
7283     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7284     __put_user(host_tx->tai, &target_tx->tai);
7285 
7286     unlock_user_struct(target_tx, target_addr, 1);
7287     return 0;
7288 }
7289 #endif
7290 
7291 
7292 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7293 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7294                                               abi_long target_addr)
7295 {
7296     struct target__kernel_timex *target_tx;
7297 
7298     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7299                                  offsetof(struct target__kernel_timex,
7300                                           time))) {
7301         return -TARGET_EFAULT;
7302     }
7303 
7304     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7305         return -TARGET_EFAULT;
7306     }
7307 
7308     __get_user(host_tx->modes, &target_tx->modes);
7309     __get_user(host_tx->offset, &target_tx->offset);
7310     __get_user(host_tx->freq, &target_tx->freq);
7311     __get_user(host_tx->maxerror, &target_tx->maxerror);
7312     __get_user(host_tx->esterror, &target_tx->esterror);
7313     __get_user(host_tx->status, &target_tx->status);
7314     __get_user(host_tx->constant, &target_tx->constant);
7315     __get_user(host_tx->precision, &target_tx->precision);
7316     __get_user(host_tx->tolerance, &target_tx->tolerance);
7317     __get_user(host_tx->tick, &target_tx->tick);
7318     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7319     __get_user(host_tx->jitter, &target_tx->jitter);
7320     __get_user(host_tx->shift, &target_tx->shift);
7321     __get_user(host_tx->stabil, &target_tx->stabil);
7322     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7323     __get_user(host_tx->calcnt, &target_tx->calcnt);
7324     __get_user(host_tx->errcnt, &target_tx->errcnt);
7325     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7326     __get_user(host_tx->tai, &target_tx->tai);
7327 
7328     unlock_user_struct(target_tx, target_addr, 0);
7329     return 0;
7330 }
7331 
7332 static inline abi_long host_to_target_timex64(abi_long target_addr,
7333                                               struct timex *host_tx)
7334 {
7335     struct target__kernel_timex *target_tx;
7336 
7337     if (copy_to_user_timeval64(target_addr +
7338                                offsetof(struct target__kernel_timex, time),
7339                                &host_tx->time)) {
7340         return -TARGET_EFAULT;
7341     }
7342 
7343     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7344         return -TARGET_EFAULT;
7345     }
7346 
7347     __put_user(host_tx->modes, &target_tx->modes);
7348     __put_user(host_tx->offset, &target_tx->offset);
7349     __put_user(host_tx->freq, &target_tx->freq);
7350     __put_user(host_tx->maxerror, &target_tx->maxerror);
7351     __put_user(host_tx->esterror, &target_tx->esterror);
7352     __put_user(host_tx->status, &target_tx->status);
7353     __put_user(host_tx->constant, &target_tx->constant);
7354     __put_user(host_tx->precision, &target_tx->precision);
7355     __put_user(host_tx->tolerance, &target_tx->tolerance);
7356     __put_user(host_tx->tick, &target_tx->tick);
7357     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7358     __put_user(host_tx->jitter, &target_tx->jitter);
7359     __put_user(host_tx->shift, &target_tx->shift);
7360     __put_user(host_tx->stabil, &target_tx->stabil);
7361     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7362     __put_user(host_tx->calcnt, &target_tx->calcnt);
7363     __put_user(host_tx->errcnt, &target_tx->errcnt);
7364     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7365     __put_user(host_tx->tai, &target_tx->tai);
7366 
7367     unlock_user_struct(target_tx, target_addr, 1);
7368     return 0;
7369 }
7370 #endif
7371 
7372 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7373                                                abi_ulong target_addr)
7374 {
7375     struct target_sigevent *target_sevp;
7376 
7377     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7378         return -TARGET_EFAULT;
7379     }
7380 
7381     /* This union is awkward on 64 bit systems because it has a 32 bit
7382      * integer and a pointer in it; we follow the conversion approach
7383      * used for handling sigval types in signal.c so the guest should get
7384      * the correct value back even if we did a 64 bit byteswap and it's
7385      * using the 32 bit integer.
7386      */
7387     host_sevp->sigev_value.sival_ptr =
7388         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7389     host_sevp->sigev_signo =
7390         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7391     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7392     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7393 
7394     unlock_user_struct(target_sevp, target_addr, 1);
7395     return 0;
7396 }
7397 
7398 #if defined(TARGET_NR_mlockall)
7399 static inline int target_to_host_mlockall_arg(int arg)
7400 {
7401     int result = 0;
7402 
7403     if (arg & TARGET_MCL_CURRENT) {
7404         result |= MCL_CURRENT;
7405     }
7406     if (arg & TARGET_MCL_FUTURE) {
7407         result |= MCL_FUTURE;
7408     }
7409 #ifdef MCL_ONFAULT
7410     if (arg & TARGET_MCL_ONFAULT) {
7411         result |= MCL_ONFAULT;
7412     }
7413 #endif
7414 
7415     return result;
7416 }
7417 #endif
7418 
7419 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7420      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7421      defined(TARGET_NR_newfstatat))
7422 static inline abi_long host_to_target_stat64(void *cpu_env,
7423                                              abi_ulong target_addr,
7424                                              struct stat *host_st)
7425 {
7426 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7427     if (((CPUARMState *)cpu_env)->eabi) {
7428         struct target_eabi_stat64 *target_st;
7429 
7430         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7431             return -TARGET_EFAULT;
7432         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7433         __put_user(host_st->st_dev, &target_st->st_dev);
7434         __put_user(host_st->st_ino, &target_st->st_ino);
7435 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7436         __put_user(host_st->st_ino, &target_st->__st_ino);
7437 #endif
7438         __put_user(host_st->st_mode, &target_st->st_mode);
7439         __put_user(host_st->st_nlink, &target_st->st_nlink);
7440         __put_user(host_st->st_uid, &target_st->st_uid);
7441         __put_user(host_st->st_gid, &target_st->st_gid);
7442         __put_user(host_st->st_rdev, &target_st->st_rdev);
7443         __put_user(host_st->st_size, &target_st->st_size);
7444         __put_user(host_st->st_blksize, &target_st->st_blksize);
7445         __put_user(host_st->st_blocks, &target_st->st_blocks);
7446         __put_user(host_st->st_atime, &target_st->target_st_atime);
7447         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7448         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7449 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7450         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7451         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7452         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7453 #endif
7454         unlock_user_struct(target_st, target_addr, 1);
7455     } else
7456 #endif
7457     {
7458 #if defined(TARGET_HAS_STRUCT_STAT64)
7459         struct target_stat64 *target_st;
7460 #else
7461         struct target_stat *target_st;
7462 #endif
7463 
7464         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7465             return -TARGET_EFAULT;
7466         memset(target_st, 0, sizeof(*target_st));
7467         __put_user(host_st->st_dev, &target_st->st_dev);
7468         __put_user(host_st->st_ino, &target_st->st_ino);
7469 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7470         __put_user(host_st->st_ino, &target_st->__st_ino);
7471 #endif
7472         __put_user(host_st->st_mode, &target_st->st_mode);
7473         __put_user(host_st->st_nlink, &target_st->st_nlink);
7474         __put_user(host_st->st_uid, &target_st->st_uid);
7475         __put_user(host_st->st_gid, &target_st->st_gid);
7476         __put_user(host_st->st_rdev, &target_st->st_rdev);
7477         /* XXX: better use of kernel struct */
7478         __put_user(host_st->st_size, &target_st->st_size);
7479         __put_user(host_st->st_blksize, &target_st->st_blksize);
7480         __put_user(host_st->st_blocks, &target_st->st_blocks);
7481         __put_user(host_st->st_atime, &target_st->target_st_atime);
7482         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7483         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7484 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7485         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7486         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7487         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7488 #endif
7489         unlock_user_struct(target_st, target_addr, 1);
7490     }
7491 
7492     return 0;
7493 }
7494 #endif
7495 
7496 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7497 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7498                                             abi_ulong target_addr)
7499 {
7500     struct target_statx *target_stx;
7501 
7502     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7503         return -TARGET_EFAULT;
7504     }
7505     memset(target_stx, 0, sizeof(*target_stx));
7506 
7507     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7508     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7509     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7510     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7511     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7512     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7513     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7514     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7515     __put_user(host_stx->stx_size, &target_stx->stx_size);
7516     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7517     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7518     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7519     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7520     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7521     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7522     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7523     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7524     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7525     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7526     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7527     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7528     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7529     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7530 
7531     unlock_user_struct(target_stx, target_addr, 1);
7532 
7533     return 0;
7534 }
7535 #endif
7536 
7537 static int do_sys_futex(int *uaddr, int op, int val,
7538                          const struct timespec *timeout, int *uaddr2,
7539                          int val3)
7540 {
7541 #if HOST_LONG_BITS == 64
7542 #if defined(__NR_futex)
7543     /* time_t is always 64-bit here, so no _time64 variant is defined */
7544     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7545 
7546 #endif
7547 #else /* HOST_LONG_BITS == 64 */
7548 #if defined(__NR_futex_time64)
7549     if (sizeof(timeout->tv_sec) == 8) {
7550         /* _time64 function on 32bit arch */
7551         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7552     }
7553 #endif
7554 #if defined(__NR_futex)
7555     /* old function on 32bit arch */
7556     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7557 #endif
7558 #endif /* HOST_LONG_BITS == 64 */
7559     g_assert_not_reached();
7560 }
7561 
7562 static int do_safe_futex(int *uaddr, int op, int val,
7563                          const struct timespec *timeout, int *uaddr2,
7564                          int val3)
7565 {
7566 #if HOST_LONG_BITS == 64
7567 #if defined(__NR_futex)
7568     /* time_t is always 64-bit here, so no _time64 variant is defined */
7569     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7570 #endif
7571 #else /* HOST_LONG_BITS == 64 */
7572 #if defined(__NR_futex_time64)
7573     if (sizeof(timeout->tv_sec) == 8) {
7574         /* _time64 function on 32bit arch */
7575         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7576                                            val3));
7577     }
7578 #endif
7579 #if defined(__NR_futex)
7580     /* old function on 32bit arch */
7581     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7582 #endif
7583 #endif /* HOST_LONG_BITS == 64 */
7584     return -TARGET_ENOSYS;
7585 }
7586 
7587 /* ??? Using host futex calls even when target atomic operations
7588    are not really atomic probably breaks things.  However, implementing
7589    futexes locally would make futexes shared between multiple processes
7590    tricky.  Then again, they're probably useless because guest atomic
7591    operations won't work either.  */
7592 #if defined(TARGET_NR_futex)
7593 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7594                     target_ulong uaddr2, int val3)
7595 {
7596     struct timespec ts, *pts;
7597     int base_op;
7598 
7599     /* ??? We assume FUTEX_* constants are the same on both host
7600        and target.  */
7601 #ifdef FUTEX_CMD_MASK
7602     base_op = op & FUTEX_CMD_MASK;
7603 #else
7604     base_op = op;
7605 #endif
7606     switch (base_op) {
7607     case FUTEX_WAIT:
7608     case FUTEX_WAIT_BITSET:
7609         if (timeout) {
7610             pts = &ts;
7611             target_to_host_timespec(pts, timeout);
7612         } else {
7613             pts = NULL;
7614         }
7615         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7616     case FUTEX_WAKE:
7617         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7618     case FUTEX_FD:
7619         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7620     case FUTEX_REQUEUE:
7621     case FUTEX_CMP_REQUEUE:
7622     case FUTEX_WAKE_OP:
7623         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7624            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7625            But the prototype takes a `struct timespec *'; insert casts
7626            to satisfy the compiler.  We do not need to tswap TIMEOUT
7627            since it's not compared to guest memory.  */
7628         pts = (struct timespec *)(uintptr_t) timeout;
7629         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7630                              (base_op == FUTEX_CMP_REQUEUE
7631                                       ? tswap32(val3)
7632                                       : val3));
7633     default:
7634         return -TARGET_ENOSYS;
7635     }
7636 }
7637 #endif
7638 
7639 #if defined(TARGET_NR_futex_time64)
7640 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7641                            target_ulong uaddr2, int val3)
7642 {
7643     struct timespec ts, *pts;
7644     int base_op;
7645 
7646     /* ??? We assume FUTEX_* constants are the same on both host
7647        and target.  */
7648 #ifdef FUTEX_CMD_MASK
7649     base_op = op & FUTEX_CMD_MASK;
7650 #else
7651     base_op = op;
7652 #endif
7653     switch (base_op) {
7654     case FUTEX_WAIT:
7655     case FUTEX_WAIT_BITSET:
7656         if (timeout) {
7657             pts = &ts;
7658             if (target_to_host_timespec64(pts, timeout)) {
7659                 return -TARGET_EFAULT;
7660             }
7661         } else {
7662             pts = NULL;
7663         }
7664         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7665     case FUTEX_WAKE:
7666         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7667     case FUTEX_FD:
7668         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7669     case FUTEX_REQUEUE:
7670     case FUTEX_CMP_REQUEUE:
7671     case FUTEX_WAKE_OP:
7672         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7673            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7674            But the prototype takes a `struct timespec *'; insert casts
7675            to satisfy the compiler.  We do not need to tswap TIMEOUT
7676            since it's not compared to guest memory.  */
7677         pts = (struct timespec *)(uintptr_t) timeout;
7678         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7679                              (base_op == FUTEX_CMP_REQUEUE
7680                                       ? tswap32(val3)
7681                                       : val3));
7682     default:
7683         return -TARGET_ENOSYS;
7684     }
7685 }
7686 #endif
7687 
7688 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7689 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7690                                      abi_long handle, abi_long mount_id,
7691                                      abi_long flags)
7692 {
7693     struct file_handle *target_fh;
7694     struct file_handle *fh;
7695     int mid = 0;
7696     abi_long ret;
7697     char *name;
7698     unsigned int size, total_size;
7699 
7700     if (get_user_s32(size, handle)) {
7701         return -TARGET_EFAULT;
7702     }
7703 
7704     name = lock_user_string(pathname);
7705     if (!name) {
7706         return -TARGET_EFAULT;
7707     }
7708 
7709     total_size = sizeof(struct file_handle) + size;
7710     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7711     if (!target_fh) {
7712         unlock_user(name, pathname, 0);
7713         return -TARGET_EFAULT;
7714     }
7715 
7716     fh = g_malloc0(total_size);
7717     fh->handle_bytes = size;
7718 
7719     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7720     unlock_user(name, pathname, 0);
7721 
7722     /* man name_to_handle_at(2):
7723      * Other than the use of the handle_bytes field, the caller should treat
7724      * the file_handle structure as an opaque data type
7725      */
7726 
7727     memcpy(target_fh, fh, total_size);
7728     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7729     target_fh->handle_type = tswap32(fh->handle_type);
7730     g_free(fh);
7731     unlock_user(target_fh, handle, total_size);
7732 
7733     if (put_user_s32(mid, mount_id)) {
7734         return -TARGET_EFAULT;
7735     }
7736 
7737     return ret;
7738 
7739 }
7740 #endif
7741 
7742 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7743 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7744                                      abi_long flags)
7745 {
7746     struct file_handle *target_fh;
7747     struct file_handle *fh;
7748     unsigned int size, total_size;
7749     abi_long ret;
7750 
7751     if (get_user_s32(size, handle)) {
7752         return -TARGET_EFAULT;
7753     }
7754 
7755     total_size = sizeof(struct file_handle) + size;
7756     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7757     if (!target_fh) {
7758         return -TARGET_EFAULT;
7759     }
7760 
7761     fh = g_memdup(target_fh, total_size);
7762     fh->handle_bytes = size;
7763     fh->handle_type = tswap32(target_fh->handle_type);
7764 
7765     ret = get_errno(open_by_handle_at(mount_fd, fh,
7766                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7767 
7768     g_free(fh);
7769 
7770     unlock_user(target_fh, handle, total_size);
7771 
7772     return ret;
7773 }
7774 #endif
7775 
7776 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7777 
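/*
 * Emulate signalfd()/signalfd4(): convert the target signal mask and
 * flags to host values, create the host signalfd, and register an fd
 * translator so data read from it is converted back to the target's
 * signalfd_siginfo layout.
 */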
7778 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7779 {
7780     int host_flags;
7781     target_sigset_t *target_mask;
7782     sigset_t host_mask;
7783     abi_long ret;
7784 
7785     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7786         return -TARGET_EINVAL;
7787     }
7788     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7789         return -TARGET_EFAULT;
7790     }
7791 
7792     target_to_host_sigset(&host_mask, target_mask);
7793 
7794     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7795 
7796     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7797     if (ret >= 0) {
7798         fd_trans_register(ret, &target_signalfd_trans);
7799     }
7800 
7801     unlock_user_struct(target_mask, mask, 0);
7802 
7803     return ret;
7804 }
7805 #endif
7806 
7807 /* Map host to target signal numbers for the wait family of syscalls.
7808    Assume all other status bits are the same.  */
7809 int host_to_target_waitstatus(int status)
7810 {
7811     if (WIFSIGNALED(status)) {
7812         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7813     }
7814     if (WIFSTOPPED(status)) {
7815         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7816                | (status & 0xff);
7817     }
7818     return status;
7819 }
7820 
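/*
 * Emulate /proc/self/cmdline by writing out the guest's argv[] strings,
 * each terminated by a NUL byte, e.g. "ls\0-l\0".
 */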
7821 static int open_self_cmdline(void *cpu_env, int fd)
7822 {
7823     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7824     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7825     int i;
7826 
7827     for (i = 0; i < bprm->argc; i++) {
7828         size_t len = strlen(bprm->argv[i]) + 1;
7829 
7830         if (write(fd, bprm->argv[i], len) != len) {
7831             return -1;
7832         }
7833     }
7834 
7835     return 0;
7836 }
7837 
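/*
 * Emulate /proc/self/maps: walk the host's own mappings, keep the ones
 * that lie inside the guest address space, and print them using guest
 * virtual addresses, marking the guest stack as "[stack]".
 */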
7838 static int open_self_maps(void *cpu_env, int fd)
7839 {
7840     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7841     TaskState *ts = cpu->opaque;
7842     GSList *map_info = read_self_maps();
7843     GSList *s;
7844     int count;
7845 
7846     for (s = map_info; s; s = g_slist_next(s)) {
7847         MapInfo *e = (MapInfo *) s->data;
7848 
7849         if (h2g_valid(e->start)) {
7850             unsigned long min = e->start;
7851             unsigned long max = e->end;
7852             int flags = page_get_flags(h2g(min));
7853             const char *path;
7854 
7855             max = h2g_valid(max - 1) ?
7856                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7857 
7858             if (page_check_range(h2g(min), max - min, flags) == -1) {
7859                 continue;
7860             }
7861 
7862             if (h2g(min) == ts->info->stack_limit) {
7863                 path = "[stack]";
7864             } else {
7865                 path = e->path;
7866             }
7867 
7868             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7869                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7870                             h2g(min), h2g(max - 1) + 1,
7871                             e->is_read ? 'r' : '-',
7872                             e->is_write ? 'w' : '-',
7873                             e->is_exec ? 'x' : '-',
7874                             e->is_priv ? 'p' : '-',
7875                             (uint64_t) e->offset, e->dev, e->inode);
7876             if (path) {
7877                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7878             } else {
7879                 dprintf(fd, "\n");
7880             }
7881         }
7882     }
7883 
7884     free_self_maps(map_info);
7885 
7886 #ifdef TARGET_VSYSCALL_PAGE
7887     /*
7888      * We only support execution from the vsyscall page.
7889      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7890      */
7891     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7892                     " --xp 00000000 00:00 0",
7893                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7894     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7895 #endif
7896 
7897     return 0;
7898 }
7899 
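/*
 * Emulate /proc/self/stat.  Only the fields QEMU can supply are filled
 * in (pid, command name and the start of the stack); every other field
 * is written as 0.
 */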
7900 static int open_self_stat(void *cpu_env, int fd)
7901 {
7902     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7903     TaskState *ts = cpu->opaque;
7904     g_autoptr(GString) buf = g_string_new(NULL);
7905     int i;
7906 
7907     for (i = 0; i < 44; i++) {
7908         if (i == 0) {
7909             /* pid */
7910             g_string_printf(buf, FMT_pid " ", getpid());
7911         } else if (i == 1) {
7912             /* app name */
7913             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7914             bin = bin ? bin + 1 : ts->bprm->argv[0];
7915             g_string_printf(buf, "(%.15s) ", bin);
7916         } else if (i == 27) {
7917             /* stack bottom */
7918             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7919         } else {
7920             /* the remaining fields are not emulated; report them as 0 */
7921             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7922         }
7923 
7924         if (write(fd, buf->str, buf->len) != buf->len) {
7925             return -1;
7926         }
7927     }
7928 
7929     return 0;
7930 }
7931 
7932 static int open_self_auxv(void *cpu_env, int fd)
7933 {
7934     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7935     TaskState *ts = cpu->opaque;
7936     abi_ulong auxv = ts->info->saved_auxv;
7937     abi_ulong len = ts->info->auxv_len;
7938     char *ptr;
7939 
7940     /*
7941      * The auxiliary vector is stored on the target process's stack;
7942      * read the whole vector and copy it out to the file.
7943      */
7944     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7945     if (ptr != NULL) {
7946         while (len > 0) {
7947             ssize_t r;
7948             r = write(fd, ptr, len);
7949             if (r <= 0) {
7950                 break;
7951             }
7952             len -= r;
7953             ptr += r;
7954         }
7955         lseek(fd, 0, SEEK_SET);
7956         unlock_user(ptr, auxv, len);
7957     }
7958 
7959     return 0;
7960 }
7961 
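/*
 * Return non-zero if FILENAME refers to ENTRY for the current process,
 * i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" where <pid> is our
 * own pid; for example is_proc_myself("/proc/self/maps", "maps").
 */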
7962 static int is_proc_myself(const char *filename, const char *entry)
7963 {
7964     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7965         filename += strlen("/proc/");
7966         if (!strncmp(filename, "self/", strlen("self/"))) {
7967             filename += strlen("self/");
7968         } else if (*filename >= '1' && *filename <= '9') {
7969             char myself[80];
7970             snprintf(myself, sizeof(myself), "%d/", getpid());
7971             if (!strncmp(filename, myself, strlen(myself))) {
7972                 filename += strlen(myself);
7973             } else {
7974                 return 0;
7975             }
7976         } else {
7977             return 0;
7978         }
7979         if (!strcmp(filename, entry)) {
7980             return 1;
7981         }
7982     }
7983     return 0;
7984 }
7985 
7986 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7987     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7988 static int is_proc(const char *filename, const char *entry)
7989 {
7990     return strcmp(filename, entry) == 0;
7991 }
7992 #endif
7993 
7994 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
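/*
 * Emulate /proc/net/route when host and guest endianness differ: copy
 * the host file but byte-swap the destination, gateway and netmask
 * fields so the guest sees them in its own byte order.
 */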
7995 static int open_net_route(void *cpu_env, int fd)
7996 {
7997     FILE *fp;
7998     char *line = NULL;
7999     size_t len = 0;
8000     ssize_t read;
8001 
8002     fp = fopen("/proc/net/route", "r");
8003     if (fp == NULL) {
8004         return -1;
8005     }
8006 
8007     /* read header */
8008 
8009     read = getline(&line, &len, fp);
    if (read == -1) {
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);
8011 
8012     /* read routes */
8013 
8014     while ((read = getline(&line, &len, fp)) != -1) {
8015         char iface[16];
8016         uint32_t dest, gw, mask;
8017         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8018         int fields;
8019 
8020         fields = sscanf(line,
8021                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8022                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8023                         &mask, &mtu, &window, &irtt);
8024         if (fields != 11) {
8025             continue;
8026         }
8027         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8028                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8029                 metric, tswap32(mask), mtu, window, irtt);
8030     }
8031 
8032     free(line);
8033     fclose(fp);
8034 
8035     return 0;
8036 }
8037 #endif
8038 
8039 #if defined(TARGET_SPARC)
8040 static int open_cpuinfo(void *cpu_env, int fd)
8041 {
8042     dprintf(fd, "type\t\t: sun4u\n");
8043     return 0;
8044 }
8045 #endif
8046 
8047 #if defined(TARGET_HPPA)
8048 static int open_cpuinfo(void *cpu_env, int fd)
8049 {
8050     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8051     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8052     dprintf(fd, "capabilities\t: os32\n");
8053     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8054     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8055     return 0;
8056 }
8057 #endif
8058 
8059 #if defined(TARGET_M68K)
8060 static int open_hardware(void *cpu_env, int fd)
8061 {
8062     dprintf(fd, "Model:\t\tqemu-m68k\n");
8063     return 0;
8064 }
8065 #endif
8066 
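/*
 * openat() emulation.  A small table of fake files lets paths such as
 * /proc/self/maps be answered with contents synthesized into a
 * temporary file; anything not in the table is passed through to the
 * host via safe_openat().
 */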
8067 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8068 {
8069     struct fake_open {
8070         const char *filename;
8071         int (*fill)(void *cpu_env, int fd);
8072         int (*cmp)(const char *s1, const char *s2);
8073     };
8074     const struct fake_open *fake_open;
8075     static const struct fake_open fakes[] = {
8076         { "maps", open_self_maps, is_proc_myself },
8077         { "stat", open_self_stat, is_proc_myself },
8078         { "auxv", open_self_auxv, is_proc_myself },
8079         { "cmdline", open_self_cmdline, is_proc_myself },
8080 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8081         { "/proc/net/route", open_net_route, is_proc },
8082 #endif
8083 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8084         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8085 #endif
8086 #if defined(TARGET_M68K)
8087         { "/proc/hardware", open_hardware, is_proc },
8088 #endif
8089         { NULL, NULL, NULL }
8090     };
8091 
8092     if (is_proc_myself(pathname, "exe")) {
8093         int execfd = qemu_getauxval(AT_EXECFD);
8094         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8095     }
8096 
8097     for (fake_open = fakes; fake_open->filename; fake_open++) {
8098         if (fake_open->cmp(pathname, fake_open->filename)) {
8099             break;
8100         }
8101     }
8102 
8103     if (fake_open->filename) {
8104         const char *tmpdir;
8105         char filename[PATH_MAX];
8106         int fd, r;
8107 
8108         /* create a temporary file holding the synthesized contents */
8109         tmpdir = getenv("TMPDIR");
8110         if (!tmpdir)
8111             tmpdir = "/tmp";
8112         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8113         fd = mkstemp(filename);
8114         if (fd < 0) {
8115             return fd;
8116         }
8117         unlink(filename);
8118 
8119         if ((r = fake_open->fill(cpu_env, fd))) {
8120             int e = errno;
8121             close(fd);
8122             errno = e;
8123             return r;
8124         }
8125         lseek(fd, 0, SEEK_SET);
8126 
8127         return fd;
8128     }
8129 
8130     return safe_openat(dirfd, path(pathname), flags, mode);
8131 }
8132 
8133 #define TIMER_MAGIC 0x0caf0000
8134 #define TIMER_MAGIC_MASK 0xffff0000
8135 
8136 /* Convert QEMU provided timer ID back to internal 16bit index format */
8137 static target_timer_t get_timer_id(abi_long arg)
8138 {
8139     target_timer_t timerid = arg;
8140 
8141     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8142         return -TARGET_EINVAL;
8143     }
8144 
8145     timerid &= 0xffff;
8146 
8147     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8148         return -TARGET_EINVAL;
8149     }
8150 
8151     return timerid;
8152 }
8153 
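/*
 * Convert a guest CPU affinity mask (an array of abi_ulong words) into
 * the host's unsigned long bitmap layout, copying bit by bit so that
 * differences in word size and endianness are handled correctly.
 */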
8154 static int target_to_host_cpu_mask(unsigned long *host_mask,
8155                                    size_t host_size,
8156                                    abi_ulong target_addr,
8157                                    size_t target_size)
8158 {
8159     unsigned target_bits = sizeof(abi_ulong) * 8;
8160     unsigned host_bits = sizeof(*host_mask) * 8;
8161     abi_ulong *target_mask;
8162     unsigned i, j;
8163 
8164     assert(host_size >= target_size);
8165 
8166     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8167     if (!target_mask) {
8168         return -TARGET_EFAULT;
8169     }
8170     memset(host_mask, 0, host_size);
8171 
8172     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8173         unsigned bit = i * target_bits;
8174         abi_ulong val;
8175 
8176         __get_user(val, &target_mask[i]);
8177         for (j = 0; j < target_bits; j++, bit++) {
8178             if (val & (1UL << j)) {
8179                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8180             }
8181         }
8182     }
8183 
8184     unlock_user(target_mask, target_addr, 0);
8185     return 0;
8186 }
8187 
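/*
 * The inverse of target_to_host_cpu_mask(): write a host CPU affinity
 * bitmap back to guest memory as an array of abi_ulong words.
 */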
8188 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8189                                    size_t host_size,
8190                                    abi_ulong target_addr,
8191                                    size_t target_size)
8192 {
8193     unsigned target_bits = sizeof(abi_ulong) * 8;
8194     unsigned host_bits = sizeof(*host_mask) * 8;
8195     abi_ulong *target_mask;
8196     unsigned i, j;
8197 
8198     assert(host_size >= target_size);
8199 
8200     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8201     if (!target_mask) {
8202         return -TARGET_EFAULT;
8203     }
8204 
8205     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8206         unsigned bit = i * target_bits;
8207         abi_ulong val = 0;
8208 
8209         for (j = 0; j < target_bits; j++, bit++) {
8210             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8211                 val |= 1UL << j;
8212             }
8213         }
8214         __put_user(val, &target_mask[i]);
8215     }
8216 
8217     unlock_user(target_mask, target_addr, target_size);
8218     return 0;
8219 }
8220 
8221 /* This is an internal helper for do_syscall so that it is easier
8222  * to have a single return point, so that actions, such as logging
8223  * of syscall results, can be performed.
8224  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8225  */
8226 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8227                             abi_long arg2, abi_long arg3, abi_long arg4,
8228                             abi_long arg5, abi_long arg6, abi_long arg7,
8229                             abi_long arg8)
8230 {
8231     CPUState *cpu = env_cpu(cpu_env);
8232     abi_long ret;
8233 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8234     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8235     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8236     || defined(TARGET_NR_statx)
8237     struct stat st;
8238 #endif
8239 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8240     || defined(TARGET_NR_fstatfs)
8241     struct statfs stfs;
8242 #endif
8243     void *p;
8244 
8245     switch(num) {
8246     case TARGET_NR_exit:
8247         /* In old applications this may be used to implement _exit(2).
8248            However in threaded applications it is used for thread termination,
8249            and _exit_group is used for application termination.
8250            Do thread termination if we have more than one thread.  */
8251 
8252         if (block_signals()) {
8253             return -TARGET_ERESTARTSYS;
8254         }
8255 
8256         pthread_mutex_lock(&clone_lock);
8257 
8258         if (CPU_NEXT(first_cpu)) {
8259             TaskState *ts = cpu->opaque;
8260 
8261             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8262             object_unref(OBJECT(cpu));
8263             /*
8264              * At this point the CPU should be unrealized and removed
8265              * from cpu lists. We can clean-up the rest of the thread
8266              * data without the lock held.
8267              */
8268 
8269             pthread_mutex_unlock(&clone_lock);
8270 
8271             if (ts->child_tidptr) {
8272                 put_user_u32(0, ts->child_tidptr);
8273                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8274                           NULL, NULL, 0);
8275             }
8276             thread_cpu = NULL;
8277             g_free(ts);
8278             rcu_unregister_thread();
8279             pthread_exit(NULL);
8280         }
8281 
8282         pthread_mutex_unlock(&clone_lock);
8283         preexit_cleanup(cpu_env, arg1);
8284         _exit(arg1);
8285         return 0; /* avoid warning */
8286     case TARGET_NR_read:
8287         if (arg2 == 0 && arg3 == 0) {
8288             return get_errno(safe_read(arg1, 0, 0));
8289         } else {
8290             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8291                 return -TARGET_EFAULT;
8292             ret = get_errno(safe_read(arg1, p, arg3));
8293             if (ret >= 0 &&
8294                 fd_trans_host_to_target_data(arg1)) {
8295                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8296             }
8297             unlock_user(p, arg2, ret);
8298         }
8299         return ret;
8300     case TARGET_NR_write:
8301         if (arg2 == 0 && arg3 == 0) {
8302             return get_errno(safe_write(arg1, 0, 0));
8303         }
8304         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8305             return -TARGET_EFAULT;
8306         if (fd_trans_target_to_host_data(arg1)) {
8307             void *copy = g_malloc(arg3);
8308             memcpy(copy, p, arg3);
8309             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8310             if (ret >= 0) {
8311                 ret = get_errno(safe_write(arg1, copy, ret));
8312             }
8313             g_free(copy);
8314         } else {
8315             ret = get_errno(safe_write(arg1, p, arg3));
8316         }
8317         unlock_user(p, arg2, 0);
8318         return ret;
8319 
8320 #ifdef TARGET_NR_open
8321     case TARGET_NR_open:
8322         if (!(p = lock_user_string(arg1)))
8323             return -TARGET_EFAULT;
8324         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8325                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8326                                   arg3));
8327         fd_trans_unregister(ret);
8328         unlock_user(p, arg1, 0);
8329         return ret;
8330 #endif
8331     case TARGET_NR_openat:
8332         if (!(p = lock_user_string(arg2)))
8333             return -TARGET_EFAULT;
8334         ret = get_errno(do_openat(cpu_env, arg1, p,
8335                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8336                                   arg4));
8337         fd_trans_unregister(ret);
8338         unlock_user(p, arg2, 0);
8339         return ret;
8340 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8341     case TARGET_NR_name_to_handle_at:
8342         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8343         return ret;
8344 #endif
8345 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8346     case TARGET_NR_open_by_handle_at:
8347         ret = do_open_by_handle_at(arg1, arg2, arg3);
8348         fd_trans_unregister(ret);
8349         return ret;
8350 #endif
8351     case TARGET_NR_close:
8352         fd_trans_unregister(arg1);
8353         return get_errno(close(arg1));
8354 
8355     case TARGET_NR_brk:
8356         return do_brk(arg1);
8357 #ifdef TARGET_NR_fork
8358     case TARGET_NR_fork:
8359         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8360 #endif
8361 #ifdef TARGET_NR_waitpid
8362     case TARGET_NR_waitpid:
8363         {
8364             int status;
8365             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8366             if (!is_error(ret) && arg2 && ret
8367                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8368                 return -TARGET_EFAULT;
8369         }
8370         return ret;
8371 #endif
8372 #ifdef TARGET_NR_waitid
8373     case TARGET_NR_waitid:
8374         {
8375             siginfo_t info;
8376             info.si_pid = 0;
8377             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8378             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8379                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8380                     return -TARGET_EFAULT;
8381                 host_to_target_siginfo(p, &info);
8382                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8383             }
8384         }
8385         return ret;
8386 #endif
8387 #ifdef TARGET_NR_creat /* not on alpha */
8388     case TARGET_NR_creat:
8389         if (!(p = lock_user_string(arg1)))
8390             return -TARGET_EFAULT;
8391         ret = get_errno(creat(p, arg2));
8392         fd_trans_unregister(ret);
8393         unlock_user(p, arg1, 0);
8394         return ret;
8395 #endif
8396 #ifdef TARGET_NR_link
8397     case TARGET_NR_link:
8398         {
8399             void * p2;
8400             p = lock_user_string(arg1);
8401             p2 = lock_user_string(arg2);
8402             if (!p || !p2)
8403                 ret = -TARGET_EFAULT;
8404             else
8405                 ret = get_errno(link(p, p2));
8406             unlock_user(p2, arg2, 0);
8407             unlock_user(p, arg1, 0);
8408         }
8409         return ret;
8410 #endif
8411 #if defined(TARGET_NR_linkat)
8412     case TARGET_NR_linkat:
8413         {
8414             void * p2 = NULL;
8415             if (!arg2 || !arg4)
8416                 return -TARGET_EFAULT;
8417             p  = lock_user_string(arg2);
8418             p2 = lock_user_string(arg4);
8419             if (!p || !p2)
8420                 ret = -TARGET_EFAULT;
8421             else
8422                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8423             unlock_user(p, arg2, 0);
8424             unlock_user(p2, arg4, 0);
8425         }
8426         return ret;
8427 #endif
8428 #ifdef TARGET_NR_unlink
8429     case TARGET_NR_unlink:
8430         if (!(p = lock_user_string(arg1)))
8431             return -TARGET_EFAULT;
8432         ret = get_errno(unlink(p));
8433         unlock_user(p, arg1, 0);
8434         return ret;
8435 #endif
8436 #if defined(TARGET_NR_unlinkat)
8437     case TARGET_NR_unlinkat:
8438         if (!(p = lock_user_string(arg2)))
8439             return -TARGET_EFAULT;
8440         ret = get_errno(unlinkat(arg1, p, arg3));
8441         unlock_user(p, arg2, 0);
8442         return ret;
8443 #endif
8444     case TARGET_NR_execve:
8445         {
8446             char **argp, **envp;
8447             int argc, envc;
8448             abi_ulong gp;
8449             abi_ulong guest_argp;
8450             abi_ulong guest_envp;
8451             abi_ulong addr;
8452             char **q;
8453             int total_size = 0;
8454 
8455             argc = 0;
8456             guest_argp = arg2;
8457             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8458                 if (get_user_ual(addr, gp))
8459                     return -TARGET_EFAULT;
8460                 if (!addr)
8461                     break;
8462                 argc++;
8463             }
8464             envc = 0;
8465             guest_envp = arg3;
8466             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8467                 if (get_user_ual(addr, gp))
8468                     return -TARGET_EFAULT;
8469                 if (!addr)
8470                     break;
8471                 envc++;
8472             }
8473 
8474             argp = g_new0(char *, argc + 1);
8475             envp = g_new0(char *, envc + 1);
8476 
8477             for (gp = guest_argp, q = argp; gp;
8478                   gp += sizeof(abi_ulong), q++) {
8479                 if (get_user_ual(addr, gp))
8480                     goto execve_efault;
8481                 if (!addr)
8482                     break;
8483                 if (!(*q = lock_user_string(addr)))
8484                     goto execve_efault;
8485                 total_size += strlen(*q) + 1;
8486             }
8487             *q = NULL;
8488 
8489             for (gp = guest_envp, q = envp; gp;
8490                   gp += sizeof(abi_ulong), q++) {
8491                 if (get_user_ual(addr, gp))
8492                     goto execve_efault;
8493                 if (!addr)
8494                     break;
8495                 if (!(*q = lock_user_string(addr)))
8496                     goto execve_efault;
8497                 total_size += strlen(*q) + 1;
8498             }
8499             *q = NULL;
8500 
8501             if (!(p = lock_user_string(arg1)))
8502                 goto execve_efault;
8503             /* Although execve() is not an interruptible syscall it is
8504              * a special case where we must use the safe_syscall wrapper:
8505              * if we allow a signal to happen before we make the host
8506              * syscall then we will 'lose' it, because at the point of
8507              * execve the process leaves QEMU's control. So we use the
8508              * safe syscall wrapper to ensure that we either take the
8509              * signal as a guest signal, or else it does not happen
8510              * before the execve completes and makes it the other
8511              * program's problem.
8512              */
8513             ret = get_errno(safe_execve(p, argp, envp));
8514             unlock_user(p, arg1, 0);
8515 
8516             goto execve_end;
8517 
8518         execve_efault:
8519             ret = -TARGET_EFAULT;
8520 
8521         execve_end:
8522             for (gp = guest_argp, q = argp; *q;
8523                   gp += sizeof(abi_ulong), q++) {
8524                 if (get_user_ual(addr, gp)
8525                     || !addr)
8526                     break;
8527                 unlock_user(*q, addr, 0);
8528             }
8529             for (gp = guest_envp, q = envp; *q;
8530                   gp += sizeof(abi_ulong), q++) {
8531                 if (get_user_ual(addr, gp)
8532                     || !addr)
8533                     break;
8534                 unlock_user(*q, addr, 0);
8535             }
8536 
8537             g_free(argp);
8538             g_free(envp);
8539         }
8540         return ret;
8541     case TARGET_NR_chdir:
8542         if (!(p = lock_user_string(arg1)))
8543             return -TARGET_EFAULT;
8544         ret = get_errno(chdir(p));
8545         unlock_user(p, arg1, 0);
8546         return ret;
8547 #ifdef TARGET_NR_time
8548     case TARGET_NR_time:
8549         {
8550             time_t host_time;
8551             ret = get_errno(time(&host_time));
8552             if (!is_error(ret)
8553                 && arg1
8554                 && put_user_sal(host_time, arg1))
8555                 return -TARGET_EFAULT;
8556         }
8557         return ret;
8558 #endif
8559 #ifdef TARGET_NR_mknod
8560     case TARGET_NR_mknod:
8561         if (!(p = lock_user_string(arg1)))
8562             return -TARGET_EFAULT;
8563         ret = get_errno(mknod(p, arg2, arg3));
8564         unlock_user(p, arg1, 0);
8565         return ret;
8566 #endif
8567 #if defined(TARGET_NR_mknodat)
8568     case TARGET_NR_mknodat:
8569         if (!(p = lock_user_string(arg2)))
8570             return -TARGET_EFAULT;
8571         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8572         unlock_user(p, arg2, 0);
8573         return ret;
8574 #endif
8575 #ifdef TARGET_NR_chmod
8576     case TARGET_NR_chmod:
8577         if (!(p = lock_user_string(arg1)))
8578             return -TARGET_EFAULT;
8579         ret = get_errno(chmod(p, arg2));
8580         unlock_user(p, arg1, 0);
8581         return ret;
8582 #endif
8583 #ifdef TARGET_NR_lseek
8584     case TARGET_NR_lseek:
8585         return get_errno(lseek(arg1, arg2, arg3));
8586 #endif
8587 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8588     /* Alpha specific */
8589     case TARGET_NR_getxpid:
8590         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8591         return get_errno(getpid());
8592 #endif
8593 #ifdef TARGET_NR_getpid
8594     case TARGET_NR_getpid:
8595         return get_errno(getpid());
8596 #endif
8597     case TARGET_NR_mount:
8598         {
8599             /* need to look at the data field */
8600             void *p2, *p3;
8601 
8602             if (arg1) {
8603                 p = lock_user_string(arg1);
8604                 if (!p) {
8605                     return -TARGET_EFAULT;
8606                 }
8607             } else {
8608                 p = NULL;
8609             }
8610 
8611             p2 = lock_user_string(arg2);
8612             if (!p2) {
8613                 if (arg1) {
8614                     unlock_user(p, arg1, 0);
8615                 }
8616                 return -TARGET_EFAULT;
8617             }
8618 
8619             if (arg3) {
8620                 p3 = lock_user_string(arg3);
8621                 if (!p3) {
8622                     if (arg1) {
8623                         unlock_user(p, arg1, 0);
8624                     }
8625                     unlock_user(p2, arg2, 0);
8626                     return -TARGET_EFAULT;
8627                 }
8628             } else {
8629                 p3 = NULL;
8630             }
8631 
8632             /* FIXME - arg5 should be locked, but it isn't clear how to
8633              * do that since it's not guaranteed to be a NULL-terminated
8634              * string.
8635              */
8636             if (!arg5) {
8637                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8638             } else {
8639                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8640             }
8641             ret = get_errno(ret);
8642 
8643             if (arg1) {
8644                 unlock_user(p, arg1, 0);
8645             }
8646             unlock_user(p2, arg2, 0);
8647             if (arg3) {
8648                 unlock_user(p3, arg3, 0);
8649             }
8650         }
8651         return ret;
8652 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8653 #if defined(TARGET_NR_umount)
8654     case TARGET_NR_umount:
8655 #endif
8656 #if defined(TARGET_NR_oldumount)
8657     case TARGET_NR_oldumount:
8658 #endif
8659         if (!(p = lock_user_string(arg1)))
8660             return -TARGET_EFAULT;
8661         ret = get_errno(umount(p));
8662         unlock_user(p, arg1, 0);
8663         return ret;
8664 #endif
8665 #ifdef TARGET_NR_stime /* not on alpha */
8666     case TARGET_NR_stime:
8667         {
8668             struct timespec ts;
8669             ts.tv_nsec = 0;
8670             if (get_user_sal(ts.tv_sec, arg1)) {
8671                 return -TARGET_EFAULT;
8672             }
8673             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8674         }
8675 #endif
8676 #ifdef TARGET_NR_alarm /* not on alpha */
8677     case TARGET_NR_alarm:
8678         return alarm(arg1);
8679 #endif
8680 #ifdef TARGET_NR_pause /* not on alpha */
8681     case TARGET_NR_pause:
8682         if (!block_signals()) {
8683             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8684         }
8685         return -TARGET_EINTR;
8686 #endif
8687 #ifdef TARGET_NR_utime
8688     case TARGET_NR_utime:
8689         {
8690             struct utimbuf tbuf, *host_tbuf;
8691             struct target_utimbuf *target_tbuf;
8692             if (arg2) {
8693                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8694                     return -TARGET_EFAULT;
8695                 tbuf.actime = tswapal(target_tbuf->actime);
8696                 tbuf.modtime = tswapal(target_tbuf->modtime);
8697                 unlock_user_struct(target_tbuf, arg2, 0);
8698                 host_tbuf = &tbuf;
8699             } else {
8700                 host_tbuf = NULL;
8701             }
8702             if (!(p = lock_user_string(arg1)))
8703                 return -TARGET_EFAULT;
8704             ret = get_errno(utime(p, host_tbuf));
8705             unlock_user(p, arg1, 0);
8706         }
8707         return ret;
8708 #endif
8709 #ifdef TARGET_NR_utimes
8710     case TARGET_NR_utimes:
8711         {
8712             struct timeval *tvp, tv[2];
8713             if (arg2) {
8714                 if (copy_from_user_timeval(&tv[0], arg2)
8715                     || copy_from_user_timeval(&tv[1],
8716                                               arg2 + sizeof(struct target_timeval)))
8717                     return -TARGET_EFAULT;
8718                 tvp = tv;
8719             } else {
8720                 tvp = NULL;
8721             }
8722             if (!(p = lock_user_string(arg1)))
8723                 return -TARGET_EFAULT;
8724             ret = get_errno(utimes(p, tvp));
8725             unlock_user(p, arg1, 0);
8726         }
8727         return ret;
8728 #endif
8729 #if defined(TARGET_NR_futimesat)
8730     case TARGET_NR_futimesat:
8731         {
8732             struct timeval *tvp, tv[2];
8733             if (arg3) {
8734                 if (copy_from_user_timeval(&tv[0], arg3)
8735                     || copy_from_user_timeval(&tv[1],
8736                                               arg3 + sizeof(struct target_timeval)))
8737                     return -TARGET_EFAULT;
8738                 tvp = tv;
8739             } else {
8740                 tvp = NULL;
8741             }
8742             if (!(p = lock_user_string(arg2))) {
8743                 return -TARGET_EFAULT;
8744             }
8745             ret = get_errno(futimesat(arg1, path(p), tvp));
8746             unlock_user(p, arg2, 0);
8747         }
8748         return ret;
8749 #endif
8750 #ifdef TARGET_NR_access
8751     case TARGET_NR_access:
8752         if (!(p = lock_user_string(arg1))) {
8753             return -TARGET_EFAULT;
8754         }
8755         ret = get_errno(access(path(p), arg2));
8756         unlock_user(p, arg1, 0);
8757         return ret;
8758 #endif
8759 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8760     case TARGET_NR_faccessat:
8761         if (!(p = lock_user_string(arg2))) {
8762             return -TARGET_EFAULT;
8763         }
8764         ret = get_errno(faccessat(arg1, p, arg3, 0));
8765         unlock_user(p, arg2, 0);
8766         return ret;
8767 #endif
8768 #ifdef TARGET_NR_nice /* not on alpha */
8769     case TARGET_NR_nice:
8770         return get_errno(nice(arg1));
8771 #endif
8772     case TARGET_NR_sync:
8773         sync();
8774         return 0;
8775 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8776     case TARGET_NR_syncfs:
8777         return get_errno(syncfs(arg1));
8778 #endif
8779     case TARGET_NR_kill:
8780         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8781 #ifdef TARGET_NR_rename
8782     case TARGET_NR_rename:
8783         {
8784             void *p2;
8785             p = lock_user_string(arg1);
8786             p2 = lock_user_string(arg2);
8787             if (!p || !p2)
8788                 ret = -TARGET_EFAULT;
8789             else
8790                 ret = get_errno(rename(p, p2));
8791             unlock_user(p2, arg2, 0);
8792             unlock_user(p, arg1, 0);
8793         }
8794         return ret;
8795 #endif
8796 #if defined(TARGET_NR_renameat)
8797     case TARGET_NR_renameat:
8798         {
8799             void *p2;
8800             p  = lock_user_string(arg2);
8801             p2 = lock_user_string(arg4);
8802             if (!p || !p2)
8803                 ret = -TARGET_EFAULT;
8804             else
8805                 ret = get_errno(renameat(arg1, p, arg3, p2));
8806             unlock_user(p2, arg4, 0);
8807             unlock_user(p, arg2, 0);
8808         }
8809         return ret;
8810 #endif
8811 #if defined(TARGET_NR_renameat2)
8812     case TARGET_NR_renameat2:
8813         {
8814             void *p2;
8815             p  = lock_user_string(arg2);
8816             p2 = lock_user_string(arg4);
8817             if (!p || !p2) {
8818                 ret = -TARGET_EFAULT;
8819             } else {
8820                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8821             }
8822             unlock_user(p2, arg4, 0);
8823             unlock_user(p, arg2, 0);
8824         }
8825         return ret;
8826 #endif
8827 #ifdef TARGET_NR_mkdir
8828     case TARGET_NR_mkdir:
8829         if (!(p = lock_user_string(arg1)))
8830             return -TARGET_EFAULT;
8831         ret = get_errno(mkdir(p, arg2));
8832         unlock_user(p, arg1, 0);
8833         return ret;
8834 #endif
8835 #if defined(TARGET_NR_mkdirat)
8836     case TARGET_NR_mkdirat:
8837         if (!(p = lock_user_string(arg2)))
8838             return -TARGET_EFAULT;
8839         ret = get_errno(mkdirat(arg1, p, arg3));
8840         unlock_user(p, arg2, 0);
8841         return ret;
8842 #endif
8843 #ifdef TARGET_NR_rmdir
8844     case TARGET_NR_rmdir:
8845         if (!(p = lock_user_string(arg1)))
8846             return -TARGET_EFAULT;
8847         ret = get_errno(rmdir(p));
8848         unlock_user(p, arg1, 0);
8849         return ret;
8850 #endif
8851     case TARGET_NR_dup:
8852         ret = get_errno(dup(arg1));
8853         if (ret >= 0) {
8854             fd_trans_dup(arg1, ret);
8855         }
8856         return ret;
8857 #ifdef TARGET_NR_pipe
8858     case TARGET_NR_pipe:
8859         return do_pipe(cpu_env, arg1, 0, 0);
8860 #endif
8861 #ifdef TARGET_NR_pipe2
8862     case TARGET_NR_pipe2:
8863         return do_pipe(cpu_env, arg1,
8864                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8865 #endif
8866     case TARGET_NR_times:
8867         {
8868             struct target_tms *tmsp;
8869             struct tms tms;
8870             ret = get_errno(times(&tms));
8871             if (arg1) {
8872                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8873                 if (!tmsp)
8874                     return -TARGET_EFAULT;
8875                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8876                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8877                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8878                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8879             }
8880             if (!is_error(ret))
8881                 ret = host_to_target_clock_t(ret);
8882         }
8883         return ret;
8884     case TARGET_NR_acct:
8885         if (arg1 == 0) {
8886             ret = get_errno(acct(NULL));
8887         } else {
8888             if (!(p = lock_user_string(arg1))) {
8889                 return -TARGET_EFAULT;
8890             }
8891             ret = get_errno(acct(path(p)));
8892             unlock_user(p, arg1, 0);
8893         }
8894         return ret;
8895 #ifdef TARGET_NR_umount2
8896     case TARGET_NR_umount2:
8897         if (!(p = lock_user_string(arg1)))
8898             return -TARGET_EFAULT;
8899         ret = get_errno(umount2(p, arg2));
8900         unlock_user(p, arg1, 0);
8901         return ret;
8902 #endif
8903     case TARGET_NR_ioctl:
8904         return do_ioctl(arg1, arg2, arg3);
8905 #ifdef TARGET_NR_fcntl
8906     case TARGET_NR_fcntl:
8907         return do_fcntl(arg1, arg2, arg3);
8908 #endif
8909     case TARGET_NR_setpgid:
8910         return get_errno(setpgid(arg1, arg2));
8911     case TARGET_NR_umask:
8912         return get_errno(umask(arg1));
8913     case TARGET_NR_chroot:
8914         if (!(p = lock_user_string(arg1)))
8915             return -TARGET_EFAULT;
8916         ret = get_errno(chroot(p));
8917         unlock_user(p, arg1, 0);
8918         return ret;
8919 #ifdef TARGET_NR_dup2
8920     case TARGET_NR_dup2:
8921         ret = get_errno(dup2(arg1, arg2));
8922         if (ret >= 0) {
8923             fd_trans_dup(arg1, arg2);
8924         }
8925         return ret;
8926 #endif
8927 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8928     case TARGET_NR_dup3:
8929     {
8930         int host_flags;
8931 
8932         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8933             return -TARGET_EINVAL;
8934         }
8935         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8936         ret = get_errno(dup3(arg1, arg2, host_flags));
8937         if (ret >= 0) {
8938             fd_trans_dup(arg1, arg2);
8939         }
8940         return ret;
8941     }
8942 #endif
8943 #ifdef TARGET_NR_getppid /* not on alpha */
8944     case TARGET_NR_getppid:
8945         return get_errno(getppid());
8946 #endif
8947 #ifdef TARGET_NR_getpgrp
8948     case TARGET_NR_getpgrp:
8949         return get_errno(getpgrp());
8950 #endif
8951     case TARGET_NR_setsid:
8952         return get_errno(setsid());
8953 #ifdef TARGET_NR_sigaction
8954     case TARGET_NR_sigaction:
8955         {
8956 #if defined(TARGET_ALPHA)
8957             struct target_sigaction act, oact, *pact = 0;
8958             struct target_old_sigaction *old_act;
8959             if (arg2) {
8960                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8961                     return -TARGET_EFAULT;
8962                 act._sa_handler = old_act->_sa_handler;
8963                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8964                 act.sa_flags = old_act->sa_flags;
8965                 act.sa_restorer = 0;
8966                 unlock_user_struct(old_act, arg2, 0);
8967                 pact = &act;
8968             }
8969             ret = get_errno(do_sigaction(arg1, pact, &oact));
8970             if (!is_error(ret) && arg3) {
8971                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8972                     return -TARGET_EFAULT;
8973                 old_act->_sa_handler = oact._sa_handler;
8974                 old_act->sa_mask = oact.sa_mask.sig[0];
8975                 old_act->sa_flags = oact.sa_flags;
8976                 unlock_user_struct(old_act, arg3, 1);
8977             }
8978 #elif defined(TARGET_MIPS)
8979 	    struct target_sigaction act, oact, *pact, *old_act;
8980 
8981 	    if (arg2) {
8982                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8983                     return -TARGET_EFAULT;
8984 		act._sa_handler = old_act->_sa_handler;
8985 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8986 		act.sa_flags = old_act->sa_flags;
8987 		unlock_user_struct(old_act, arg2, 0);
8988 		pact = &act;
8989 	    } else {
8990 		pact = NULL;
8991 	    }
8992 
8993 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8994 
8995 	    if (!is_error(ret) && arg3) {
8996                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8997                     return -TARGET_EFAULT;
8998 		old_act->_sa_handler = oact._sa_handler;
8999 		old_act->sa_flags = oact.sa_flags;
9000 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9001 		old_act->sa_mask.sig[1] = 0;
9002 		old_act->sa_mask.sig[2] = 0;
9003 		old_act->sa_mask.sig[3] = 0;
9004 		unlock_user_struct(old_act, arg3, 1);
9005 	    }
9006 #else
9007             struct target_old_sigaction *old_act;
9008             struct target_sigaction act, oact, *pact;
9009             if (arg2) {
9010                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9011                     return -TARGET_EFAULT;
9012                 act._sa_handler = old_act->_sa_handler;
9013                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9014                 act.sa_flags = old_act->sa_flags;
9015                 act.sa_restorer = old_act->sa_restorer;
9016 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9017                 act.ka_restorer = 0;
9018 #endif
9019                 unlock_user_struct(old_act, arg2, 0);
9020                 pact = &act;
9021             } else {
9022                 pact = NULL;
9023             }
9024             ret = get_errno(do_sigaction(arg1, pact, &oact));
9025             if (!is_error(ret) && arg3) {
9026                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9027                     return -TARGET_EFAULT;
9028                 old_act->_sa_handler = oact._sa_handler;
9029                 old_act->sa_mask = oact.sa_mask.sig[0];
9030                 old_act->sa_flags = oact.sa_flags;
9031                 old_act->sa_restorer = oact.sa_restorer;
9032                 unlock_user_struct(old_act, arg3, 1);
9033             }
9034 #endif
9035         }
9036         return ret;
9037 #endif
9038     case TARGET_NR_rt_sigaction:
9039         {
9040 #if defined(TARGET_ALPHA)
9041             /* For Alpha and SPARC this is a 5 argument syscall, with
9042              * a 'restorer' parameter which must be copied into the
9043              * sa_restorer field of the sigaction struct.
9044              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9045              * and arg5 is the sigsetsize.
9046              * Alpha also has a separate rt_sigaction struct that it uses
9047              * here; SPARC uses the usual sigaction struct.
9048              */
9049             struct target_rt_sigaction *rt_act;
9050             struct target_sigaction act, oact, *pact = 0;
9051 
9052             if (arg4 != sizeof(target_sigset_t)) {
9053                 return -TARGET_EINVAL;
9054             }
9055             if (arg2) {
9056                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9057                     return -TARGET_EFAULT;
9058                 act._sa_handler = rt_act->_sa_handler;
9059                 act.sa_mask = rt_act->sa_mask;
9060                 act.sa_flags = rt_act->sa_flags;
9061                 act.sa_restorer = arg5;
9062                 unlock_user_struct(rt_act, arg2, 0);
9063                 pact = &act;
9064             }
9065             ret = get_errno(do_sigaction(arg1, pact, &oact));
9066             if (!is_error(ret) && arg3) {
9067                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9068                     return -TARGET_EFAULT;
9069                 rt_act->_sa_handler = oact._sa_handler;
9070                 rt_act->sa_mask = oact.sa_mask;
9071                 rt_act->sa_flags = oact.sa_flags;
9072                 unlock_user_struct(rt_act, arg3, 1);
9073             }
9074 #else
9075 #ifdef TARGET_SPARC
9076             target_ulong restorer = arg4;
9077             target_ulong sigsetsize = arg5;
9078 #else
9079             target_ulong sigsetsize = arg4;
9080 #endif
9081             struct target_sigaction *act;
9082             struct target_sigaction *oact;
9083 
9084             if (sigsetsize != sizeof(target_sigset_t)) {
9085                 return -TARGET_EINVAL;
9086             }
9087             if (arg2) {
9088                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9089                     return -TARGET_EFAULT;
9090                 }
9091 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9092                 act->ka_restorer = restorer;
9093 #endif
9094             } else {
9095                 act = NULL;
9096             }
9097             if (arg3) {
9098                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9099                     ret = -TARGET_EFAULT;
9100                     goto rt_sigaction_fail;
9101                 }
9102             } else {
                oact = NULL;
            }
9104             ret = get_errno(do_sigaction(arg1, act, oact));
9105 	rt_sigaction_fail:
9106             if (act)
9107                 unlock_user_struct(act, arg2, 0);
9108             if (oact)
9109                 unlock_user_struct(oact, arg3, 1);
9110 #endif
9111         }
9112         return ret;
9113 #ifdef TARGET_NR_sgetmask /* not on alpha */
9114     case TARGET_NR_sgetmask:
9115         {
9116             sigset_t cur_set;
9117             abi_ulong target_set;
9118             ret = do_sigprocmask(0, NULL, &cur_set);
9119             if (!ret) {
9120                 host_to_target_old_sigset(&target_set, &cur_set);
9121                 ret = target_set;
9122             }
9123         }
9124         return ret;
9125 #endif
9126 #ifdef TARGET_NR_ssetmask /* not on alpha */
9127     case TARGET_NR_ssetmask:
9128         {
9129             sigset_t set, oset;
9130             abi_ulong target_set = arg1;
9131             target_to_host_old_sigset(&set, &target_set);
9132             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9133             if (!ret) {
9134                 host_to_target_old_sigset(&target_set, &oset);
9135                 ret = target_set;
9136             }
9137         }
9138         return ret;
9139 #endif
9140 #ifdef TARGET_NR_sigprocmask
9141     case TARGET_NR_sigprocmask:
9142         {
9143 #if defined(TARGET_ALPHA)
9144             sigset_t set, oldset;
9145             abi_ulong mask;
9146             int how;
9147 
9148             switch (arg1) {
9149             case TARGET_SIG_BLOCK:
9150                 how = SIG_BLOCK;
9151                 break;
9152             case TARGET_SIG_UNBLOCK:
9153                 how = SIG_UNBLOCK;
9154                 break;
9155             case TARGET_SIG_SETMASK:
9156                 how = SIG_SETMASK;
9157                 break;
9158             default:
9159                 return -TARGET_EINVAL;
9160             }
9161             mask = arg2;
9162             target_to_host_old_sigset(&set, &mask);
9163 
9164             ret = do_sigprocmask(how, &set, &oldset);
9165             if (!is_error(ret)) {
9166                 host_to_target_old_sigset(&mask, &oldset);
9167                 ret = mask;
9168                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9169             }
9170 #else
9171             sigset_t set, oldset, *set_ptr;
9172             int how;
9173 
9174             if (arg2) {
9175                 switch (arg1) {
9176                 case TARGET_SIG_BLOCK:
9177                     how = SIG_BLOCK;
9178                     break;
9179                 case TARGET_SIG_UNBLOCK:
9180                     how = SIG_UNBLOCK;
9181                     break;
9182                 case TARGET_SIG_SETMASK:
9183                     how = SIG_SETMASK;
9184                     break;
9185                 default:
9186                     return -TARGET_EINVAL;
9187                 }
9188                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9189                     return -TARGET_EFAULT;
9190                 target_to_host_old_sigset(&set, p);
9191                 unlock_user(p, arg2, 0);
9192                 set_ptr = &set;
9193             } else {
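                /* A NULL set just queries the current mask; "how" is ignored. */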
9194                 how = 0;
9195                 set_ptr = NULL;
9196             }
9197             ret = do_sigprocmask(how, set_ptr, &oldset);
9198             if (!is_error(ret) && arg3) {
9199                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9200                     return -TARGET_EFAULT;
9201                 host_to_target_old_sigset(p, &oldset);
9202                 unlock_user(p, arg3, sizeof(target_sigset_t));
9203             }
9204 #endif
9205         }
9206         return ret;
9207 #endif
9208     case TARGET_NR_rt_sigprocmask:
9209         {
9210             int how = arg1;
9211             sigset_t set, oldset, *set_ptr;
9212 
9213             if (arg4 != sizeof(target_sigset_t)) {
9214                 return -TARGET_EINVAL;
9215             }
9216 
9217             if (arg2) {
9218                 switch(how) {
9219                 case TARGET_SIG_BLOCK:
9220                     how = SIG_BLOCK;
9221                     break;
9222                 case TARGET_SIG_UNBLOCK:
9223                     how = SIG_UNBLOCK;
9224                     break;
9225                 case TARGET_SIG_SETMASK:
9226                     how = SIG_SETMASK;
9227                     break;
9228                 default:
9229                     return -TARGET_EINVAL;
9230                 }
9231                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9232                     return -TARGET_EFAULT;
9233                 target_to_host_sigset(&set, p);
9234                 unlock_user(p, arg2, 0);
9235                 set_ptr = &set;
9236             } else {
9237                 how = 0;
9238                 set_ptr = NULL;
9239             }
9240             ret = do_sigprocmask(how, set_ptr, &oldset);
9241             if (!is_error(ret) && arg3) {
9242                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9243                     return -TARGET_EFAULT;
9244                 host_to_target_sigset(p, &oldset);
9245                 unlock_user(p, arg3, sizeof(target_sigset_t));
9246             }
9247         }
9248         return ret;
9249 #ifdef TARGET_NR_sigpending
9250     case TARGET_NR_sigpending:
9251         {
9252             sigset_t set;
9253             ret = get_errno(sigpending(&set));
9254             if (!is_error(ret)) {
9255                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9256                     return -TARGET_EFAULT;
9257                 host_to_target_old_sigset(p, &set);
9258                 unlock_user(p, arg1, sizeof(target_sigset_t));
9259             }
9260         }
9261         return ret;
9262 #endif
9263     case TARGET_NR_rt_sigpending:
9264         {
9265             sigset_t set;
9266 
9267             /* Yes, this check is >, not != like most. We follow the kernel's
9268              * logic, which does it this way because it implements
9269              * NR_sigpending through the same code path, and in that case
9270              * the old_sigset_t is smaller in size.
9271              */
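            /*
             * So e.g. a sigsetsize smaller than target_sigset_t (such as the
             * old_sigset_t size) is accepted here, just as the kernel would
             * accept it.
             */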
9272             if (arg2 > sizeof(target_sigset_t)) {
9273                 return -TARGET_EINVAL;
9274             }
9275 
9276             ret = get_errno(sigpending(&set));
9277             if (!is_error(ret)) {
9278                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9279                     return -TARGET_EFAULT;
9280                 host_to_target_sigset(p, &set);
9281                 unlock_user(p, arg1, sizeof(target_sigset_t));
9282             }
9283         }
9284         return ret;
9285 #ifdef TARGET_NR_sigsuspend
9286     case TARGET_NR_sigsuspend:
9287         {
9288             TaskState *ts = cpu->opaque;
9289 #if defined(TARGET_ALPHA)
9290             abi_ulong mask = arg1;
9291             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9292 #else
9293             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9294                 return -TARGET_EFAULT;
9295             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9296             unlock_user(p, arg1, 0);
9297 #endif
9298             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9299                                                SIGSET_T_SIZE));
9300             if (ret != -TARGET_ERESTARTSYS) {
9301                 ts->in_sigsuspend = 1;
9302             }
9303         }
9304         return ret;
9305 #endif
9306     case TARGET_NR_rt_sigsuspend:
9307         {
9308             TaskState *ts = cpu->opaque;
9309 
9310             if (arg2 != sizeof(target_sigset_t)) {
9311                 return -TARGET_EINVAL;
9312             }
9313             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9314                 return -TARGET_EFAULT;
9315             target_to_host_sigset(&ts->sigsuspend_mask, p);
9316             unlock_user(p, arg1, 0);
9317             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9318                                                SIGSET_T_SIZE));
9319             if (ret != -TARGET_ERESTARTSYS) {
9320                 ts->in_sigsuspend = 1;
9321             }
9322         }
9323         return ret;
9324 #ifdef TARGET_NR_rt_sigtimedwait
9325     case TARGET_NR_rt_sigtimedwait:
9326         {
9327             sigset_t set;
9328             struct timespec uts, *puts;
9329             siginfo_t uinfo;
9330 
9331             if (arg4 != sizeof(target_sigset_t)) {
9332                 return -TARGET_EINVAL;
9333             }
9334 
9335             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9336                 return -TARGET_EFAULT;
9337             target_to_host_sigset(&set, p);
9338             unlock_user(p, arg1, 0);
9339             if (arg3) {
9340                 puts = &uts;
9341                 if (target_to_host_timespec(puts, arg3)) {
9342                     return -TARGET_EFAULT;
9343                 }
9344             } else {
9345                 puts = NULL;
9346             }
9347             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9348                                                  SIGSET_T_SIZE));
9349             if (!is_error(ret)) {
9350                 if (arg2) {
9351                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9352                                   0);
9353                     if (!p) {
9354                         return -TARGET_EFAULT;
9355                     }
9356                     host_to_target_siginfo(p, &uinfo);
9357                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9358                 }
9359                 ret = host_to_target_signal(ret);
9360             }
9361         }
9362         return ret;
9363 #endif
9364 #ifdef TARGET_NR_rt_sigtimedwait_time64
9365     case TARGET_NR_rt_sigtimedwait_time64:
9366         {
9367             sigset_t set;
9368             struct timespec uts, *puts;
9369             siginfo_t uinfo;
9370 
9371             if (arg4 != sizeof(target_sigset_t)) {
9372                 return -TARGET_EINVAL;
9373             }
9374 
9375             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9376             if (!p) {
9377                 return -TARGET_EFAULT;
9378             }
9379             target_to_host_sigset(&set, p);
9380             unlock_user(p, arg1, 0);
9381             if (arg3) {
9382                 puts = &uts;
9383                 if (target_to_host_timespec64(puts, arg3)) {
9384                     return -TARGET_EFAULT;
9385                 }
9386             } else {
9387                 puts = NULL;
9388             }
9389             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9390                                                  SIGSET_T_SIZE));
9391             if (!is_error(ret)) {
9392                 if (arg2) {
9393                     p = lock_user(VERIFY_WRITE, arg2,
9394                                   sizeof(target_siginfo_t), 0);
9395                     if (!p) {
9396                         return -TARGET_EFAULT;
9397                     }
9398                     host_to_target_siginfo(p, &uinfo);
9399                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9400                 }
9401                 ret = host_to_target_signal(ret);
9402             }
9403         }
9404         return ret;
9405 #endif
9406     case TARGET_NR_rt_sigqueueinfo:
9407         {
9408             siginfo_t uinfo;
9409 
9410             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9411             if (!p) {
9412                 return -TARGET_EFAULT;
9413             }
9414             target_to_host_siginfo(&uinfo, p);
9415             unlock_user(p, arg3, 0);
9416             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9417         }
9418         return ret;
9419     case TARGET_NR_rt_tgsigqueueinfo:
9420         {
9421             siginfo_t uinfo;
9422 
9423             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9424             if (!p) {
9425                 return -TARGET_EFAULT;
9426             }
9427             target_to_host_siginfo(&uinfo, p);
9428             unlock_user(p, arg4, 0);
9429             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9430         }
9431         return ret;
9432 #ifdef TARGET_NR_sigreturn
9433     case TARGET_NR_sigreturn:
9434         if (block_signals()) {
9435             return -TARGET_ERESTARTSYS;
9436         }
9437         return do_sigreturn(cpu_env);
9438 #endif
9439     case TARGET_NR_rt_sigreturn:
9440         if (block_signals()) {
9441             return -TARGET_ERESTARTSYS;
9442         }
9443         return do_rt_sigreturn(cpu_env);
9444     case TARGET_NR_sethostname:
9445         if (!(p = lock_user_string(arg1)))
9446             return -TARGET_EFAULT;
9447         ret = get_errno(sethostname(p, arg2));
9448         unlock_user(p, arg1, 0);
9449         return ret;
9450 #ifdef TARGET_NR_setrlimit
9451     case TARGET_NR_setrlimit:
9452         {
9453             int resource = target_to_host_resource(arg1);
9454             struct target_rlimit *target_rlim;
9455             struct rlimit rlim;
9456             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9457                 return -TARGET_EFAULT;
9458             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9459             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9460             unlock_user_struct(target_rlim, arg2, 0);
9461             /*
9462              * If we just passed through resource limit settings for memory then
9463              * they would also apply to QEMU's own allocations, and QEMU will
9464              * crash or hang or die if its allocations fail. Ideally we would
9465              * track the guest allocations in QEMU and apply the limits ourselves.
9466              * For now, just tell the guest the call succeeded but don't actually
9467              * limit anything.
9468              */
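            /*
             * For example, a guest "ulimit -v" (RLIMIT_AS) is reported below
             * as successful, but QEMU's own address space stays unrestricted.
             */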
9469             if (resource != RLIMIT_AS &&
9470                 resource != RLIMIT_DATA &&
9471                 resource != RLIMIT_STACK) {
9472                 return get_errno(setrlimit(resource, &rlim));
9473             } else {
9474                 return 0;
9475             }
9476         }
9477 #endif
9478 #ifdef TARGET_NR_getrlimit
9479     case TARGET_NR_getrlimit:
9480         {
9481             int resource = target_to_host_resource(arg1);
9482             struct target_rlimit *target_rlim;
9483             struct rlimit rlim;
9484 
9485             ret = get_errno(getrlimit(resource, &rlim));
9486             if (!is_error(ret)) {
9487                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9488                     return -TARGET_EFAULT;
9489                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9490                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9491                 unlock_user_struct(target_rlim, arg2, 1);
9492             }
9493         }
9494         return ret;
9495 #endif
9496     case TARGET_NR_getrusage:
9497         {
9498             struct rusage rusage;
9499             ret = get_errno(getrusage(arg1, &rusage));
9500             if (!is_error(ret)) {
9501                 ret = host_to_target_rusage(arg2, &rusage);
9502             }
9503         }
9504         return ret;
9505 #if defined(TARGET_NR_gettimeofday)
9506     case TARGET_NR_gettimeofday:
9507         {
9508             struct timeval tv;
9509             struct timezone tz;
9510 
9511             ret = get_errno(gettimeofday(&tv, &tz));
9512             if (!is_error(ret)) {
9513                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9514                     return -TARGET_EFAULT;
9515                 }
9516                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9517                     return -TARGET_EFAULT;
9518                 }
9519             }
9520         }
9521         return ret;
9522 #endif
9523 #if defined(TARGET_NR_settimeofday)
9524     case TARGET_NR_settimeofday:
9525         {
9526             struct timeval tv, *ptv = NULL;
9527             struct timezone tz, *ptz = NULL;
9528 
9529             if (arg1) {
9530                 if (copy_from_user_timeval(&tv, arg1)) {
9531                     return -TARGET_EFAULT;
9532                 }
9533                 ptv = &tv;
9534             }
9535 
9536             if (arg2) {
9537                 if (copy_from_user_timezone(&tz, arg2)) {
9538                     return -TARGET_EFAULT;
9539                 }
9540                 ptz = &tz;
9541             }
9542 
9543             return get_errno(settimeofday(ptv, ptz));
9544         }
9545 #endif
9546 #if defined(TARGET_NR_select)
9547     case TARGET_NR_select:
9548 #if defined(TARGET_WANT_NI_OLD_SELECT)
9549         /* some architectures used to have old_select here
9550          * but now return ENOSYS for it.
9551          */
9552         ret = -TARGET_ENOSYS;
9553 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9554         ret = do_old_select(arg1);
9555 #else
9556         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9557 #endif
9558         return ret;
9559 #endif
9560 #ifdef TARGET_NR_pselect6
9561     case TARGET_NR_pselect6:
9562         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9563 #endif
9564 #ifdef TARGET_NR_pselect6_time64
9565     case TARGET_NR_pselect6_time64:
9566         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9567 #endif
9568 #ifdef TARGET_NR_symlink
9569     case TARGET_NR_symlink:
9570         {
9571             void *p2;
9572             p = lock_user_string(arg1);
9573             p2 = lock_user_string(arg2);
9574             if (!p || !p2)
9575                 ret = -TARGET_EFAULT;
9576             else
9577                 ret = get_errno(symlink(p, p2));
9578             unlock_user(p2, arg2, 0);
9579             unlock_user(p, arg1, 0);
9580         }
9581         return ret;
9582 #endif
9583 #if defined(TARGET_NR_symlinkat)
9584     case TARGET_NR_symlinkat:
9585         {
9586             void *p2;
9587             p  = lock_user_string(arg1);
9588             p2 = lock_user_string(arg3);
9589             if (!p || !p2)
9590                 ret = -TARGET_EFAULT;
9591             else
9592                 ret = get_errno(symlinkat(p, arg2, p2));
9593             unlock_user(p2, arg3, 0);
9594             unlock_user(p, arg1, 0);
9595         }
9596         return ret;
9597 #endif
9598 #ifdef TARGET_NR_readlink
9599     case TARGET_NR_readlink:
9600         {
9601             void *p2;
9602             p = lock_user_string(arg1);
9603             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9604             if (!p || !p2) {
9605                 ret = -TARGET_EFAULT;
9606             } else if (!arg3) {
9607                 /* Short circuit this for the magic exe check. */
9608                 ret = -TARGET_EINVAL;
9609             } else if (is_proc_myself((const char *)p, "exe")) {
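                /*
                 * readlink("/proc/self/exe") must name the guest binary
                 * (exec_path), not the QEMU executable, so resolve it here
                 * rather than passing the path through to the host.
                 */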
9610                 char real[PATH_MAX], *temp;
9611                 temp = realpath(exec_path, real);
9612                 /* Return value is # of bytes that we wrote to the buffer. */
9613                 if (temp == NULL) {
9614                     ret = get_errno(-1);
9615                 } else {
9616                     /* Don't worry about sign mismatch as earlier mapping
9617                      * logic would have thrown a bad address error. */
9618                     ret = MIN(strlen(real), arg3);
9619                     /* We cannot NUL terminate the string. */
9620                     memcpy(p2, real, ret);
9621                 }
9622             } else {
9623                 ret = get_errno(readlink(path(p), p2, arg3));
9624             }
9625             unlock_user(p2, arg2, ret);
9626             unlock_user(p, arg1, 0);
9627         }
9628         return ret;
9629 #endif
9630 #if defined(TARGET_NR_readlinkat)
9631     case TARGET_NR_readlinkat:
9632         {
9633             void *p2;
9634             p  = lock_user_string(arg2);
9635             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9636             if (!p || !p2) {
9637                 ret = -TARGET_EFAULT;
9638             } else if (is_proc_myself((const char *)p, "exe")) {
9639                 char real[PATH_MAX], *temp;
9640                 temp = realpath(exec_path, real);
9641                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9642                 snprintf((char *)p2, arg4, "%s", real);
9643             } else {
9644                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9645             }
9646             unlock_user(p2, arg3, ret);
9647             unlock_user(p, arg2, 0);
9648         }
9649         return ret;
9650 #endif
9651 #ifdef TARGET_NR_swapon
9652     case TARGET_NR_swapon:
9653         if (!(p = lock_user_string(arg1)))
9654             return -TARGET_EFAULT;
9655         ret = get_errno(swapon(p, arg2));
9656         unlock_user(p, arg1, 0);
9657         return ret;
9658 #endif
9659     case TARGET_NR_reboot:
9660         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9661            /* arg4 is the RESTART2 command string; ignored in all other cases */
9662            p = lock_user_string(arg4);
9663            if (!p) {
9664                return -TARGET_EFAULT;
9665            }
9666            ret = get_errno(reboot(arg1, arg2, arg3, p));
9667            unlock_user(p, arg4, 0);
9668         } else {
9669            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9670         }
9671         return ret;
9672 #ifdef TARGET_NR_mmap
9673     case TARGET_NR_mmap:
9674 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9675     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9676     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9677     || defined(TARGET_S390X)
9678         {
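            /*
             * On these targets the old mmap() takes a single guest pointer
             * to a six-element argument block rather than passing the
             * arguments in registers.
             */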
9679             abi_ulong *v;
9680             abi_ulong v1, v2, v3, v4, v5, v6;
9681             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9682                 return -TARGET_EFAULT;
9683             v1 = tswapal(v[0]);
9684             v2 = tswapal(v[1]);
9685             v3 = tswapal(v[2]);
9686             v4 = tswapal(v[3]);
9687             v5 = tswapal(v[4]);
9688             v6 = tswapal(v[5]);
9689             unlock_user(v, arg1, 0);
9690             ret = get_errno(target_mmap(v1, v2, v3,
9691                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9692                                         v5, v6));
9693         }
9694 #else
9695         ret = get_errno(target_mmap(arg1, arg2, arg3,
9696                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9697                                     arg5,
9698                                     arg6));
9699 #endif
9700         return ret;
9701 #endif
9702 #ifdef TARGET_NR_mmap2
9703     case TARGET_NR_mmap2:
9704 #ifndef MMAP_SHIFT
9705 #define MMAP_SHIFT 12
9706 #endif
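        /*
         * mmap2 passes its file offset in units of 2^MMAP_SHIFT bytes
         * (normally 4096), hence the shift below.
         */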
9707         ret = target_mmap(arg1, arg2, arg3,
9708                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9709                           arg5, arg6 << MMAP_SHIFT);
9710         return get_errno(ret);
9711 #endif
9712     case TARGET_NR_munmap:
9713         return get_errno(target_munmap(arg1, arg2));
9714     case TARGET_NR_mprotect:
9715         {
9716             TaskState *ts = cpu->opaque;
9717             /* Special hack to detect libc making the stack executable.  */
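            /*
             * PROT_GROWSDOWN asks the kernel to apply the change to the whole
             * grows-down stack mapping; emulate that by extending the request
             * down to the recorded stack limit.
             */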
9718             if ((arg3 & PROT_GROWSDOWN)
9719                 && arg1 >= ts->info->stack_limit
9720                 && arg1 <= ts->info->start_stack) {
9721                 arg3 &= ~PROT_GROWSDOWN;
9722                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9723                 arg1 = ts->info->stack_limit;
9724             }
9725         }
9726         return get_errno(target_mprotect(arg1, arg2, arg3));
9727 #ifdef TARGET_NR_mremap
9728     case TARGET_NR_mremap:
9729         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9730 #endif
9731         /* ??? msync/mlock/munlock are broken for softmmu.  */
9732 #ifdef TARGET_NR_msync
9733     case TARGET_NR_msync:
9734         return get_errno(msync(g2h(arg1), arg2, arg3));
9735 #endif
9736 #ifdef TARGET_NR_mlock
9737     case TARGET_NR_mlock:
9738         return get_errno(mlock(g2h(arg1), arg2));
9739 #endif
9740 #ifdef TARGET_NR_munlock
9741     case TARGET_NR_munlock:
9742         return get_errno(munlock(g2h(arg1), arg2));
9743 #endif
9744 #ifdef TARGET_NR_mlockall
9745     case TARGET_NR_mlockall:
9746         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9747 #endif
9748 #ifdef TARGET_NR_munlockall
9749     case TARGET_NR_munlockall:
9750         return get_errno(munlockall());
9751 #endif
9752 #ifdef TARGET_NR_truncate
9753     case TARGET_NR_truncate:
9754         if (!(p = lock_user_string(arg1)))
9755             return -TARGET_EFAULT;
9756         ret = get_errno(truncate(p, arg2));
9757         unlock_user(p, arg1, 0);
9758         return ret;
9759 #endif
9760 #ifdef TARGET_NR_ftruncate
9761     case TARGET_NR_ftruncate:
9762         return get_errno(ftruncate(arg1, arg2));
9763 #endif
9764     case TARGET_NR_fchmod:
9765         return get_errno(fchmod(arg1, arg2));
9766 #if defined(TARGET_NR_fchmodat)
9767     case TARGET_NR_fchmodat:
9768         if (!(p = lock_user_string(arg2)))
9769             return -TARGET_EFAULT;
9770         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9771         unlock_user(p, arg2, 0);
9772         return ret;
9773 #endif
9774     case TARGET_NR_getpriority:
9775         /* Note that negative values are valid for getpriority, so we must
9776            differentiate based on errno settings.  */
9777         errno = 0;
9778         ret = getpriority(arg1, arg2);
9779         if (ret == -1 && errno != 0) {
9780             return -host_to_target_errno(errno);
9781         }
9782 #ifdef TARGET_ALPHA
9783         /* Return value is the unbiased priority.  Signal no error.  */
9784         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9785 #else
9786         /* Return value is a biased priority to avoid negative numbers.  */
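        /*
         * For example, a process at nice -5 is reported to the guest as 25,
         * matching the kernel's "20 - nice" encoding.
         */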
9787         ret = 20 - ret;
9788 #endif
9789         return ret;
9790     case TARGET_NR_setpriority:
9791         return get_errno(setpriority(arg1, arg2, arg3));
9792 #ifdef TARGET_NR_statfs
9793     case TARGET_NR_statfs:
9794         if (!(p = lock_user_string(arg1))) {
9795             return -TARGET_EFAULT;
9796         }
9797         ret = get_errno(statfs(path(p), &stfs));
9798         unlock_user(p, arg1, 0);
9799     convert_statfs:
9800         if (!is_error(ret)) {
9801             struct target_statfs *target_stfs;
9802 
9803             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9804                 return -TARGET_EFAULT;
9805             __put_user(stfs.f_type, &target_stfs->f_type);
9806             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9807             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9808             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9809             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9810             __put_user(stfs.f_files, &target_stfs->f_files);
9811             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9812             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9813             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9814             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9815             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9816 #ifdef _STATFS_F_FLAGS
9817             __put_user(stfs.f_flags, &target_stfs->f_flags);
9818 #else
9819             __put_user(0, &target_stfs->f_flags);
9820 #endif
9821             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9822             unlock_user_struct(target_stfs, arg2, 1);
9823         }
9824         return ret;
9825 #endif
9826 #ifdef TARGET_NR_fstatfs
9827     case TARGET_NR_fstatfs:
9828         ret = get_errno(fstatfs(arg1, &stfs));
9829         goto convert_statfs;
9830 #endif
9831 #ifdef TARGET_NR_statfs64
9832     case TARGET_NR_statfs64:
9833         if (!(p = lock_user_string(arg1))) {
9834             return -TARGET_EFAULT;
9835         }
9836         ret = get_errno(statfs(path(p), &stfs));
9837         unlock_user(p, arg1, 0);
9838     convert_statfs64:
9839         if (!is_error(ret)) {
9840             struct target_statfs64 *target_stfs;
9841 
9842             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9843                 return -TARGET_EFAULT;
9844             __put_user(stfs.f_type, &target_stfs->f_type);
9845             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9846             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9847             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9848             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9849             __put_user(stfs.f_files, &target_stfs->f_files);
9850             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9851             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9852             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9853             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9854             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9855 #ifdef _STATFS_F_FLAGS
9856             __put_user(stfs.f_flags, &target_stfs->f_flags);
9857 #else
9858             __put_user(0, &target_stfs->f_flags);
9859 #endif
9860             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9861             unlock_user_struct(target_stfs, arg3, 1);
9862         }
9863         return ret;
9864     case TARGET_NR_fstatfs64:
9865         ret = get_errno(fstatfs(arg1, &stfs));
9866         goto convert_statfs64;
9867 #endif
9868 #ifdef TARGET_NR_socketcall
9869     case TARGET_NR_socketcall:
9870         return do_socketcall(arg1, arg2);
9871 #endif
9872 #ifdef TARGET_NR_accept
9873     case TARGET_NR_accept:
9874         return do_accept4(arg1, arg2, arg3, 0);
9875 #endif
9876 #ifdef TARGET_NR_accept4
9877     case TARGET_NR_accept4:
9878         return do_accept4(arg1, arg2, arg3, arg4);
9879 #endif
9880 #ifdef TARGET_NR_bind
9881     case TARGET_NR_bind:
9882         return do_bind(arg1, arg2, arg3);
9883 #endif
9884 #ifdef TARGET_NR_connect
9885     case TARGET_NR_connect:
9886         return do_connect(arg1, arg2, arg3);
9887 #endif
9888 #ifdef TARGET_NR_getpeername
9889     case TARGET_NR_getpeername:
9890         return do_getpeername(arg1, arg2, arg3);
9891 #endif
9892 #ifdef TARGET_NR_getsockname
9893     case TARGET_NR_getsockname:
9894         return do_getsockname(arg1, arg2, arg3);
9895 #endif
9896 #ifdef TARGET_NR_getsockopt
9897     case TARGET_NR_getsockopt:
9898         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9899 #endif
9900 #ifdef TARGET_NR_listen
9901     case TARGET_NR_listen:
9902         return get_errno(listen(arg1, arg2));
9903 #endif
9904 #ifdef TARGET_NR_recv
9905     case TARGET_NR_recv:
9906         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9907 #endif
9908 #ifdef TARGET_NR_recvfrom
9909     case TARGET_NR_recvfrom:
9910         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9911 #endif
9912 #ifdef TARGET_NR_recvmsg
9913     case TARGET_NR_recvmsg:
9914         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9915 #endif
9916 #ifdef TARGET_NR_send
9917     case TARGET_NR_send:
9918         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9919 #endif
9920 #ifdef TARGET_NR_sendmsg
9921     case TARGET_NR_sendmsg:
9922         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9923 #endif
9924 #ifdef TARGET_NR_sendmmsg
9925     case TARGET_NR_sendmmsg:
9926         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9927 #endif
9928 #ifdef TARGET_NR_recvmmsg
9929     case TARGET_NR_recvmmsg:
9930         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9931 #endif
9932 #ifdef TARGET_NR_sendto
9933     case TARGET_NR_sendto:
9934         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9935 #endif
9936 #ifdef TARGET_NR_shutdown
9937     case TARGET_NR_shutdown:
9938         return get_errno(shutdown(arg1, arg2));
9939 #endif
9940 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9941     case TARGET_NR_getrandom:
9942         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9943         if (!p) {
9944             return -TARGET_EFAULT;
9945         }
9946         ret = get_errno(getrandom(p, arg2, arg3));
9947         unlock_user(p, arg1, ret);
9948         return ret;
9949 #endif
9950 #ifdef TARGET_NR_socket
9951     case TARGET_NR_socket:
9952         return do_socket(arg1, arg2, arg3);
9953 #endif
9954 #ifdef TARGET_NR_socketpair
9955     case TARGET_NR_socketpair:
9956         return do_socketpair(arg1, arg2, arg3, arg4);
9957 #endif
9958 #ifdef TARGET_NR_setsockopt
9959     case TARGET_NR_setsockopt:
9960         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9961 #endif
9962 #if defined(TARGET_NR_syslog)
9963     case TARGET_NR_syslog:
9964         {
9965             int len = arg2;
9966 
9967             switch (arg1) {
9968             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9969             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9970             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9971             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9972             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9973             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9974             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9975             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9976                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9977             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9978             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9979             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9980                 {
9981                     if (len < 0) {
9982                         return -TARGET_EINVAL;
9983                     }
9984                     if (len == 0) {
9985                         return 0;
9986                     }
9987                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9988                     if (!p) {
9989                         return -TARGET_EFAULT;
9990                     }
9991                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9992                     unlock_user(p, arg2, arg3);
9993                 }
9994                 return ret;
9995             default:
9996                 return -TARGET_EINVAL;
9997             }
9998         }
9999         break;
10000 #endif
10001     case TARGET_NR_setitimer:
10002         {
10003             struct itimerval value, ovalue, *pvalue;
10004 
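            /*
             * The guest's struct itimerval is two consecutive target_timevals
             * (it_interval followed by it_value), hence the
             * arg2 + sizeof(struct target_timeval) addressing below.
             */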
10005             if (arg2) {
10006                 pvalue = &value;
10007                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10008                     || copy_from_user_timeval(&pvalue->it_value,
10009                                               arg2 + sizeof(struct target_timeval)))
10010                     return -TARGET_EFAULT;
10011             } else {
10012                 pvalue = NULL;
10013             }
10014             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10015             if (!is_error(ret) && arg3) {
10016                 if (copy_to_user_timeval(arg3,
10017                                          &ovalue.it_interval)
10018                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10019                                             &ovalue.it_value))
10020                     return -TARGET_EFAULT;
10021             }
10022         }
10023         return ret;
10024     case TARGET_NR_getitimer:
10025         {
10026             struct itimerval value;
10027 
10028             ret = get_errno(getitimer(arg1, &value));
10029             if (!is_error(ret) && arg2) {
10030                 if (copy_to_user_timeval(arg2,
10031                                          &value.it_interval)
10032                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10033                                             &value.it_value))
10034                     return -TARGET_EFAULT;
10035             }
10036         }
10037         return ret;
10038 #ifdef TARGET_NR_stat
10039     case TARGET_NR_stat:
10040         if (!(p = lock_user_string(arg1))) {
10041             return -TARGET_EFAULT;
10042         }
10043         ret = get_errno(stat(path(p), &st));
10044         unlock_user(p, arg1, 0);
10045         goto do_stat;
10046 #endif
10047 #ifdef TARGET_NR_lstat
10048     case TARGET_NR_lstat:
10049         if (!(p = lock_user_string(arg1))) {
10050             return -TARGET_EFAULT;
10051         }
10052         ret = get_errno(lstat(path(p), &st));
10053         unlock_user(p, arg1, 0);
10054         goto do_stat;
10055 #endif
10056 #ifdef TARGET_NR_fstat
10057     case TARGET_NR_fstat:
10058         {
10059             ret = get_errno(fstat(arg1, &st));
10060 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10061         do_stat:
10062 #endif
10063             if (!is_error(ret)) {
10064                 struct target_stat *target_st;
10065 
10066                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10067                     return -TARGET_EFAULT;
10068                 memset(target_st, 0, sizeof(*target_st));
10069                 __put_user(st.st_dev, &target_st->st_dev);
10070                 __put_user(st.st_ino, &target_st->st_ino);
10071                 __put_user(st.st_mode, &target_st->st_mode);
10072                 __put_user(st.st_uid, &target_st->st_uid);
10073                 __put_user(st.st_gid, &target_st->st_gid);
10074                 __put_user(st.st_nlink, &target_st->st_nlink);
10075                 __put_user(st.st_rdev, &target_st->st_rdev);
10076                 __put_user(st.st_size, &target_st->st_size);
10077                 __put_user(st.st_blksize, &target_st->st_blksize);
10078                 __put_user(st.st_blocks, &target_st->st_blocks);
10079                 __put_user(st.st_atime, &target_st->target_st_atime);
10080                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10081                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10082 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10083     defined(TARGET_STAT_HAVE_NSEC)
10084                 __put_user(st.st_atim.tv_nsec,
10085                            &target_st->target_st_atime_nsec);
10086                 __put_user(st.st_mtim.tv_nsec,
10087                            &target_st->target_st_mtime_nsec);
10088                 __put_user(st.st_ctim.tv_nsec,
10089                            &target_st->target_st_ctime_nsec);
10090 #endif
10091                 unlock_user_struct(target_st, arg2, 1);
10092             }
10093         }
10094         return ret;
10095 #endif
10096     case TARGET_NR_vhangup:
10097         return get_errno(vhangup());
10098 #ifdef TARGET_NR_syscall
10099     case TARGET_NR_syscall:
10100         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10101                           arg6, arg7, arg8, 0);
10102 #endif
10103 #if defined(TARGET_NR_wait4)
10104     case TARGET_NR_wait4:
10105         {
10106             int status;
10107             abi_long status_ptr = arg2;
10108             struct rusage rusage, *rusage_ptr;
10109             abi_ulong target_rusage = arg4;
10110             abi_long rusage_err;
10111             if (target_rusage)
10112                 rusage_ptr = &rusage;
10113             else
10114                 rusage_ptr = NULL;
10115             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10116             if (!is_error(ret)) {
10117                 if (status_ptr && ret) {
10118                     status = host_to_target_waitstatus(status);
10119                     if (put_user_s32(status, status_ptr))
10120                         return -TARGET_EFAULT;
10121                 }
10122                 if (target_rusage) {
10123                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10124                     if (rusage_err) {
10125                         ret = rusage_err;
10126                     }
10127                 }
10128             }
10129         }
10130         return ret;
10131 #endif
10132 #ifdef TARGET_NR_swapoff
10133     case TARGET_NR_swapoff:
10134         if (!(p = lock_user_string(arg1)))
10135             return -TARGET_EFAULT;
10136         ret = get_errno(swapoff(p));
10137         unlock_user(p, arg1, 0);
10138         return ret;
10139 #endif
10140     case TARGET_NR_sysinfo:
10141         {
10142             struct target_sysinfo *target_value;
10143             struct sysinfo value;
10144             ret = get_errno(sysinfo(&value));
10145             if (!is_error(ret) && arg1)
10146             {
10147                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10148                     return -TARGET_EFAULT;
10149                 __put_user(value.uptime, &target_value->uptime);
10150                 __put_user(value.loads[0], &target_value->loads[0]);
10151                 __put_user(value.loads[1], &target_value->loads[1]);
10152                 __put_user(value.loads[2], &target_value->loads[2]);
10153                 __put_user(value.totalram, &target_value->totalram);
10154                 __put_user(value.freeram, &target_value->freeram);
10155                 __put_user(value.sharedram, &target_value->sharedram);
10156                 __put_user(value.bufferram, &target_value->bufferram);
10157                 __put_user(value.totalswap, &target_value->totalswap);
10158                 __put_user(value.freeswap, &target_value->freeswap);
10159                 __put_user(value.procs, &target_value->procs);
10160                 __put_user(value.totalhigh, &target_value->totalhigh);
10161                 __put_user(value.freehigh, &target_value->freehigh);
10162                 __put_user(value.mem_unit, &target_value->mem_unit);
10163                 unlock_user_struct(target_value, arg1, 1);
10164             }
10165         }
10166         return ret;
10167 #ifdef TARGET_NR_ipc
10168     case TARGET_NR_ipc:
10169         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10170 #endif
10171 #ifdef TARGET_NR_semget
10172     case TARGET_NR_semget:
10173         return get_errno(semget(arg1, arg2, arg3));
10174 #endif
10175 #ifdef TARGET_NR_semop
10176     case TARGET_NR_semop:
10177         return do_semtimedop(arg1, arg2, arg3, 0, false);
10178 #endif
10179 #ifdef TARGET_NR_semtimedop
10180     case TARGET_NR_semtimedop:
10181         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10182 #endif
10183 #ifdef TARGET_NR_semtimedop_time64
10184     case TARGET_NR_semtimedop_time64:
10185         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10186 #endif
10187 #ifdef TARGET_NR_semctl
10188     case TARGET_NR_semctl:
10189         return do_semctl(arg1, arg2, arg3, arg4);
10190 #endif
10191 #ifdef TARGET_NR_msgctl
10192     case TARGET_NR_msgctl:
10193         return do_msgctl(arg1, arg2, arg3);
10194 #endif
10195 #ifdef TARGET_NR_msgget
10196     case TARGET_NR_msgget:
10197         return get_errno(msgget(arg1, arg2));
10198 #endif
10199 #ifdef TARGET_NR_msgrcv
10200     case TARGET_NR_msgrcv:
10201         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10202 #endif
10203 #ifdef TARGET_NR_msgsnd
10204     case TARGET_NR_msgsnd:
10205         return do_msgsnd(arg1, arg2, arg3, arg4);
10206 #endif
10207 #ifdef TARGET_NR_shmget
10208     case TARGET_NR_shmget:
10209         return get_errno(shmget(arg1, arg2, arg3));
10210 #endif
10211 #ifdef TARGET_NR_shmctl
10212     case TARGET_NR_shmctl:
10213         return do_shmctl(arg1, arg2, arg3);
10214 #endif
10215 #ifdef TARGET_NR_shmat
10216     case TARGET_NR_shmat:
10217         return do_shmat(cpu_env, arg1, arg2, arg3);
10218 #endif
10219 #ifdef TARGET_NR_shmdt
10220     case TARGET_NR_shmdt:
10221         return do_shmdt(arg1);
10222 #endif
10223     case TARGET_NR_fsync:
10224         return get_errno(fsync(arg1));
10225     case TARGET_NR_clone:
10226         /* Linux manages to have three different orderings for its
10227          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10228          * match the kernel's CONFIG_CLONE_* settings.
10229          * Microblaze is further special in that it uses a sixth
10230          * implicit argument to clone for the TLS pointer.
10231          */
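        /*
         * Roughly, the guest argument orderings handled below are:
         *   default:    flags, newsp, parent_tidptr, child_tidptr, tls
         *   BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
         *   BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
         */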
10232 #if defined(TARGET_MICROBLAZE)
10233         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10234 #elif defined(TARGET_CLONE_BACKWARDS)
10235         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10236 #elif defined(TARGET_CLONE_BACKWARDS2)
10237         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10238 #else
10239         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10240 #endif
10241         return ret;
10242 #ifdef __NR_exit_group
10243         /* new thread calls */
10244     case TARGET_NR_exit_group:
10245         preexit_cleanup(cpu_env, arg1);
10246         return get_errno(exit_group(arg1));
10247 #endif
10248     case TARGET_NR_setdomainname:
10249         if (!(p = lock_user_string(arg1)))
10250             return -TARGET_EFAULT;
10251         ret = get_errno(setdomainname(p, arg2));
10252         unlock_user(p, arg1, 0);
10253         return ret;
10254     case TARGET_NR_uname:
10255         /* no need to transcode because we use the linux syscall */
10256         {
10257             struct new_utsname * buf;
10258 
10259             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10260                 return -TARGET_EFAULT;
10261             ret = get_errno(sys_uname(buf));
10262             if (!is_error(ret)) {
10263                 /* Overwrite the native machine name with whatever is being
10264                    emulated. */
10265                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10266                           sizeof(buf->machine));
10267                 /* Allow the user to override the reported release.  */
10268                 if (qemu_uname_release && *qemu_uname_release) {
10269                     g_strlcpy(buf->release, qemu_uname_release,
10270                               sizeof(buf->release));
10271                 }
10272             }
10273             unlock_user_struct(buf, arg1, 1);
10274         }
10275         return ret;
10276 #ifdef TARGET_I386
10277     case TARGET_NR_modify_ldt:
10278         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10279 #if !defined(TARGET_X86_64)
10280     case TARGET_NR_vm86:
10281         return do_vm86(cpu_env, arg1, arg2);
10282 #endif
10283 #endif
10284 #if defined(TARGET_NR_adjtimex)
10285     case TARGET_NR_adjtimex:
10286         {
10287             struct timex host_buf;
10288 
10289             if (target_to_host_timex(&host_buf, arg1) != 0) {
10290                 return -TARGET_EFAULT;
10291             }
10292             ret = get_errno(adjtimex(&host_buf));
10293             if (!is_error(ret)) {
10294                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10295                     return -TARGET_EFAULT;
10296                 }
10297             }
10298         }
10299         return ret;
10300 #endif
10301 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10302     case TARGET_NR_clock_adjtime:
10303         {
10304             struct timex htx, *phtx = &htx;
10305 
10306             if (target_to_host_timex(phtx, arg2) != 0) {
10307                 return -TARGET_EFAULT;
10308             }
10309             ret = get_errno(clock_adjtime(arg1, phtx));
10310             if (!is_error(ret) && phtx) {
10311                 if (host_to_target_timex(arg2, phtx) != 0) {
10312                     return -TARGET_EFAULT;
10313                 }
10314             }
10315         }
10316         return ret;
10317 #endif
10318 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10319     case TARGET_NR_clock_adjtime64:
10320         {
10321             struct timex htx;
10322 
10323             if (target_to_host_timex64(&htx, arg2) != 0) {
10324                 return -TARGET_EFAULT;
10325             }
10326             ret = get_errno(clock_adjtime(arg1, &htx));
10327             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10328                     return -TARGET_EFAULT;
10329             }
10330         }
10331         return ret;
10332 #endif
10333     case TARGET_NR_getpgid:
10334         return get_errno(getpgid(arg1));
10335     case TARGET_NR_fchdir:
10336         return get_errno(fchdir(arg1));
10337     case TARGET_NR_personality:
10338         return get_errno(personality(arg1));
10339 #ifdef TARGET_NR__llseek /* Not on alpha */
10340     case TARGET_NR__llseek:
10341         {
10342             int64_t res;
10343 #if !defined(__NR_llseek)
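            /*
             * No host _llseek: compose the 64-bit offset from the guest's
             * high (arg2) and low (arg3) halves and use plain lseek().
             */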
10344             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10345             if (res == -1) {
10346                 ret = get_errno(res);
10347             } else {
10348                 ret = 0;
10349             }
10350 #else
10351             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10352 #endif
10353             if ((ret == 0) && put_user_s64(res, arg4)) {
10354                 return -TARGET_EFAULT;
10355             }
10356         }
10357         return ret;
10358 #endif
10359 #ifdef TARGET_NR_getdents
10360     case TARGET_NR_getdents:
10361 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10362 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10363         {
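            /*
             * The host's struct linux_dirent has 64-bit d_ino/d_off here
             * while the 32-bit target expects narrower fields, so each
             * record read from the host is repacked into the guest buffer.
             */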
10364             struct target_dirent *target_dirp;
10365             struct linux_dirent *dirp;
10366             abi_long count = arg3;
10367 
10368             dirp = g_try_malloc(count);
10369             if (!dirp) {
10370                 return -TARGET_ENOMEM;
10371             }
10372 
10373             ret = get_errno(sys_getdents(arg1, dirp, count));
10374             if (!is_error(ret)) {
10375                 struct linux_dirent *de;
9376                 struct target_dirent *tde;
10377                 int len = ret;
10378                 int reclen, treclen;
9379                 int count1, tnamelen;
10380 
9381                 count1 = 0;
10382                 de = dirp;
10383                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10384                     return -TARGET_EFAULT;
9385                 tde = target_dirp;
10386                 while (len > 0) {
10387                     reclen = de->d_reclen;
10388                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10389                     assert(tnamelen >= 0);
10390                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10391                     assert(count1 + treclen <= count);
10392                     tde->d_reclen = tswap16(treclen);
10393                     tde->d_ino = tswapal(de->d_ino);
10394                     tde->d_off = tswapal(de->d_off);
10395                     memcpy(tde->d_name, de->d_name, tnamelen);
10396                     de = (struct linux_dirent *)((char *)de + reclen);
10397                     len -= reclen;
10398                     tde = (struct target_dirent *)((char *)tde + treclen);
9399                     count1 += treclen;
10400                 }
9401                 ret = count1;
10402                 unlock_user(target_dirp, arg2, ret);
10403             }
10404             g_free(dirp);
10405         }
10406 #else
10407         {
10408             struct linux_dirent *dirp;
10409             abi_long count = arg3;
10410 
10411             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10412                 return -TARGET_EFAULT;
10413             ret = get_errno(sys_getdents(arg1, dirp, count));
10414             if (!is_error(ret)) {
10415                 struct linux_dirent *de;
10416                 int len = ret;
10417                 int reclen;
10418                 de = dirp;
10419                 while (len > 0) {
10420                     reclen = de->d_reclen;
10421                     if (reclen > len)
10422                         break;
10423                     de->d_reclen = tswap16(reclen);
10424                     tswapls(&de->d_ino);
10425                     tswapls(&de->d_off);
10426                     de = (struct linux_dirent *)((char *)de + reclen);
10427                     len -= reclen;
10428                 }
10429             }
10430             unlock_user(dirp, arg2, ret);
10431         }
10432 #endif
10433 #else
10434         /* Implement getdents in terms of getdents64 */
10435         {
10436             struct linux_dirent64 *dirp;
10437             abi_long count = arg3;
10438 
10439             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10440             if (!dirp) {
10441                 return -TARGET_EFAULT;
10442             }
10443             ret = get_errno(sys_getdents64(arg1, dirp, count));
10444             if (!is_error(ret)) {
10445                 /* Convert the dirent64 structs to target dirent.  We do this
10446                  * in-place, since we can guarantee that a target_dirent is no
10447                  * larger than a dirent64; however this means we have to be
10448                  * careful to read everything before writing in the new format.
10449                  */
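                /*
                 * The converted record never grows: d_ino and d_off go from
                 * fixed 64-bit fields to abi_long (never wider), and d_type
                 * moves into the trailing byte of the record.
                 */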
10450                 struct linux_dirent64 *de;
10451                 struct target_dirent *tde;
10452                 int len = ret;
10453                 int tlen = 0;
10454 
10455                 de = dirp;
10456                 tde = (struct target_dirent *)dirp;
10457                 while (len > 0) {
10458                     int namelen, treclen;
10459                     int reclen = de->d_reclen;
10460                     uint64_t ino = de->d_ino;
10461                     int64_t off = de->d_off;
10462                     uint8_t type = de->d_type;
10463 
10464                     namelen = strlen(de->d_name);
10465                     treclen = offsetof(struct target_dirent, d_name)
10466                         + namelen + 2;
10467                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10468 
10469                     memmove(tde->d_name, de->d_name, namelen + 1);
10470                     tde->d_ino = tswapal(ino);
10471                     tde->d_off = tswapal(off);
10472                     tde->d_reclen = tswap16(treclen);
10473                     /* The target_dirent type is in what was formerly a padding
10474                      * byte at the end of the structure:
10475                      */
10476                     *(((char *)tde) + treclen - 1) = type;
10477 
10478                     de = (struct linux_dirent64 *)((char *)de + reclen);
10479                     tde = (struct target_dirent *)((char *)tde + treclen);
10480                     len -= reclen;
10481                     tlen += treclen;
10482                 }
10483                 ret = tlen;
10484             }
10485             unlock_user(dirp, arg2, ret);
10486         }
10487 #endif
10488         return ret;
10489 #endif /* TARGET_NR_getdents */
10490 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10491     case TARGET_NR_getdents64:
10492         {
10493             struct linux_dirent64 *dirp;
10494             abi_long count = arg3;
10495             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10496                 return -TARGET_EFAULT;
10497             ret = get_errno(sys_getdents64(arg1, dirp, count));
10498             if (!is_error(ret)) {
10499                 struct linux_dirent64 *de;
10500                 int len = ret;
10501                 int reclen;
10502                 de = dirp;
10503                 while (len > 0) {
10504                     reclen = de->d_reclen;
10505                     if (reclen > len)
10506                         break;
10507                     de->d_reclen = tswap16(reclen);
10508                     tswap64s((uint64_t *)&de->d_ino);
10509                     tswap64s((uint64_t *)&de->d_off);
10510                     de = (struct linux_dirent64 *)((char *)de + reclen);
10511                     len -= reclen;
10512                 }
10513             }
10514             unlock_user(dirp, arg2, ret);
10515         }
10516         return ret;
10517 #endif /* TARGET_NR_getdents64 */
10518 #if defined(TARGET_NR__newselect)
10519     case TARGET_NR__newselect:
10520         return do_select(arg1, arg2, arg3, arg4, arg5);
10521 #endif
10522 #ifdef TARGET_NR_poll
10523     case TARGET_NR_poll:
10524         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10525 #endif
10526 #ifdef TARGET_NR_ppoll
10527     case TARGET_NR_ppoll:
10528         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10529 #endif
10530 #ifdef TARGET_NR_ppoll_time64
10531     case TARGET_NR_ppoll_time64:
10532         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10533 #endif
10534     case TARGET_NR_flock:
10535         /* NOTE: the flock operation constants seem to be the same for
10536            every Linux platform */
10537         return get_errno(safe_flock(arg1, arg2));
10538     case TARGET_NR_readv:
10539         {
10540             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10541             if (vec != NULL) {
10542                 ret = get_errno(safe_readv(arg1, vec, arg3));
10543                 unlock_iovec(vec, arg2, arg3, 1);
10544             } else {
10545                 ret = -host_to_target_errno(errno);
10546             }
10547         }
10548         return ret;
10549     case TARGET_NR_writev:
10550         {
10551             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10552             if (vec != NULL) {
10553                 ret = get_errno(safe_writev(arg1, vec, arg3));
10554                 unlock_iovec(vec, arg2, arg3, 0);
10555             } else {
10556                 ret = -host_to_target_errno(errno);
10557             }
10558         }
10559         return ret;
10560 #if defined(TARGET_NR_preadv)
10561     case TARGET_NR_preadv:
10562         {
10563             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10564             if (vec != NULL) {
10565                 unsigned long low, high;
10566 
10567                 target_to_host_low_high(arg4, arg5, &low, &high);
10568                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10569                 unlock_iovec(vec, arg2, arg3, 1);
10570             } else {
10571                 ret = -host_to_target_errno(errno);
10572             }
10573         }
10574         return ret;
10575 #endif
10576 #if defined(TARGET_NR_pwritev)
10577     case TARGET_NR_pwritev:
10578         {
10579             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10580             if (vec != NULL) {
10581                 unsigned long low, high;
10582 
10583                 target_to_host_low_high(arg4, arg5, &low, &high);
10584                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10585                 unlock_iovec(vec, arg2, arg3, 0);
10586             } else {
10587                 ret = -host_to_target_errno(errno);
10588             }
10589         }
10590         return ret;
10591 #endif
10592     case TARGET_NR_getsid:
10593         return get_errno(getsid(arg1));
10594 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10595     case TARGET_NR_fdatasync:
10596         return get_errno(fdatasync(arg1));
10597 #endif
10598     case TARGET_NR_sched_getaffinity:
10599         {
10600             unsigned int mask_size;
10601             unsigned long *mask;
10602 
10603             /*
10604              * sched_getaffinity needs multiples of ulong, so we need to take
10605              * care of mismatches between target ulong and host ulong sizes.
10606              */
10607             if (arg2 & (sizeof(abi_ulong) - 1)) {
10608                 return -TARGET_EINVAL;
10609             }
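            /*
             * Round the buffer size up to a whole number of host unsigned
             * longs, e.g. a 4-byte request from a 32-bit guest becomes an
             * 8-byte mask on a 64-bit host.
             */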
10610             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10611 
10612             mask = alloca(mask_size);
10613             memset(mask, 0, mask_size);
10614             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10615 
10616             if (!is_error(ret)) {
10617                 if (ret > arg2) {
10618                     /* More data returned than the caller's buffer can hold.
10619                      * This only happens if sizeof(abi_long) < sizeof(long)
10620                      * and the caller passed us a buffer holding an odd number
10621                      * of abi_longs. If the host kernel is actually using the
10622                      * extra 4 bytes then fail EINVAL; otherwise we can just
10623                      * ignore them and only copy the interesting part.
10624                      */
10625                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10626                     if (numcpus > arg2 * 8) {
10627                         return -TARGET_EINVAL;
10628                     }
10629                     ret = arg2;
10630                 }
10631 
10632                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10633                     return -TARGET_EFAULT;
10634                 }
10635             }
10636         }
10637         return ret;
10638     case TARGET_NR_sched_setaffinity:
10639         {
10640             unsigned int mask_size;
10641             unsigned long *mask;
10642 
10643             /*
10644              * sched_setaffinity needs multiples of ulong, so we need to take
10645              * care of mismatches between target ulong and host ulong sizes.
10646              */
10647             if (arg2 & (sizeof(abi_ulong) - 1)) {
10648                 return -TARGET_EINVAL;
10649             }
10650             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10651             mask = alloca(mask_size);
10652 
10653             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10654             if (ret) {
10655                 return ret;
10656             }
10657 
10658             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10659         }
10660     case TARGET_NR_getcpu:
10661         {
10662             unsigned cpu, node;
10663             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10664                                        arg2 ? &node : NULL,
10665                                        NULL));
10666             if (is_error(ret)) {
10667                 return ret;
10668             }
10669             if (arg1 && put_user_u32(cpu, arg1)) {
10670                 return -TARGET_EFAULT;
10671             }
10672             if (arg2 && put_user_u32(node, arg2)) {
10673                 return -TARGET_EFAULT;
10674             }
10675         }
10676         return ret;
10677     case TARGET_NR_sched_setparam:
10678         {
10679             struct sched_param *target_schp;
10680             struct sched_param schp;
10681 
10682             if (arg2 == 0) {
10683                 return -TARGET_EINVAL;
10684             }
10685             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10686                 return -TARGET_EFAULT;
10687             schp.sched_priority = tswap32(target_schp->sched_priority);
10688             unlock_user_struct(target_schp, arg2, 0);
10689             return get_errno(sched_setparam(arg1, &schp));
10690         }
10691     case TARGET_NR_sched_getparam:
10692         {
10693             struct sched_param *target_schp;
10694             struct sched_param schp;
10695 
10696             if (arg2 == 0) {
10697                 return -TARGET_EINVAL;
10698             }
10699             ret = get_errno(sched_getparam(arg1, &schp));
10700             if (!is_error(ret)) {
10701                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10702                     return -TARGET_EFAULT;
10703                 target_schp->sched_priority = tswap32(schp.sched_priority);
10704                 unlock_user_struct(target_schp, arg2, 1);
10705             }
10706         }
10707         return ret;
10708     case TARGET_NR_sched_setscheduler:
10709         {
10710             struct sched_param *target_schp;
10711             struct sched_param schp;
10712             if (arg3 == 0) {
10713                 return -TARGET_EINVAL;
10714             }
10715             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10716                 return -TARGET_EFAULT;
10717             schp.sched_priority = tswap32(target_schp->sched_priority);
10718             unlock_user_struct(target_schp, arg3, 0);
10719             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10720         }
10721     case TARGET_NR_sched_getscheduler:
10722         return get_errno(sched_getscheduler(arg1));
10723     case TARGET_NR_sched_yield:
10724         return get_errno(sched_yield());
10725     case TARGET_NR_sched_get_priority_max:
10726         return get_errno(sched_get_priority_max(arg1));
10727     case TARGET_NR_sched_get_priority_min:
10728         return get_errno(sched_get_priority_min(arg1));
10729 #ifdef TARGET_NR_sched_rr_get_interval
10730     case TARGET_NR_sched_rr_get_interval:
10731         {
10732             struct timespec ts;
10733             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10734             if (!is_error(ret)) {
10735                 ret = host_to_target_timespec(arg2, &ts);
10736             }
10737         }
10738         return ret;
10739 #endif
10740 #ifdef TARGET_NR_sched_rr_get_interval_time64
10741     case TARGET_NR_sched_rr_get_interval_time64:
10742         {
10743             struct timespec ts;
10744             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10745             if (!is_error(ret)) {
10746                 ret = host_to_target_timespec64(arg2, &ts);
10747             }
10748         }
10749         return ret;
10750 #endif
10751 #if defined(TARGET_NR_nanosleep)
10752     case TARGET_NR_nanosleep:
10753         {
10754             struct timespec req, rem;
10755             target_to_host_timespec(&req, arg1);
10756             ret = get_errno(safe_nanosleep(&req, &rem));
10757             if (is_error(ret) && arg2) {
10758                 host_to_target_timespec(arg2, &rem);
10759             }
10760         }
10761         return ret;
10762 #endif
10763     case TARGET_NR_prctl:
10764         switch (arg1) {
10765         case PR_GET_PDEATHSIG:
10766         {
10767             int deathsig;
10768             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10769             if (!is_error(ret) && arg2
10770                 && put_user_s32(deathsig, arg2)) {
10771                 return -TARGET_EFAULT;
10772             }
10773             return ret;
10774         }
10775 #ifdef PR_GET_NAME
10776         case PR_GET_NAME:
10777         {
10778             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10779             if (!name) {
10780                 return -TARGET_EFAULT;
10781             }
10782             ret = get_errno(prctl(arg1, (unsigned long)name,
10783                                   arg3, arg4, arg5));
10784             unlock_user(name, arg2, 16);
10785             return ret;
10786         }
10787         case PR_SET_NAME:
10788         {
10789             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10790             if (!name) {
10791                 return -TARGET_EFAULT;
10792             }
10793             ret = get_errno(prctl(arg1, (unsigned long)name,
10794                                   arg3, arg4, arg5));
10795             unlock_user(name, arg2, 0);
10796             return ret;
10797         }
10798 #endif
10799 #ifdef TARGET_MIPS
10800         case TARGET_PR_GET_FP_MODE:
10801         {
10802             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10803             ret = 0;
10804             if (env->CP0_Status & (1 << CP0St_FR)) {
10805                 ret |= TARGET_PR_FP_MODE_FR;
10806             }
10807             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10808                 ret |= TARGET_PR_FP_MODE_FRE;
10809             }
10810             return ret;
10811         }
10812         case TARGET_PR_SET_FP_MODE:
10813         {
10814             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10815             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10816             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10817             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10818             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10819 
10820             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10821                                             TARGET_PR_FP_MODE_FRE;
10822 
10823             /* If there is nothing to change, return right away, successfully. */
10824             if (old_fr == new_fr && old_fre == new_fre) {
10825                 return 0;
10826             }
10827             /* Check the value is valid */
10828             if (arg2 & ~known_bits) {
10829                 return -TARGET_EOPNOTSUPP;
10830             }
10831             /* Setting FRE without FR is not supported.  */
10832             if (new_fre && !new_fr) {
10833                 return -TARGET_EOPNOTSUPP;
10834             }
10835             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10836                 /* FR1 is not supported */
10837                 return -TARGET_EOPNOTSUPP;
10838             }
10839             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10840                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10841                 /* Cannot set FR=0 */
10842                 return -TARGET_EOPNOTSUPP;
10843             }
10844             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10845                 /* Cannot set FRE=1 */
10846                 return -TARGET_EOPNOTSUPP;
10847             }
10848 
10849             int i;
10850             fpr_t *fpr = env->active_fpu.fpr;
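            /*
             * FR=0 and FR=1 map 64-bit FP values onto the even/odd register
             * pairs differently, so move each value's upper 32 bits between
             * fpr[i] and fpr[i + 1] to match the newly selected mode.
             */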
10851             for (i = 0; i < 32; i += 2) {
10852                 if (!old_fr && new_fr) {
10853                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10854                 } else if (old_fr && !new_fr) {
10855                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10856                 }
10857             }
10858 
10859             if (new_fr) {
10860                 env->CP0_Status |= (1 << CP0St_FR);
10861                 env->hflags |= MIPS_HFLAG_F64;
10862             } else {
10863                 env->CP0_Status &= ~(1 << CP0St_FR);
10864                 env->hflags &= ~MIPS_HFLAG_F64;
10865             }
10866             if (new_fre) {
10867                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10868                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10869                     env->hflags |= MIPS_HFLAG_FRE;
10870                 }
10871             } else {
10872                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10873                 env->hflags &= ~MIPS_HFLAG_FRE;
10874             }
10875 
10876             return 0;
10877         }
10878 #endif /* MIPS */
10879 #ifdef TARGET_AARCH64
10880         case TARGET_PR_SVE_SET_VL:
10881             /*
10882              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10883              * PR_SVE_VL_INHERIT.  Note the kernel definition
10884              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10885              * even though the current architectural maximum is VQ=16.
10886              */
10887             ret = -TARGET_EINVAL;
10888             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10889                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10890                 CPUARMState *env = cpu_env;
10891                 ARMCPU *cpu = env_archcpu(env);
10892                 uint32_t vq, old_vq;
10893 
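                /*
                 * ZCR_EL1.LEN holds the vector length as (VQ - 1), where a
                 * VQ is one 128-bit (16-byte) quadword; clamp the requested
                 * length to what this CPU model supports.
                 */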
10894                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10895                 vq = MAX(arg2 / 16, 1);
10896                 vq = MIN(vq, cpu->sve_max_vq);
10897 
10898                 if (vq < old_vq) {
10899                     aarch64_sve_narrow_vq(env, vq);
10900                 }
10901                 env->vfp.zcr_el[1] = vq - 1;
10902                 arm_rebuild_hflags(env);
10903                 ret = vq * 16;
10904             }
10905             return ret;
10906         case TARGET_PR_SVE_GET_VL:
10907             ret = -TARGET_EINVAL;
10908             {
10909                 ARMCPU *cpu = env_archcpu(cpu_env);
10910                 if (cpu_isar_feature(aa64_sve, cpu)) {
10911                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10912                 }
10913             }
10914             return ret;
10915         case TARGET_PR_PAC_RESET_KEYS:
10916             {
10917                 CPUARMState *env = cpu_env;
10918                 ARMCPU *cpu = env_archcpu(env);
10919 
10920                 if (arg3 || arg4 || arg5) {
10921                     return -TARGET_EINVAL;
10922                 }
10923                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10924                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10925                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10926                                TARGET_PR_PAC_APGAKEY);
10927                     int ret = 0;
10928                     Error *err = NULL;
10929 
10930                     if (arg2 == 0) {
10931                         arg2 = all;
10932                     } else if (arg2 & ~all) {
10933                         return -TARGET_EINVAL;
10934                     }
10935                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10936                         ret |= qemu_guest_getrandom(&env->keys.apia,
10937                                                     sizeof(ARMPACKey), &err);
10938                     }
10939                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10940                         ret |= qemu_guest_getrandom(&env->keys.apib,
10941                                                     sizeof(ARMPACKey), &err);
10942                     }
10943                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10944                         ret |= qemu_guest_getrandom(&env->keys.apda,
10945                                                     sizeof(ARMPACKey), &err);
10946                     }
10947                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10948                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10949                                                     sizeof(ARMPACKey), &err);
10950                     }
10951                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10952                         ret |= qemu_guest_getrandom(&env->keys.apga,
10953                                                     sizeof(ARMPACKey), &err);
10954                     }
10955                     if (ret != 0) {
10956                         /*
10957                          * Some unknown failure in the crypto.  The best
10958                          * we can do is log it and fail the syscall.
10959                          * The real syscall cannot fail this way.
10960                          */
10961                         qemu_log_mask(LOG_UNIMP,
10962                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10963                                       error_get_pretty(err));
10964                         error_free(err);
10965                         return -TARGET_EIO;
10966                     }
10967                     return 0;
10968                 }
10969             }
10970             return -TARGET_EINVAL;
10971 #endif /* AARCH64 */
10972         case PR_GET_SECCOMP:
10973         case PR_SET_SECCOMP:
10974             /* Disable seccomp to prevent the target disabling syscalls we
10975              * need. */
10976             return -TARGET_EINVAL;
10977         default:
10978             /* Most prctl options have no pointer arguments */
10979             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10980         }
10981         break;
10982 #ifdef TARGET_NR_arch_prctl
10983     case TARGET_NR_arch_prctl:
10984         return do_arch_prctl(cpu_env, arg1, arg2);
10985 #endif
10986 #ifdef TARGET_NR_pread64
10987     case TARGET_NR_pread64:
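        /*
         * On targets that pass 64-bit values in aligned register pairs a
         * padding argument is inserted, so the offset halves arrive one
         * argument later; shift them down before reassembling the offset.
         */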
10988         if (regpairs_aligned(cpu_env, num)) {
10989             arg4 = arg5;
10990             arg5 = arg6;
10991         }
10992         if (arg2 == 0 && arg3 == 0) {
10993             /* Special-case NULL buffer and zero length, which should succeed */
10994             p = 0;
10995         } else {
10996             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10997             if (!p) {
10998                 return -TARGET_EFAULT;
10999             }
11000         }
11001         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11002         unlock_user(p, arg2, ret);
11003         return ret;
11004     case TARGET_NR_pwrite64:
11005         if (regpairs_aligned(cpu_env, num)) {
11006             arg4 = arg5;
11007             arg5 = arg6;
11008         }
11009         if (arg2 == 0 && arg3 == 0) {
11010             /* Special-case NULL buffer and zero length, which should succeed */
11011             p = 0;
11012         } else {
11013             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11014             if (!p) {
11015                 return -TARGET_EFAULT;
11016             }
11017         }
11018         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11019         unlock_user(p, arg2, 0);
11020         return ret;
11021 #endif
11022     case TARGET_NR_getcwd:
11023         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11024             return -TARGET_EFAULT;
11025         ret = get_errno(sys_getcwd1(p, arg2));
11026         unlock_user(p, arg1, ret);
11027         return ret;
11028     case TARGET_NR_capget:
11029     case TARGET_NR_capset:
11030     {
11031         struct target_user_cap_header *target_header;
11032         struct target_user_cap_data *target_data = NULL;
11033         struct __user_cap_header_struct header;
11034         struct __user_cap_data_struct data[2];
11035         struct __user_cap_data_struct *dataptr = NULL;
11036         int i, target_datalen;
11037         int data_items = 1;
11038 
11039         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11040             return -TARGET_EFAULT;
11041         }
11042         header.version = tswap32(target_header->version);
11043         header.pid = tswap32(target_header->pid);
11044 
11045         if (header.version != _LINUX_CAPABILITY_VERSION) {
11046             /* Versions 2 and up take a pointer to two user_data structs */
11047             data_items = 2;
11048         }
11049 
11050         target_datalen = sizeof(*target_data) * data_items;
11051 
11052         if (arg2) {
11053             if (num == TARGET_NR_capget) {
11054                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11055             } else {
11056                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11057             }
11058             if (!target_data) {
11059                 unlock_user_struct(target_header, arg1, 0);
11060                 return -TARGET_EFAULT;
11061             }
11062 
11063             if (num == TARGET_NR_capset) {
11064                 for (i = 0; i < data_items; i++) {
11065                     data[i].effective = tswap32(target_data[i].effective);
11066                     data[i].permitted = tswap32(target_data[i].permitted);
11067                     data[i].inheritable = tswap32(target_data[i].inheritable);
11068                 }
11069             }
11070 
11071             dataptr = data;
11072         }
11073 
11074         if (num == TARGET_NR_capget) {
11075             ret = get_errno(capget(&header, dataptr));
11076         } else {
11077             ret = get_errno(capset(&header, dataptr));
11078         }
11079 
11080         /* The kernel always updates version for both capget and capset */
11081         target_header->version = tswap32(header.version);
11082         unlock_user_struct(target_header, arg1, 1);
11083 
11084         if (arg2) {
11085             if (num == TARGET_NR_capget) {
11086                 for (i = 0; i < data_items; i++) {
11087                     target_data[i].effective = tswap32(data[i].effective);
11088                     target_data[i].permitted = tswap32(data[i].permitted);
11089                     target_data[i].inheritable = tswap32(data[i].inheritable);
11090                 }
11091                 unlock_user(target_data, arg2, target_datalen);
11092             } else {
11093                 unlock_user(target_data, arg2, 0);
11094             }
11095         }
11096         return ret;
11097     }
11098     case TARGET_NR_sigaltstack:
11099         return do_sigaltstack(arg1, arg2,
11100                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11101 
11102 #ifdef CONFIG_SENDFILE
11103 #ifdef TARGET_NR_sendfile
11104     case TARGET_NR_sendfile:
11105     {
11106         off_t *offp = NULL;
11107         off_t off;
11108         if (arg3) {
11109             ret = get_user_sal(off, arg3);
11110             if (is_error(ret)) {
11111                 return ret;
11112             }
11113             offp = &off;
11114         }
11115         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11116         if (!is_error(ret) && arg3) {
11117             abi_long ret2 = put_user_sal(off, arg3);
11118             if (is_error(ret2)) {
11119                 ret = ret2;
11120             }
11121         }
11122         return ret;
11123     }
11124 #endif
11125 #ifdef TARGET_NR_sendfile64
11126     case TARGET_NR_sendfile64:
11127     {
11128         off_t *offp = NULL;
11129         off_t off;
11130         if (arg3) {
11131             ret = get_user_s64(off, arg3);
11132             if (is_error(ret)) {
11133                 return ret;
11134             }
11135             offp = &off;
11136         }
11137         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11138         if (!is_error(ret) && arg3) {
11139             abi_long ret2 = put_user_s64(off, arg3);
11140             if (is_error(ret2)) {
11141                 ret = ret2;
11142             }
11143         }
11144         return ret;
11145     }
11146 #endif
11147 #endif
11148 #ifdef TARGET_NR_vfork
11149     case TARGET_NR_vfork:
11150         return get_errno(do_fork(cpu_env,
11151                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11152                          0, 0, 0, 0));
11153 #endif
11154 #ifdef TARGET_NR_ugetrlimit
11155     case TARGET_NR_ugetrlimit:
11156     {
11157         struct rlimit rlim;
11158         int resource = target_to_host_resource(arg1);
11159         ret = get_errno(getrlimit(resource, &rlim));
11160         if (!is_error(ret)) {
11161             struct target_rlimit *target_rlim;
11162             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11163                 return -TARGET_EFAULT;
11164             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11165             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11166             unlock_user_struct(target_rlim, arg2, 1);
11167         }
11168         return ret;
11169     }
11170 #endif
11171 #ifdef TARGET_NR_truncate64
11172     case TARGET_NR_truncate64:
11173         if (!(p = lock_user_string(arg1)))
11174             return -TARGET_EFAULT;
11175         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11176         unlock_user(p, arg1, 0);
11177         return ret;
11178 #endif
11179 #ifdef TARGET_NR_ftruncate64
11180     case TARGET_NR_ftruncate64:
11181         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11182 #endif
11183 #ifdef TARGET_NR_stat64
11184     case TARGET_NR_stat64:
11185         if (!(p = lock_user_string(arg1))) {
11186             return -TARGET_EFAULT;
11187         }
11188         ret = get_errno(stat(path(p), &st));
11189         unlock_user(p, arg1, 0);
11190         if (!is_error(ret))
11191             ret = host_to_target_stat64(cpu_env, arg2, &st);
11192         return ret;
11193 #endif
11194 #ifdef TARGET_NR_lstat64
11195     case TARGET_NR_lstat64:
11196         if (!(p = lock_user_string(arg1))) {
11197             return -TARGET_EFAULT;
11198         }
11199         ret = get_errno(lstat(path(p), &st));
11200         unlock_user(p, arg1, 0);
11201         if (!is_error(ret))
11202             ret = host_to_target_stat64(cpu_env, arg2, &st);
11203         return ret;
11204 #endif
11205 #ifdef TARGET_NR_fstat64
11206     case TARGET_NR_fstat64:
11207         ret = get_errno(fstat(arg1, &st));
11208         if (!is_error(ret))
11209             ret = host_to_target_stat64(cpu_env, arg2, &st);
11210         return ret;
11211 #endif
11212 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11213 #ifdef TARGET_NR_fstatat64
11214     case TARGET_NR_fstatat64:
11215 #endif
11216 #ifdef TARGET_NR_newfstatat
11217     case TARGET_NR_newfstatat:
11218 #endif
11219         if (!(p = lock_user_string(arg2))) {
11220             return -TARGET_EFAULT;
11221         }
11222         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11223         unlock_user(p, arg2, 0);
11224         if (!is_error(ret))
11225             ret = host_to_target_stat64(cpu_env, arg3, &st);
11226         return ret;
11227 #endif
11228 #if defined(TARGET_NR_statx)
11229     case TARGET_NR_statx:
11230         {
11231             struct target_statx *target_stx;
11232             int dirfd = arg1;
11233             int flags = arg3;
11234 
11235             p = lock_user_string(arg2);
11236             if (p == NULL) {
11237                 return -TARGET_EFAULT;
11238             }
11239 #if defined(__NR_statx)
11240             {
11241                 /*
11242                  * It is assumed that struct statx is architecture independent.
11243                  */
11244                 struct target_statx host_stx;
11245                 int mask = arg4;
11246 
11247                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11248                 if (!is_error(ret)) {
11249                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11250                         unlock_user(p, arg2, 0);
11251                         return -TARGET_EFAULT;
11252                     }
11253                 }
11254 
11255                 if (ret != -TARGET_ENOSYS) {
11256                     unlock_user(p, arg2, 0);
11257                     return ret;
11258                 }
11259             }
11260 #endif
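            /*
             * Either the host has no statx(2) or it returned ENOSYS: fall
             * back to fstatat(2) and fill in the statx fields that a plain
             * struct stat can provide.
             */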
11261             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11262             unlock_user(p, arg2, 0);
11263 
11264             if (!is_error(ret)) {
11265                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11266                     return -TARGET_EFAULT;
11267                 }
11268                 memset(target_stx, 0, sizeof(*target_stx));
11269                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11270                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11271                 __put_user(st.st_ino, &target_stx->stx_ino);
11272                 __put_user(st.st_mode, &target_stx->stx_mode);
11273                 __put_user(st.st_uid, &target_stx->stx_uid);
11274                 __put_user(st.st_gid, &target_stx->stx_gid);
11275                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11276                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11277                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11278                 __put_user(st.st_size, &target_stx->stx_size);
11279                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11280                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11281                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11282                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11283                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11284                 unlock_user_struct(target_stx, arg5, 1);
11285             }
11286         }
11287         return ret;
11288 #endif
11289 #ifdef TARGET_NR_lchown
11290     case TARGET_NR_lchown:
11291         if (!(p = lock_user_string(arg1)))
11292             return -TARGET_EFAULT;
11293         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11294         unlock_user(p, arg1, 0);
11295         return ret;
11296 #endif
11297 #ifdef TARGET_NR_getuid
11298     case TARGET_NR_getuid:
11299         return get_errno(high2lowuid(getuid()));
11300 #endif
11301 #ifdef TARGET_NR_getgid
11302     case TARGET_NR_getgid:
11303         return get_errno(high2lowgid(getgid()));
11304 #endif
11305 #ifdef TARGET_NR_geteuid
11306     case TARGET_NR_geteuid:
11307         return get_errno(high2lowuid(geteuid()));
11308 #endif
11309 #ifdef TARGET_NR_getegid
11310     case TARGET_NR_getegid:
11311         return get_errno(high2lowgid(getegid()));
11312 #endif
11313     case TARGET_NR_setreuid:
11314         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11315     case TARGET_NR_setregid:
11316         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11317     case TARGET_NR_getgroups:
11318         {
11319             int gidsetsize = arg1;
11320             target_id *target_grouplist;
11321             gid_t *grouplist;
11322             int i;
11323 
11324             grouplist = alloca(gidsetsize * sizeof(gid_t));
11325             ret = get_errno(getgroups(gidsetsize, grouplist));
11326             if (gidsetsize == 0)
11327                 return ret;
11328             if (!is_error(ret)) {
11329                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11330                 if (!target_grouplist)
11331                     return -TARGET_EFAULT;
11332                 for (i = 0; i < ret; i++)
11333                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11334                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11335             }
11336         }
11337         return ret;
11338     case TARGET_NR_setgroups:
11339         {
11340             int gidsetsize = arg1;
11341             target_id *target_grouplist;
11342             gid_t *grouplist = NULL;
11343             int i;
11344             if (gidsetsize) {
11345                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11346                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11347                 if (!target_grouplist) {
11348                     return -TARGET_EFAULT;
11349                 }
11350                 for (i = 0; i < gidsetsize; i++) {
11351                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11352                 }
11353                 unlock_user(target_grouplist, arg2, 0);
11354             }
11355             return get_errno(setgroups(gidsetsize, grouplist));
11356         }
11357     case TARGET_NR_fchown:
11358         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11359 #if defined(TARGET_NR_fchownat)
11360     case TARGET_NR_fchownat:
11361         if (!(p = lock_user_string(arg2)))
11362             return -TARGET_EFAULT;
11363         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11364                                  low2highgid(arg4), arg5));
11365         unlock_user(p, arg2, 0);
11366         return ret;
11367 #endif
11368 #ifdef TARGET_NR_setresuid
11369     case TARGET_NR_setresuid:
11370         return get_errno(sys_setresuid(low2highuid(arg1),
11371                                        low2highuid(arg2),
11372                                        low2highuid(arg3)));
11373 #endif
11374 #ifdef TARGET_NR_getresuid
11375     case TARGET_NR_getresuid:
11376         {
11377             uid_t ruid, euid, suid;
11378             ret = get_errno(getresuid(&ruid, &euid, &suid));
11379             if (!is_error(ret)) {
11380                 if (put_user_id(high2lowuid(ruid), arg1)
11381                     || put_user_id(high2lowuid(euid), arg2)
11382                     || put_user_id(high2lowuid(suid), arg3))
11383                     return -TARGET_EFAULT;
11384             }
11385         }
11386         return ret;
11387 #endif
11388 #ifdef TARGET_NR_getresgid
11389     case TARGET_NR_setresgid:
11390         return get_errno(sys_setresgid(low2highgid(arg1),
11391                                        low2highgid(arg2),
11392                                        low2highgid(arg3)));
11393 #endif
11394 #ifdef TARGET_NR_getresgid
11395     case TARGET_NR_getresgid:
11396         {
11397             gid_t rgid, egid, sgid;
11398             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11399             if (!is_error(ret)) {
11400                 if (put_user_id(high2lowgid(rgid), arg1)
11401                     || put_user_id(high2lowgid(egid), arg2)
11402                     || put_user_id(high2lowgid(sgid), arg3))
11403                     return -TARGET_EFAULT;
11404             }
11405         }
11406         return ret;
11407 #endif
11408 #ifdef TARGET_NR_chown
11409     case TARGET_NR_chown:
11410         if (!(p = lock_user_string(arg1)))
11411             return -TARGET_EFAULT;
11412         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11413         unlock_user(p, arg1, 0);
11414         return ret;
11415 #endif
11416     case TARGET_NR_setuid:
11417         return get_errno(sys_setuid(low2highuid(arg1)));
11418     case TARGET_NR_setgid:
11419         return get_errno(sys_setgid(low2highgid(arg1)));
11420     case TARGET_NR_setfsuid:
11421         return get_errno(setfsuid(arg1));
11422     case TARGET_NR_setfsgid:
11423         return get_errno(setfsgid(arg1));
11424 
11425 #ifdef TARGET_NR_lchown32
11426     case TARGET_NR_lchown32:
11427         if (!(p = lock_user_string(arg1)))
11428             return -TARGET_EFAULT;
11429         ret = get_errno(lchown(p, arg2, arg3));
11430         unlock_user(p, arg1, 0);
11431         return ret;
11432 #endif
11433 #ifdef TARGET_NR_getuid32
11434     case TARGET_NR_getuid32:
11435         return get_errno(getuid());
11436 #endif
11437 
11438 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11439     /* Alpha specific */
11440     case TARGET_NR_getxuid:
11441         {
11442             uid_t euid;
11443             euid = geteuid();
11444             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11445         }
11446         return get_errno(getuid());
11447 #endif
11448 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11449     /* Alpha specific */
11450     case TARGET_NR_getxgid:
11451         {
11452             gid_t egid;
11453             egid = getegid();
11454             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11455         }
11456         return get_errno(getgid());
11457 #endif
11458 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11459     /* Alpha specific */
11460     case TARGET_NR_osf_getsysinfo:
11461         ret = -TARGET_EOPNOTSUPP;
11462         switch (arg1) {
11463           case TARGET_GSI_IEEE_FP_CONTROL:
11464             {
11465                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11466                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11467 
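                /*
                 * Fold the live exception status bits kept in the hardware
                 * FPCR into the saved software control word before returning
                 * it to the guest.
                 */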
11468                 swcr &= ~SWCR_STATUS_MASK;
11469                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11470 
11471                 if (put_user_u64(swcr, arg2))
11472                     return -TARGET_EFAULT;
11473                 ret = 0;
11474             }
11475             break;
11476 
11477           /* case GSI_IEEE_STATE_AT_SIGNAL:
11478              -- Not implemented in linux kernel.
11479              case GSI_UACPROC:
11480              -- Retrieves current unaligned access state; not much used.
11481              case GSI_PROC_TYPE:
11482              -- Retrieves implver information; surely not used.
11483              case GSI_GET_HWRPB:
11484              -- Grabs a copy of the HWRPB; surely not used.
11485           */
11486         }
11487         return ret;
11488 #endif
11489 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11490     /* Alpha specific */
11491     case TARGET_NR_osf_setsysinfo:
11492         ret = -TARGET_EOPNOTSUPP;
11493         switch (arg1) {
11494           case TARGET_SSI_IEEE_FP_CONTROL:
11495             {
11496                 uint64_t swcr, fpcr;
11497 
11498                 if (get_user_u64(swcr, arg2)) {
11499                     return -TARGET_EFAULT;
11500                 }
11501 
11502                 /*
11503                  * The kernel calls swcr_update_status to update the
11504                  * status bits from the fpcr at every point that it
11505                  * could be queried.  Therefore, we store the status
11506                  * bits only in FPCR.
11507                  */
11508                 ((CPUAlphaState *)cpu_env)->swcr
11509                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11510 
11511                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11512                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11513                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11514                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11515                 ret = 0;
11516             }
11517             break;
11518 
11519           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11520             {
11521                 uint64_t exc, fpcr, fex;
11522 
11523                 if (get_user_u64(exc, arg2)) {
11524                     return -TARGET_EFAULT;
11525                 }
11526                 exc &= SWCR_STATUS_MASK;
11527                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11528 
11529                 /* Old exceptions are not signaled.  */
11530                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11531                 fex = exc & ~fex;
11532                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11533                 fex &= ((CPUArchState *)cpu_env)->swcr;
11534 
11535                 /* Update the hardware fpcr.  */
11536                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11537                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11538 
11539                 if (fex) {
11540                     int si_code = TARGET_FPE_FLTUNK;
11541                     target_siginfo_t info;
11542 
11543                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11544                         si_code = TARGET_FPE_FLTUND;
11545                     }
11546                     if (fex & SWCR_TRAP_ENABLE_INE) {
11547                         si_code = TARGET_FPE_FLTRES;
11548                     }
11549                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11550                         si_code = TARGET_FPE_FLTUND;
11551                     }
11552                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11553                         si_code = TARGET_FPE_FLTOVF;
11554                     }
11555                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11556                         si_code = TARGET_FPE_FLTDIV;
11557                     }
11558                     if (fex & SWCR_TRAP_ENABLE_INV) {
11559                         si_code = TARGET_FPE_FLTINV;
11560                     }
11561 
11562                     info.si_signo = SIGFPE;
11563                     info.si_errno = 0;
11564                     info.si_code = si_code;
11565                     info._sifields._sigfault._addr
11566                         = ((CPUArchState *)cpu_env)->pc;
11567                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11568                                  QEMU_SI_FAULT, &info);
11569                 }
11570                 ret = 0;
11571             }
11572             break;
11573 
11574           /* case SSI_NVPAIRS:
11575              -- Used with SSIN_UACPROC to enable unaligned accesses.
11576              case SSI_IEEE_STATE_AT_SIGNAL:
11577              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11578              -- Not implemented in linux kernel
11579           */
11580         }
11581         return ret;
11582 #endif
11583 #ifdef TARGET_NR_osf_sigprocmask
11584     /* Alpha specific.  */
11585     case TARGET_NR_osf_sigprocmask:
11586         {
11587             abi_ulong mask;
11588             int how;
11589             sigset_t set, oldset;
11590 
11591             switch (arg1) {
11592             case TARGET_SIG_BLOCK:
11593                 how = SIG_BLOCK;
11594                 break;
11595             case TARGET_SIG_UNBLOCK:
11596                 how = SIG_UNBLOCK;
11597                 break;
11598             case TARGET_SIG_SETMASK:
11599                 how = SIG_SETMASK;
11600                 break;
11601             default:
11602                 return -TARGET_EINVAL;
11603             }
11604             mask = arg2;
11605             target_to_host_old_sigset(&set, &mask);
11606             ret = do_sigprocmask(how, &set, &oldset);
11607             if (!ret) {
11608                 host_to_target_old_sigset(&mask, &oldset);
11609                 ret = mask;
11610             }
11611         }
11612         return ret;
11613 #endif
11614 
11615 #ifdef TARGET_NR_getgid32
11616     case TARGET_NR_getgid32:
11617         return get_errno(getgid());
11618 #endif
11619 #ifdef TARGET_NR_geteuid32
11620     case TARGET_NR_geteuid32:
11621         return get_errno(geteuid());
11622 #endif
11623 #ifdef TARGET_NR_getegid32
11624     case TARGET_NR_getegid32:
11625         return get_errno(getegid());
11626 #endif
11627 #ifdef TARGET_NR_setreuid32
11628     case TARGET_NR_setreuid32:
11629         return get_errno(setreuid(arg1, arg2));
11630 #endif
11631 #ifdef TARGET_NR_setregid32
11632     case TARGET_NR_setregid32:
11633         return get_errno(setregid(arg1, arg2));
11634 #endif
11635 #ifdef TARGET_NR_getgroups32
11636     case TARGET_NR_getgroups32:
11637         {
11638             int gidsetsize = arg1;
11639             uint32_t *target_grouplist;
11640             gid_t *grouplist;
11641             int i;
11642 
11643             grouplist = alloca(gidsetsize * sizeof(gid_t));
11644             ret = get_errno(getgroups(gidsetsize, grouplist));
11645             if (gidsetsize == 0)
11646                 return ret;
11647             if (!is_error(ret)) {
11648                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11649                 if (!target_grouplist) {
11650                     return -TARGET_EFAULT;
11651                 }
11652                 for (i = 0; i < ret; i++)
11653                     target_grouplist[i] = tswap32(grouplist[i]);
11654                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11655             }
11656         }
11657         return ret;
11658 #endif
11659 #ifdef TARGET_NR_setgroups32
11660     case TARGET_NR_setgroups32:
11661         {
11662             int gidsetsize = arg1;
11663             uint32_t *target_grouplist;
11664             gid_t *grouplist;
11665             int i;
11666 
11667             grouplist = alloca(gidsetsize * sizeof(gid_t));
11668             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11669             if (!target_grouplist) {
11670                 return -TARGET_EFAULT;
11671             }
11672             for (i = 0; i < gidsetsize; i++)
11673                 grouplist[i] = tswap32(target_grouplist[i]);
11674             unlock_user(target_grouplist, arg2, 0);
11675             return get_errno(setgroups(gidsetsize, grouplist));
11676         }
11677 #endif
11678 #ifdef TARGET_NR_fchown32
11679     case TARGET_NR_fchown32:
11680         return get_errno(fchown(arg1, arg2, arg3));
11681 #endif
11682 #ifdef TARGET_NR_setresuid32
11683     case TARGET_NR_setresuid32:
11684         return get_errno(sys_setresuid(arg1, arg2, arg3));
11685 #endif
11686 #ifdef TARGET_NR_getresuid32
11687     case TARGET_NR_getresuid32:
11688         {
11689             uid_t ruid, euid, suid;
11690             ret = get_errno(getresuid(&ruid, &euid, &suid));
11691             if (!is_error(ret)) {
11692                 if (put_user_u32(ruid, arg1)
11693                     || put_user_u32(euid, arg2)
11694                     || put_user_u32(suid, arg3))
11695                     return -TARGET_EFAULT;
11696             }
11697         }
11698         return ret;
11699 #endif
11700 #ifdef TARGET_NR_setresgid32
11701     case TARGET_NR_setresgid32:
11702         return get_errno(sys_setresgid(arg1, arg2, arg3));
11703 #endif
11704 #ifdef TARGET_NR_getresgid32
11705     case TARGET_NR_getresgid32:
11706         {
11707             gid_t rgid, egid, sgid;
11708             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11709             if (!is_error(ret)) {
11710                 if (put_user_u32(rgid, arg1)
11711                     || put_user_u32(egid, arg2)
11712                     || put_user_u32(sgid, arg3))
11713                     return -TARGET_EFAULT;
11714             }
11715         }
11716         return ret;
11717 #endif
11718 #ifdef TARGET_NR_chown32
11719     case TARGET_NR_chown32:
11720         if (!(p = lock_user_string(arg1)))
11721             return -TARGET_EFAULT;
11722         ret = get_errno(chown(p, arg2, arg3));
11723         unlock_user(p, arg1, 0);
11724         return ret;
11725 #endif
11726 #ifdef TARGET_NR_setuid32
11727     case TARGET_NR_setuid32:
11728         return get_errno(sys_setuid(arg1));
11729 #endif
11730 #ifdef TARGET_NR_setgid32
11731     case TARGET_NR_setgid32:
11732         return get_errno(sys_setgid(arg1));
11733 #endif
11734 #ifdef TARGET_NR_setfsuid32
11735     case TARGET_NR_setfsuid32:
11736         return get_errno(setfsuid(arg1));
11737 #endif
11738 #ifdef TARGET_NR_setfsgid32
11739     case TARGET_NR_setfsgid32:
11740         return get_errno(setfsgid(arg1));
11741 #endif
11742 #ifdef TARGET_NR_mincore
11743     case TARGET_NR_mincore:
11744         {
11745             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11746             if (!a) {
11747                 return -TARGET_ENOMEM;
11748             }
11749             p = lock_user_string(arg3);
11750             if (!p) {
11751                 ret = -TARGET_EFAULT;
11752             } else {
11753                 ret = get_errno(mincore(a, arg2, p));
11754                 unlock_user(p, arg3, ret);
11755             }
11756             unlock_user(a, arg1, 0);
11757         }
11758         return ret;
11759 #endif
11760 #ifdef TARGET_NR_arm_fadvise64_64
11761     case TARGET_NR_arm_fadvise64_64:
11762         /* arm_fadvise64_64 looks like fadvise64_64 but
11763          * with different argument order: fd, advice, offset, len
11764          * rather than the usual fd, offset, len, advice.
11765          * Note that offset and len are both 64-bit so appear as
11766          * pairs of 32-bit registers.
11767          */
11768         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11769                             target_offset64(arg5, arg6), arg2);
11770         return -host_to_target_errno(ret);
11771 #endif
11772 
11773 #if TARGET_ABI_BITS == 32
11774 
11775 #ifdef TARGET_NR_fadvise64_64
11776     case TARGET_NR_fadvise64_64:
11777 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11778         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11779         ret = arg2;
11780         arg2 = arg3;
11781         arg3 = arg4;
11782         arg4 = arg5;
11783         arg5 = arg6;
11784         arg6 = ret;
11785 #else
11786         /* 6 args: fd, offset (high, low), len (high, low), advice */
11787         if (regpairs_aligned(cpu_env, num)) {
11788             /* offset is in (3,4), len in (5,6) and advice in 7 */
11789             arg2 = arg3;
11790             arg3 = arg4;
11791             arg4 = arg5;
11792             arg5 = arg6;
11793             arg6 = arg7;
11794         }
11795 #endif
11796         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11797                             target_offset64(arg4, arg5), arg6);
11798         return -host_to_target_errno(ret);
11799 #endif
11800 
11801 #ifdef TARGET_NR_fadvise64
11802     case TARGET_NR_fadvise64:
11803         /* 5 args: fd, offset (high, low), len, advice */
11804         if (regpairs_aligned(cpu_env, num)) {
11805             /* offset is in (3,4), len in 5 and advice in 6 */
11806             arg2 = arg3;
11807             arg3 = arg4;
11808             arg4 = arg5;
11809             arg5 = arg6;
11810         }
11811         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11812         return -host_to_target_errno(ret);
11813 #endif
11814 
11815 #else /* not a 32-bit ABI */
11816 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11817 #ifdef TARGET_NR_fadvise64_64
11818     case TARGET_NR_fadvise64_64:
11819 #endif
11820 #ifdef TARGET_NR_fadvise64
11821     case TARGET_NR_fadvise64:
11822 #endif
11823 #ifdef TARGET_S390X
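        /*
         * s390 numbers POSIX_FADV_DONTNEED (6) and POSIX_FADV_NOREUSE (7)
         * differently from other architectures, so translate the guest
         * values to the host constants and turn the clashing values into
         * advice the host will reject as invalid.
         */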
11824         switch (arg4) {
11825         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11826         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11827         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11828         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11829         default: break;
11830         }
11831 #endif
11832         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11833 #endif
11834 #endif /* end of 64-bit ABI fadvise handling */
11835 
11836 #ifdef TARGET_NR_madvise
11837     case TARGET_NR_madvise:
11838         /* A straight passthrough may not be safe because qemu sometimes
11839            turns private file-backed mappings into anonymous mappings.
11840            This will break MADV_DONTNEED.
11841            This is a hint, so ignoring and returning success is ok.  */
11842         return 0;
11843 #endif
11844 #ifdef TARGET_NR_fcntl64
11845     case TARGET_NR_fcntl64:
11846     {
11847         int cmd;
11848         struct flock64 fl;
11849         from_flock64_fn *copyfrom = copy_from_user_flock64;
11850         to_flock64_fn *copyto = copy_to_user_flock64;
11851 
11852 #ifdef TARGET_ARM
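        /*
         * The old ARM OABI aligns the 64-bit fields of struct flock64
         * differently from EABI, so old-ABI processes need their own
         * copy-in/copy-out helpers.
         */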
11853         if (!((CPUARMState *)cpu_env)->eabi) {
11854             copyfrom = copy_from_user_oabi_flock64;
11855             copyto = copy_to_user_oabi_flock64;
11856         }
11857 #endif
11858 
11859         cmd = target_to_host_fcntl_cmd(arg2);
11860         if (cmd == -TARGET_EINVAL) {
11861             return cmd;
11862         }
11863 
11864         switch (arg2) {
11865         case TARGET_F_GETLK64:
11866             ret = copyfrom(&fl, arg3);
11867             if (ret) {
11868                 break;
11869             }
11870             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11871             if (ret == 0) {
11872                 ret = copyto(arg3, &fl);
11873             }
11874             break;
11875 
11876         case TARGET_F_SETLK64:
11877         case TARGET_F_SETLKW64:
11878             ret = copyfrom(&fl, arg3);
11879             if (ret) {
11880                 break;
11881             }
11882             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11883             break;
11884         default:
11885             ret = do_fcntl(arg1, arg2, arg3);
11886             break;
11887         }
11888         return ret;
11889     }
11890 #endif
11891 #ifdef TARGET_NR_cacheflush
11892     case TARGET_NR_cacheflush:
11893         /* self-modifying code is handled automatically, so nothing needed */
11894         return 0;
11895 #endif
11896 #ifdef TARGET_NR_getpagesize
11897     case TARGET_NR_getpagesize:
11898         return TARGET_PAGE_SIZE;
11899 #endif
11900     case TARGET_NR_gettid:
11901         return get_errno(sys_gettid());
11902 #ifdef TARGET_NR_readahead
11903     case TARGET_NR_readahead:
11904 #if TARGET_ABI_BITS == 32
11905         if (regpairs_aligned(cpu_env, num)) {
11906             arg2 = arg3;
11907             arg3 = arg4;
11908             arg4 = arg5;
11909         }
11910         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11911 #else
11912         ret = get_errno(readahead(arg1, arg2, arg3));
11913 #endif
11914         return ret;
11915 #endif
11916 #ifdef CONFIG_ATTR
11917 #ifdef TARGET_NR_setxattr
11918     case TARGET_NR_listxattr:
11919     case TARGET_NR_llistxattr:
11920     {
11921         void *p, *b = 0;
11922         if (arg2) {
11923             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11924             if (!b) {
11925                 return -TARGET_EFAULT;
11926             }
11927         }
11928         p = lock_user_string(arg1);
11929         if (p) {
11930             if (num == TARGET_NR_listxattr) {
11931                 ret = get_errno(listxattr(p, b, arg3));
11932             } else {
11933                 ret = get_errno(llistxattr(p, b, arg3));
11934             }
11935         } else {
11936             ret = -TARGET_EFAULT;
11937         }
11938         unlock_user(p, arg1, 0);
11939         unlock_user(b, arg2, arg3);
11940         return ret;
11941     }
11942     case TARGET_NR_flistxattr:
11943     {
11944         void *b = 0;
11945         if (arg2) {
11946             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11947             if (!b) {
11948                 return -TARGET_EFAULT;
11949             }
11950         }
11951         ret = get_errno(flistxattr(arg1, b, arg3));
11952         unlock_user(b, arg2, arg3);
11953         return ret;
11954     }
11955     case TARGET_NR_setxattr:
11956     case TARGET_NR_lsetxattr:
11957         {
11958             void *p, *n, *v = 0;
11959             if (arg3) {
11960                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11961                 if (!v) {
11962                     return -TARGET_EFAULT;
11963                 }
11964             }
11965             p = lock_user_string(arg1);
11966             n = lock_user_string(arg2);
11967             if (p && n) {
11968                 if (num == TARGET_NR_setxattr) {
11969                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11970                 } else {
11971                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11972                 }
11973             } else {
11974                 ret = -TARGET_EFAULT;
11975             }
11976             unlock_user(p, arg1, 0);
11977             unlock_user(n, arg2, 0);
11978             unlock_user(v, arg3, 0);
11979         }
11980         return ret;
11981     case TARGET_NR_fsetxattr:
11982         {
11983             void *n, *v = 0;
11984             if (arg3) {
11985                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11986                 if (!v) {
11987                     return -TARGET_EFAULT;
11988                 }
11989             }
11990             n = lock_user_string(arg2);
11991             if (n) {
11992                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11993             } else {
11994                 ret = -TARGET_EFAULT;
11995             }
11996             unlock_user(n, arg2, 0);
11997             unlock_user(v, arg3, 0);
11998         }
11999         return ret;
12000     case TARGET_NR_getxattr:
12001     case TARGET_NR_lgetxattr:
12002         {
12003             void *p, *n, *v = 0;
12004             if (arg3) {
12005                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12006                 if (!v) {
12007                     return -TARGET_EFAULT;
12008                 }
12009             }
12010             p = lock_user_string(arg1);
12011             n = lock_user_string(arg2);
12012             if (p && n) {
12013                 if (num == TARGET_NR_getxattr) {
12014                     ret = get_errno(getxattr(p, n, v, arg4));
12015                 } else {
12016                     ret = get_errno(lgetxattr(p, n, v, arg4));
12017                 }
12018             } else {
12019                 ret = -TARGET_EFAULT;
12020             }
12021             unlock_user(p, arg1, 0);
12022             unlock_user(n, arg2, 0);
12023             unlock_user(v, arg3, arg4);
12024         }
12025         return ret;
12026     case TARGET_NR_fgetxattr:
12027         {
12028             void *n, *v = 0;
12029             if (arg3) {
12030                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12031                 if (!v) {
12032                     return -TARGET_EFAULT;
12033                 }
12034             }
12035             n = lock_user_string(arg2);
12036             if (n) {
12037                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12038             } else {
12039                 ret = -TARGET_EFAULT;
12040             }
12041             unlock_user(n, arg2, 0);
12042             unlock_user(v, arg3, arg4);
12043         }
12044         return ret;
12045     case TARGET_NR_removexattr:
12046     case TARGET_NR_lremovexattr:
12047         {
12048             void *p, *n;
12049             p = lock_user_string(arg1);
12050             n = lock_user_string(arg2);
12051             if (p && n) {
12052                 if (num == TARGET_NR_removexattr) {
12053                     ret = get_errno(removexattr(p, n));
12054                 } else {
12055                     ret = get_errno(lremovexattr(p, n));
12056                 }
12057             } else {
12058                 ret = -TARGET_EFAULT;
12059             }
12060             unlock_user(p, arg1, 0);
12061             unlock_user(n, arg2, 0);
12062         }
12063         return ret;
12064     case TARGET_NR_fremovexattr:
12065         {
12066             void *n;
12067             n = lock_user_string(arg2);
12068             if (n) {
12069                 ret = get_errno(fremovexattr(arg1, n));
12070             } else {
12071                 ret = -TARGET_EFAULT;
12072             }
12073             unlock_user(n, arg2, 0);
12074         }
12075         return ret;
12076 #endif /* TARGET_NR_setxattr */
12077 #endif /* CONFIG_ATTR */
12078 #ifdef TARGET_NR_set_thread_area
12079     case TARGET_NR_set_thread_area:
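            /*
             * Store the guest TLS pointer in the per-target location: the CP0
             * UserLocal register on MIPS, PR_PID on CRIS, the TaskState for
             * m68k, and a GDT entry via do_set_thread_area() for 32-bit x86.
             */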
12080 #if defined(TARGET_MIPS)
12081       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12082       return 0;
12083 #elif defined(TARGET_CRIS)
12084       if (arg1 & 0xff) {
12085           ret = -TARGET_EINVAL;
12086       } else {
12087           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12088           ret = 0;
12089       }
12090       return ret;
12091 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12092       return do_set_thread_area(cpu_env, arg1);
12093 #elif defined(TARGET_M68K)
12094       {
12095           TaskState *ts = cpu->opaque;
12096           ts->tp_value = arg1;
12097           return 0;
12098       }
12099 #else
12100       return -TARGET_ENOSYS;
12101 #endif
12102 #endif
12103 #ifdef TARGET_NR_get_thread_area
12104     case TARGET_NR_get_thread_area:
12105 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12106         return do_get_thread_area(cpu_env, arg1);
12107 #elif defined(TARGET_M68K)
12108         {
12109             TaskState *ts = cpu->opaque;
12110             return ts->tp_value;
12111         }
12112 #else
12113         return -TARGET_ENOSYS;
12114 #endif
12115 #endif
12116 #ifdef TARGET_NR_getdomainname
12117     case TARGET_NR_getdomainname:
12118         return -TARGET_ENOSYS;
12119 #endif
12120 
12121 #ifdef TARGET_NR_clock_settime
12122     case TARGET_NR_clock_settime:
12123     {
12124         struct timespec ts;
12125 
12126         ret = target_to_host_timespec(&ts, arg2);
12127         if (!is_error(ret)) {
12128             ret = get_errno(clock_settime(arg1, &ts));
12129         }
12130         return ret;
12131     }
12132 #endif
12133 #ifdef TARGET_NR_clock_settime64
12134     case TARGET_NR_clock_settime64:
12135     {
12136         struct timespec ts;
12137 
12138         ret = target_to_host_timespec64(&ts, arg2);
12139         if (!is_error(ret)) {
12140             ret = get_errno(clock_settime(arg1, &ts));
12141         }
12142         return ret;
12143     }
12144 #endif
12145 #ifdef TARGET_NR_clock_gettime
12146     case TARGET_NR_clock_gettime:
12147     {
12148         struct timespec ts;
12149         ret = get_errno(clock_gettime(arg1, &ts));
12150         if (!is_error(ret)) {
12151             ret = host_to_target_timespec(arg2, &ts);
12152         }
12153         return ret;
12154     }
12155 #endif
12156 #ifdef TARGET_NR_clock_gettime64
12157     case TARGET_NR_clock_gettime64:
12158     {
12159         struct timespec ts;
12160         ret = get_errno(clock_gettime(arg1, &ts));
12161         if (!is_error(ret)) {
12162             ret = host_to_target_timespec64(arg2, &ts);
12163         }
12164         return ret;
12165     }
12166 #endif
12167 #ifdef TARGET_NR_clock_getres
12168     case TARGET_NR_clock_getres:
12169     {
12170         struct timespec ts;
12171         ret = get_errno(clock_getres(arg1, &ts));
12172         if (!is_error(ret)) {
12173             host_to_target_timespec(arg2, &ts);
12174         }
12175         return ret;
12176     }
12177 #endif
12178 #ifdef TARGET_NR_clock_getres_time64
12179     case TARGET_NR_clock_getres_time64:
12180     {
12181         struct timespec ts;
12182         ret = get_errno(clock_getres(arg1, &ts));
12183         if (!is_error(ret)) {
12184             host_to_target_timespec64(arg2, &ts);
12185         }
12186         return ret;
12187     }
12188 #endif
12189 #ifdef TARGET_NR_clock_nanosleep
12190     case TARGET_NR_clock_nanosleep:
12191     {
12192         struct timespec ts;
12193         if (target_to_host_timespec(&ts, arg3)) {
12194             return -TARGET_EFAULT;
12195         }
12196         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12197                                              &ts, arg4 ? &ts : NULL));
12198         /*
12199          * If the call is interrupted by a signal handler, it fails with
12200          * -TARGET_EINTR. In that case, if arg4 is not NULL and arg2 is not
12201          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12202          */
12203         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12204             host_to_target_timespec(arg4, &ts)) {
12205               return -TARGET_EFAULT;
12206         }
12207 
12208         return ret;
12209     }
12210 #endif
12211 #ifdef TARGET_NR_clock_nanosleep_time64
12212     case TARGET_NR_clock_nanosleep_time64:
12213     {
12214         struct timespec ts;
12215 
12216         if (target_to_host_timespec64(&ts, arg3)) {
12217             return -TARGET_EFAULT;
12218         }
12219 
12220         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12221                                              &ts, arg4 ? &ts : NULL));
12222 
12223         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12224             host_to_target_timespec64(arg4, &ts)) {
12225             return -TARGET_EFAULT;
12226         }
12227         return ret;
12228     }
12229 #endif
12230 
12231 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12232     case TARGET_NR_set_tid_address:
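              /*
               * The host kernel keeps this pointer and writes through it when
               * the thread exits, so it must be given the host view of the
               * guest address (g2h).
               */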
12233         return get_errno(set_tid_address((int *)g2h(arg1)));
12234 #endif
12235 
12236     case TARGET_NR_tkill:
12237         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12238 
12239     case TARGET_NR_tgkill:
12240         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12241                          target_to_host_signal(arg3)));
12242 
12243 #ifdef TARGET_NR_set_robust_list
12244     case TARGET_NR_set_robust_list:
12245     case TARGET_NR_get_robust_list:
12246         /* The ABI for supporting robust futexes has userspace pass
12247          * the kernel a pointer to a linked list which is updated by
12248          * userspace after the syscall; the list is walked by the kernel
12249          * when the thread exits. Since the linked list in QEMU guest
12250          * memory isn't a valid linked list for the host and we have
12251          * no way to reliably intercept the thread-death event, we can't
12252          * support these. Silently return ENOSYS so that guest userspace
12253          * falls back to a non-robust futex implementation (which should
12254          * be OK except in the corner case of the guest crashing while
12255          * holding a mutex that is shared with another process via
12256          * shared memory).
12257          */
12258         return -TARGET_ENOSYS;
12259 #endif
12260 
12261 #if defined(TARGET_NR_utimensat)
12262     case TARGET_NR_utimensat:
12263         {
12264             struct timespec *tsp, ts[2];
12265             if (!arg3) {
12266                 tsp = NULL;
12267             } else {
12268                 if (target_to_host_timespec(ts, arg3)) {
12269                     return -TARGET_EFAULT;
12270                 }
12271                 if (target_to_host_timespec(ts + 1, arg3 +
12272                                             sizeof(struct target_timespec))) {
12273                     return -TARGET_EFAULT;
12274                 }
12275                 tsp = ts;
12276             }
12277             if (!arg2) {
12278                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12279             } else {
12280                 if (!(p = lock_user_string(arg2))) {
12281                     return -TARGET_EFAULT;
12282                 }
12283                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12284                 unlock_user(p, arg2, 0);
12285             }
12286         }
12287         return ret;
12288 #endif
12289 #ifdef TARGET_NR_utimensat_time64
12290     case TARGET_NR_utimensat_time64:
12291         {
12292             struct timespec *tsp, ts[2];
12293             if (!arg3) {
12294                 tsp = NULL;
12295             } else {
12296                 if (target_to_host_timespec64(ts, arg3)) {
12297                     return -TARGET_EFAULT;
12298                 }
12299                 if (target_to_host_timespec64(ts + 1, arg3 +
12300                                      sizeof(struct target__kernel_timespec))) {
12301                     return -TARGET_EFAULT;
12302                 }
12303                 tsp = ts;
12304             }
12305             if (!arg2) {
12306                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12307             } else {
12308                 p = lock_user_string(arg2);
12309                 if (!p) {
12310                     return -TARGET_EFAULT;
12311                 }
12312                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12313                 unlock_user(p, arg2, 0);
12314             }
12315         }
12316         return ret;
12317 #endif
12318 #ifdef TARGET_NR_futex
12319     case TARGET_NR_futex:
12320         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12321 #endif
12322 #ifdef TARGET_NR_futex_time64
12323     case TARGET_NR_futex_time64:
12324         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12325 #endif
12326 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12327     case TARGET_NR_inotify_init:
12328         ret = get_errno(sys_inotify_init());
12329         if (ret >= 0) {
12330             fd_trans_register(ret, &target_inotify_trans);
12331         }
12332         return ret;
12333 #endif
12334 #ifdef CONFIG_INOTIFY1
12335 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12336     case TARGET_NR_inotify_init1:
12337         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12338                                           fcntl_flags_tbl)));
12339         if (ret >= 0) {
12340             fd_trans_register(ret, &target_inotify_trans);
12341         }
12342         return ret;
12343 #endif
12344 #endif
12345 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12346     case TARGET_NR_inotify_add_watch:
12347         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12348         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12349         unlock_user(p, arg2, 0);
12350         return ret;
12351 #endif
12352 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12353     case TARGET_NR_inotify_rm_watch:
12354         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12355 #endif
12356 
12357 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12358     case TARGET_NR_mq_open:
12359         {
12360             struct mq_attr posix_mq_attr;
12361             struct mq_attr *pposix_mq_attr;
12362             int host_flags;
12363 
12364             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12365             pposix_mq_attr = NULL;
12366             if (arg4) {
12367                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12368                     return -TARGET_EFAULT;
12369                 }
12370                 pposix_mq_attr = &posix_mq_attr;
12371             }
12372             p = lock_user_string(arg1 - 1);
12373             if (!p) {
12374                 return -TARGET_EFAULT;
12375             }
12376             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12377             unlock_user(p, arg1, 0);
12378         }
12379         return ret;
12380 
12381     case TARGET_NR_mq_unlink:
12382         p = lock_user_string(arg1 - 1);
12383         if (!p) {
12384             return -TARGET_EFAULT;
12385         }
12386         ret = get_errno(mq_unlink(p));
12387         unlock_user(p, arg1, 0);
12388         return ret;
12389 
12390 #ifdef TARGET_NR_mq_timedsend
12391     case TARGET_NR_mq_timedsend:
12392         {
12393             struct timespec ts;
12394 
12395             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12396             if (arg5 != 0) {
12397                 if (target_to_host_timespec(&ts, arg5)) {
12398                     return -TARGET_EFAULT;
12399                 }
12400                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12401                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12402                     return -TARGET_EFAULT;
12403                 }
12404             } else {
12405                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12406             }
12407             unlock_user(p, arg2, arg3);
12408         }
12409         return ret;
12410 #endif
12411 #ifdef TARGET_NR_mq_timedsend_time64
12412     case TARGET_NR_mq_timedsend_time64:
12413         {
12414             struct timespec ts;
12415 
12416             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12417             if (arg5 != 0) {
12418                 if (target_to_host_timespec64(&ts, arg5)) {
12419                     return -TARGET_EFAULT;
12420                 }
12421                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12422                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12423                     return -TARGET_EFAULT;
12424                 }
12425             } else {
12426                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12427             }
12428             unlock_user(p, arg2, arg3);
12429         }
12430         return ret;
12431 #endif
12432 
12433 #ifdef TARGET_NR_mq_timedreceive
12434     case TARGET_NR_mq_timedreceive:
12435         {
12436             struct timespec ts;
12437             unsigned int prio;
12438 
12439             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12440             if (arg5 != 0) {
12441                 if (target_to_host_timespec(&ts, arg5)) {
12442                     return -TARGET_EFAULT;
12443                 }
12444                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12445                                                      &prio, &ts));
12446                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12447                     return -TARGET_EFAULT;
12448                 }
12449             } else {
12450                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12451                                                      &prio, NULL));
12452             }
12453             unlock_user(p, arg2, arg3);
12454             if (arg4 != 0) {
12455                 put_user_u32(prio, arg4);
                  }
12456         }
12457         return ret;
12458 #endif
12459 #ifdef TARGET_NR_mq_timedreceive_time64
12460     case TARGET_NR_mq_timedreceive_time64:
12461         {
12462             struct timespec ts;
12463             unsigned int prio;
12464 
12465             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12466             if (arg5 != 0) {
12467                 if (target_to_host_timespec64(&ts, arg5)) {
12468                     return -TARGET_EFAULT;
12469                 }
12470                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12471                                                      &prio, &ts));
12472                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12473                     return -TARGET_EFAULT;
12474                 }
12475             } else {
12476                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12477                                                      &prio, NULL));
12478             }
12479             unlock_user(p, arg2, arg3);
12480             if (arg4 != 0) {
12481                 put_user_u32(prio, arg4);
12482             }
12483         }
12484         return ret;
12485 #endif
12486 
12487     /* Not implemented for now... */
12488 /*     case TARGET_NR_mq_notify: */
12489 /*         break; */
12490 
12491     case TARGET_NR_mq_getsetattr:
12492         {
12493             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12494             ret = 0;
12495             if (arg2 != 0) {
12496                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12497                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12498                                            &posix_mq_attr_out));
12499             } else if (arg3 != 0) {
12500                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12501             }
12502             if (ret == 0 && arg3 != 0) {
12503                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12504             }
12505         }
12506         return ret;
12507 #endif
12508 
12509 #ifdef CONFIG_SPLICE
12510 #ifdef TARGET_NR_tee
12511     case TARGET_NR_tee:
12512         {
12513             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12514         }
12515         return ret;
12516 #endif
12517 #ifdef TARGET_NR_splice
12518     case TARGET_NR_splice:
12519         {
12520             loff_t loff_in, loff_out;
12521             loff_t *ploff_in = NULL, *ploff_out = NULL;
12522             if (arg2) {
12523                 if (get_user_u64(loff_in, arg2)) {
12524                     return -TARGET_EFAULT;
12525                 }
12526                 ploff_in = &loff_in;
12527             }
12528             if (arg4) {
12529                 if (get_user_u64(loff_out, arg4)) {
12530                     return -TARGET_EFAULT;
12531                 }
12532                 ploff_out = &loff_out;
12533             }
12534             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
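                  /*
                   * splice() updates any offsets it was given, so write the
                   * new values back to guest memory.
                   */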
12535             if (arg2) {
12536                 if (put_user_u64(loff_in, arg2)) {
12537                     return -TARGET_EFAULT;
12538                 }
12539             }
12540             if (arg4) {
12541                 if (put_user_u64(loff_out, arg4)) {
12542                     return -TARGET_EFAULT;
12543                 }
12544             }
12545         }
12546         return ret;
12547 #endif
12548 #ifdef TARGET_NR_vmsplice
12549     case TARGET_NR_vmsplice:
12550         {
12551             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12552             if (vec != NULL) {
12553                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12554                 unlock_iovec(vec, arg2, arg3, 0);
12555             } else {
12556                 ret = -host_to_target_errno(errno);
12557             }
12558         }
12559         return ret;
12560 #endif
12561 #endif /* CONFIG_SPLICE */
12562 #ifdef CONFIG_EVENTFD
12563 #if defined(TARGET_NR_eventfd)
12564     case TARGET_NR_eventfd:
12565         ret = get_errno(eventfd(arg1, 0));
12566         if (ret >= 0) {
12567             fd_trans_register(ret, &target_eventfd_trans);
12568         }
12569         return ret;
12570 #endif
12571 #if defined(TARGET_NR_eventfd2)
12572     case TARGET_NR_eventfd2:
12573     {
12574         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12575         if (arg2 & TARGET_O_NONBLOCK) {
12576             host_flags |= O_NONBLOCK;
12577         }
12578         if (arg2 & TARGET_O_CLOEXEC) {
12579             host_flags |= O_CLOEXEC;
12580         }
12581         ret = get_errno(eventfd(arg1, host_flags));
12582         if (ret >= 0) {
12583             fd_trans_register(ret, &target_eventfd_trans);
12584         }
12585         return ret;
12586     }
12587 #endif
12588 #endif /* CONFIG_EVENTFD  */
12589 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12590     case TARGET_NR_fallocate:
12591 #if TARGET_ABI_BITS == 32
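              /*
               * On 32-bit ABIs the 64-bit offset and length each arrive split
               * across two registers; reassemble them before calling the host.
               */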
12592         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12593                                   target_offset64(arg5, arg6)));
12594 #else
12595         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12596 #endif
12597         return ret;
12598 #endif
12599 #if defined(CONFIG_SYNC_FILE_RANGE)
12600 #if defined(TARGET_NR_sync_file_range)
12601     case TARGET_NR_sync_file_range:
12602 #if TARGET_ABI_BITS == 32
12603 #if defined(TARGET_MIPS)
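              /*
               * MIPS o32 passes 64-bit syscall arguments in aligned register
               * pairs, so a padding argument shifts the offsets up by one.
               */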
12604         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12605                                         target_offset64(arg5, arg6), arg7));
12606 #else
12607         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12608                                         target_offset64(arg4, arg5), arg6));
12609 #endif /* !TARGET_MIPS */
12610 #else
12611         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12612 #endif
12613         return ret;
12614 #endif
12615 #if defined(TARGET_NR_sync_file_range2) || \
12616     defined(TARGET_NR_arm_sync_file_range)
12617 #if defined(TARGET_NR_sync_file_range2)
12618     case TARGET_NR_sync_file_range2:
12619 #endif
12620 #if defined(TARGET_NR_arm_sync_file_range)
12621     case TARGET_NR_arm_sync_file_range:
12622 #endif
12623         /* This is like sync_file_range but the arguments are reordered */
12624 #if TARGET_ABI_BITS == 32
12625         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12626                                         target_offset64(arg5, arg6), arg2));
12627 #else
12628         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12629 #endif
12630         return ret;
12631 #endif
12632 #endif
12633 #if defined(TARGET_NR_signalfd4)
12634     case TARGET_NR_signalfd4:
12635         return do_signalfd4(arg1, arg2, arg4);
12636 #endif
12637 #if defined(TARGET_NR_signalfd)
12638     case TARGET_NR_signalfd:
12639         return do_signalfd4(arg1, arg2, 0);
12640 #endif
12641 #if defined(CONFIG_EPOLL)
12642 #if defined(TARGET_NR_epoll_create)
12643     case TARGET_NR_epoll_create:
12644         return get_errno(epoll_create(arg1));
12645 #endif
12646 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12647     case TARGET_NR_epoll_create1:
12648         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12649 #endif
12650 #if defined(TARGET_NR_epoll_ctl)
12651     case TARGET_NR_epoll_ctl:
12652     {
12653         struct epoll_event ep;
12654         struct epoll_event *epp = 0;
12655         if (arg4) {
12656             if (arg2 != EPOLL_CTL_DEL) {
12657                 struct target_epoll_event *target_ep;
12658                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12659                     return -TARGET_EFAULT;
12660                 }
12661                 ep.events = tswap32(target_ep->events);
12662                 /*
12663                  * The epoll_data_t union is just opaque data to the kernel,
12664                  * so we transfer all 64 bits across and need not worry what
12665                  * actual data type it is.
12666                  */
12667                 ep.data.u64 = tswap64(target_ep->data.u64);
12668                 unlock_user_struct(target_ep, arg4, 0);
12669             }
12670             /*
12671              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12672              * non-null pointer, even though this argument is ignored.
12673              */
12675             epp = &ep;
12676         }
12677         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12678     }
12679 #endif
12680 
12681 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12682 #if defined(TARGET_NR_epoll_wait)
12683     case TARGET_NR_epoll_wait:
12684 #endif
12685 #if defined(TARGET_NR_epoll_pwait)
12686     case TARGET_NR_epoll_pwait:
12687 #endif
12688     {
12689         struct target_epoll_event *target_ep;
12690         struct epoll_event *ep;
12691         int epfd = arg1;
12692         int maxevents = arg3;
12693         int timeout = arg4;
12694 
12695         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12696             return -TARGET_EINVAL;
12697         }
12698 
12699         target_ep = lock_user(VERIFY_WRITE, arg2,
12700                               maxevents * sizeof(struct target_epoll_event), 1);
12701         if (!target_ep) {
12702             return -TARGET_EFAULT;
12703         }
12704 
12705         ep = g_try_new(struct epoll_event, maxevents);
12706         if (!ep) {
12707             unlock_user(target_ep, arg2, 0);
12708             return -TARGET_ENOMEM;
12709         }
12710 
12711         switch (num) {
12712 #if defined(TARGET_NR_epoll_pwait)
12713         case TARGET_NR_epoll_pwait:
12714         {
12715             target_sigset_t *target_set;
12716             sigset_t _set, *set = &_set;
12717 
12718             if (arg5) {
12719                 if (arg6 != sizeof(target_sigset_t)) {
12720                     ret = -TARGET_EINVAL;
12721                     break;
12722                 }
12723 
12724                 target_set = lock_user(VERIFY_READ, arg5,
12725                                        sizeof(target_sigset_t), 1);
12726                 if (!target_set) {
12727                     ret = -TARGET_EFAULT;
12728                     break;
12729                 }
12730                 target_to_host_sigset(set, target_set);
12731                 unlock_user(target_set, arg5, 0);
12732             } else {
12733                 set = NULL;
12734             }
12735 
12736             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12737                                              set, SIGSET_T_SIZE));
12738             break;
12739         }
12740 #endif
12741 #if defined(TARGET_NR_epoll_wait)
12742         case TARGET_NR_epoll_wait:
12743             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12744                                              NULL, 0));
12745             break;
12746 #endif
12747         default:
12748             ret = -TARGET_ENOSYS;
12749         }
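              /*
               * On success ret is the number of ready events; convert only
               * those entries back to the guest's layout and byte order.
               */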
12750         if (!is_error(ret)) {
12751             int i;
12752             for (i = 0; i < ret; i++) {
12753                 target_ep[i].events = tswap32(ep[i].events);
12754                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12755             }
12756             unlock_user(target_ep, arg2,
12757                         ret * sizeof(struct target_epoll_event));
12758         } else {
12759             unlock_user(target_ep, arg2, 0);
12760         }
12761         g_free(ep);
12762         return ret;
12763     }
12764 #endif
12765 #endif
12766 #ifdef TARGET_NR_prlimit64
12767     case TARGET_NR_prlimit64:
12768     {
12769         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12770         struct target_rlimit64 *target_rnew, *target_rold;
12771         struct host_rlimit64 rnew, rold, *rnewp = 0;
12772         int resource = target_to_host_resource(arg2);
12773 
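              /*
               * New limits for memory-related resources (AS/DATA/STACK) are
               * not passed to the host, since they would also constrain QEMU's
               * own allocations; only the old values are queried for them.
               */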
12774         if (arg3 && (resource != RLIMIT_AS &&
12775                      resource != RLIMIT_DATA &&
12776                      resource != RLIMIT_STACK)) {
12777             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12778                 return -TARGET_EFAULT;
12779             }
12780             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12781             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12782             unlock_user_struct(target_rnew, arg3, 0);
12783             rnewp = &rnew;
12784         }
12785 
12786         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12787         if (!is_error(ret) && arg4) {
12788             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12789                 return -TARGET_EFAULT;
12790             }
12791             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12792             target_rold->rlim_max = tswap64(rold.rlim_max);
12793             unlock_user_struct(target_rold, arg4, 1);
12794         }
12795         return ret;
12796     }
12797 #endif
12798 #ifdef TARGET_NR_gethostname
12799     case TARGET_NR_gethostname:
12800     {
12801         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12802         if (name) {
12803             ret = get_errno(gethostname(name, arg2));
12804             unlock_user(name, arg1, arg2);
12805         } else {
12806             ret = -TARGET_EFAULT;
12807         }
12808         return ret;
12809     }
12810 #endif
12811 #ifdef TARGET_NR_atomic_cmpxchg_32
12812     case TARGET_NR_atomic_cmpxchg_32:
12813     {
12814         /* should use start_exclusive from main.c */
12815         abi_ulong mem_value;
12816         if (get_user_u32(mem_value, arg6)) {
12817             target_siginfo_t info;
12818             info.si_signo = SIGSEGV;
12819             info.si_errno = 0;
12820             info.si_code = TARGET_SEGV_MAPERR;
12821             info._sifields._sigfault._addr = arg6;
12822             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12823                          QEMU_SI_FAULT, &info);
12824             return 0xdeadbeef;
12825 
12826         }
12827         if (mem_value == arg2) {
12828             put_user_u32(arg1, arg6);
              }
12829         return mem_value;
12830     }
12831 #endif
12832 #ifdef TARGET_NR_atomic_barrier
12833     case TARGET_NR_atomic_barrier:
12834         /* Like the kernel implementation and the QEMU Arm barrier,
12835            treat this as a no-op. */
12836         return 0;
12837 #endif
12838 
12839 #ifdef TARGET_NR_timer_create
12840     case TARGET_NR_timer_create:
12841     {
12842         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12843 
12844         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12845 
12846         int clkid = arg1;
12847         int timer_index = next_free_host_timer();
12848 
12849         if (timer_index < 0) {
12850             ret = -TARGET_EAGAIN;
12851         } else {
12852             timer_t *phtimer = g_posix_timers + timer_index;
12853 
12854             if (arg2) {
12855                 phost_sevp = &host_sevp;
12856                 ret = target_to_host_sigevent(phost_sevp, arg2);
12857                 if (ret != 0) {
12858                     return ret;
12859                 }
12860             }
12861 
12862             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12863             if (ret) {
12864                 phtimer = NULL;
12865             } else {
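                      /*
                       * The id returned to the guest encodes the g_posix_timers
                       * index together with TIMER_MAGIC; get_timer_id() checks
                       * and strips the tag on subsequent timer_* calls.
                       */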
12866                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12867                     return -TARGET_EFAULT;
12868                 }
12869             }
12870         }
12871         return ret;
12872     }
12873 #endif
12874 
12875 #ifdef TARGET_NR_timer_settime
12876     case TARGET_NR_timer_settime:
12877     {
12878         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12879          * struct itimerspec *old_value */
12880         target_timer_t timerid = get_timer_id(arg1);
12881 
12882         if (timerid < 0) {
12883             ret = timerid;
12884         } else if (arg3 == 0) {
12885             ret = -TARGET_EINVAL;
12886         } else {
12887             timer_t htimer = g_posix_timers[timerid];
12888             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12889 
12890             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12891                 return -TARGET_EFAULT;
12892             }
12893             ret = get_errno(
12894                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12895             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12896                 return -TARGET_EFAULT;
12897             }
12898         }
12899         return ret;
12900     }
12901 #endif
12902 
12903 #ifdef TARGET_NR_timer_settime64
12904     case TARGET_NR_timer_settime64:
12905     {
12906         target_timer_t timerid = get_timer_id(arg1);
12907 
12908         if (timerid < 0) {
12909             ret = timerid;
12910         } else if (arg3 == 0) {
12911             ret = -TARGET_EINVAL;
12912         } else {
12913             timer_t htimer = g_posix_timers[timerid];
12914             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12915 
12916             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12917                 return -TARGET_EFAULT;
12918             }
12919             ret = get_errno(
12920                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12921             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12922                 return -TARGET_EFAULT;
12923             }
12924         }
12925         return ret;
12926     }
12927 #endif
12928 
12929 #ifdef TARGET_NR_timer_gettime
12930     case TARGET_NR_timer_gettime:
12931     {
12932         /* args: timer_t timerid, struct itimerspec *curr_value */
12933         target_timer_t timerid = get_timer_id(arg1);
12934 
12935         if (timerid < 0) {
12936             ret = timerid;
12937         } else if (!arg2) {
12938             ret = -TARGET_EFAULT;
12939         } else {
12940             timer_t htimer = g_posix_timers[timerid];
12941             struct itimerspec hspec;
12942             ret = get_errno(timer_gettime(htimer, &hspec));
12943 
12944             if (host_to_target_itimerspec(arg2, &hspec)) {
12945                 ret = -TARGET_EFAULT;
12946             }
12947         }
12948         return ret;
12949     }
12950 #endif
12951 
12952 #ifdef TARGET_NR_timer_gettime64
12953     case TARGET_NR_timer_gettime64:
12954     {
12955         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12956         target_timer_t timerid = get_timer_id(arg1);
12957 
12958         if (timerid < 0) {
12959             ret = timerid;
12960         } else if (!arg2) {
12961             ret = -TARGET_EFAULT;
12962         } else {
12963             timer_t htimer = g_posix_timers[timerid];
12964             struct itimerspec hspec;
12965             ret = get_errno(timer_gettime(htimer, &hspec));
12966 
12967             if (host_to_target_itimerspec64(arg2, &hspec)) {
12968                 ret = -TARGET_EFAULT;
12969             }
12970         }
12971         return ret;
12972     }
12973 #endif
12974 
12975 #ifdef TARGET_NR_timer_getoverrun
12976     case TARGET_NR_timer_getoverrun:
12977     {
12978         /* args: timer_t timerid */
12979         target_timer_t timerid = get_timer_id(arg1);
12980 
12981         if (timerid < 0) {
12982             ret = timerid;
12983         } else {
12984             timer_t htimer = g_posix_timers[timerid];
12985             ret = get_errno(timer_getoverrun(htimer));
12986         }
12987         return ret;
12988     }
12989 #endif
12990 
12991 #ifdef TARGET_NR_timer_delete
12992     case TARGET_NR_timer_delete:
12993     {
12994         /* args: timer_t timerid */
12995         target_timer_t timerid = get_timer_id(arg1);
12996 
12997         if (timerid < 0) {
12998             ret = timerid;
12999         } else {
13000             timer_t htimer = g_posix_timers[timerid];
13001             ret = get_errno(timer_delete(htimer));
13002             g_posix_timers[timerid] = 0;
13003         }
13004         return ret;
13005     }
13006 #endif
13007 
13008 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13009     case TARGET_NR_timerfd_create:
13010         return get_errno(timerfd_create(arg1,
13011                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13012 #endif
13013 
13014 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13015     case TARGET_NR_timerfd_gettime:
13016         {
13017             struct itimerspec its_curr;
13018 
13019             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13020 
13021             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13022                 return -TARGET_EFAULT;
13023             }
13024         }
13025         return ret;
13026 #endif
13027 
13028 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13029     case TARGET_NR_timerfd_gettime64:
13030         {
13031             struct itimerspec its_curr;
13032 
13033             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13034 
13035             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13036                 return -TARGET_EFAULT;
13037             }
13038         }
13039         return ret;
13040 #endif
13041 
13042 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13043     case TARGET_NR_timerfd_settime:
13044         {
13045             struct itimerspec its_new, its_old, *p_new;
13046 
13047             if (arg3) {
13048                 if (target_to_host_itimerspec(&its_new, arg3)) {
13049                     return -TARGET_EFAULT;
13050                 }
13051                 p_new = &its_new;
13052             } else {
13053                 p_new = NULL;
13054             }
13055 
13056             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13057 
13058             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13059                 return -TARGET_EFAULT;
13060             }
13061         }
13062         return ret;
13063 #endif
13064 
13065 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13066     case TARGET_NR_timerfd_settime64:
13067         {
13068             struct itimerspec its_new, its_old, *p_new;
13069 
13070             if (arg3) {
13071                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13072                     return -TARGET_EFAULT;
13073                 }
13074                 p_new = &its_new;
13075             } else {
13076                 p_new = NULL;
13077             }
13078 
13079             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13080 
13081             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13082                 return -TARGET_EFAULT;
13083             }
13084         }
13085         return ret;
13086 #endif
13087 
13088 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13089     case TARGET_NR_ioprio_get:
13090         return get_errno(ioprio_get(arg1, arg2));
13091 #endif
13092 
13093 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13094     case TARGET_NR_ioprio_set:
13095         return get_errno(ioprio_set(arg1, arg2, arg3));
13096 #endif
13097 
13098 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13099     case TARGET_NR_setns:
13100         return get_errno(setns(arg1, arg2));
13101 #endif
13102 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13103     case TARGET_NR_unshare:
13104         return get_errno(unshare(arg1));
13105 #endif
13106 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13107     case TARGET_NR_kcmp:
13108         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13109 #endif
13110 #ifdef TARGET_NR_swapcontext
13111     case TARGET_NR_swapcontext:
13112         /* PowerPC specific.  */
13113         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13114 #endif
13115 #ifdef TARGET_NR_memfd_create
13116     case TARGET_NR_memfd_create:
13117         p = lock_user_string(arg1);
13118         if (!p) {
13119             return -TARGET_EFAULT;
13120         }
13121         ret = get_errno(memfd_create(p, arg2));
13122         fd_trans_unregister(ret);
13123         unlock_user(p, arg1, 0);
13124         return ret;
13125 #endif
13126 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13127     case TARGET_NR_membarrier:
13128         return get_errno(membarrier(arg1, arg2));
13129 #endif
13130 
13131 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13132     case TARGET_NR_copy_file_range:
13133         {
13134             loff_t inoff, outoff;
13135             loff_t *pinoff = NULL, *poutoff = NULL;
13136 
13137             if (arg2) {
13138                 if (get_user_u64(inoff, arg2)) {
13139                     return -TARGET_EFAULT;
13140                 }
13141                 pinoff = &inoff;
13142             }
13143             if (arg4) {
13144                 if (get_user_u64(outoff, arg4)) {
13145                     return -TARGET_EFAULT;
13146                 }
13147                 poutoff = &outoff;
13148             }
13149             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13150                                                  arg5, arg6));
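                  /*
                   * The host updates the offsets when data was copied; mirror
                   * the new values back into guest memory.
                   */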
13151             if (!is_error(ret) && ret > 0) {
13152                 if (arg2) {
13153                     if (put_user_u64(inoff, arg2)) {
13154                         return -TARGET_EFAULT;
13155                     }
13156                 }
13157                 if (arg4) {
13158                     if (put_user_u64(outoff, arg4)) {
13159                         return -TARGET_EFAULT;
13160                     }
13161                 }
13162             }
13163         }
13164         return ret;
13165 #endif
13166 
13167     default:
13168         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13169         return -TARGET_ENOSYS;
13170     }
13171     return ret;
13172 }
13173 
13174 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13175                     abi_long arg2, abi_long arg3, abi_long arg4,
13176                     abi_long arg5, abi_long arg6, abi_long arg7,
13177                     abi_long arg8)
13178 {
13179     CPUState *cpu = env_cpu(cpu_env);
13180     abi_long ret;
13181 
13182 #ifdef DEBUG_ERESTARTSYS
13183     /* Debug-only code for exercising the syscall-restart code paths
13184      * in the per-architecture cpu main loops: restart every syscall
13185      * the guest makes once before letting it through.
13186      */
13187     {
13188         static bool flag;
13189         flag = !flag;
13190         if (flag) {
13191             return -TARGET_ERESTARTSYS;
13192         }
13193     }
13194 #endif
13195 
13196     record_syscall_start(cpu, num, arg1,
13197                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13198 
13199     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13200         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13201     }
13202 
13203     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13204                       arg5, arg6, arg7, arg8);
13205 
13206     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13207         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13208                           arg3, arg4, arg5, arg6);
13209     }
13210 
13211     record_syscall_return(cpu, num, ret);
13212     return ret;
13213 }
13214