xref: /openbmc/qemu/linux-user/syscall.c (revision a8dc82ce)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
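/*
 * Illustrative example of how the masks above classify a clone() request
 * (exact flag sets vary by libc version): a typical glibc pthread_create()
 * passes roughly
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus a subset of CLONE_OPTIONAL_THREAD_FLAGS,
 * so it clears the CLONE_INVALID_THREAD_FLAGS check.  A plain fork() passes
 * only an exit signal in the CSIGNAL bits and so clears the
 * CLONE_INVALID_FORK_FLAGS check.
 */
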
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
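/*
 * For reference, a sketch of what one of these wrappers expands to
 * (illustrative only; capget is declared further below):
 *
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data)
 * becomes
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 */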
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 #define __NR_sys_gettid __NR_gettid
281 _syscall0(int, sys_gettid)
282 
283 /* For the 64-bit guest on 32-bit host case we must emulate
284  * getdents using getdents64, because otherwise the host
285  * might hand us back more dirent records than we can fit
286  * into the guest buffer after structure format conversion.
287  * Otherwise we emulate the target getdents using the host getdents if available.
288  */
289 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
290 #define EMULATE_GETDENTS_WITH_GETDENTS
291 #endif
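
/*
 * Example (for illustration): a 64-bit guest such as x86_64 running on a
 * 32-bit host has TARGET_ABI_BITS > HOST_LONG_BITS, so
 * EMULATE_GETDENTS_WITH_GETDENTS stays undefined and the target getdents is
 * emulated via the host getdents64 wrapper declared below.
 */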
292 
293 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
294 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
295 #endif
296 #if (defined(TARGET_NR_getdents) && \
297       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
298     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
299 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
300 #endif
301 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
302 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
303           loff_t *, res, uint, wh);
304 #endif
305 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
306 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
307           siginfo_t *, uinfo)
308 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
309 #ifdef __NR_exit_group
310 _syscall1(int,exit_group,int,error_code)
311 #endif
312 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
313 _syscall1(int,set_tid_address,int *,tidptr)
314 #endif
315 #if defined(__NR_futex)
316 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
317           const struct timespec *,timeout,int *,uaddr2,int,val3)
318 #endif
319 #if defined(__NR_futex_time64)
320 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
321           const struct timespec *,timeout,int *,uaddr2,int,val3)
322 #endif
323 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
324 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
325           unsigned long *, user_mask_ptr);
326 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
327 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
328           unsigned long *, user_mask_ptr);
329 #define __NR_sys_getcpu __NR_getcpu
330 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
331 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
332           void *, arg);
333 _syscall2(int, capget, struct __user_cap_header_struct *, header,
334           struct __user_cap_data_struct *, data);
335 _syscall2(int, capset, struct __user_cap_header_struct *, header,
336           struct __user_cap_data_struct *, data);
337 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
338 _syscall2(int, ioprio_get, int, which, int, who)
339 #endif
340 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
341 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
342 #endif
343 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
344 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
345 #endif
346 
347 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
348 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
349           unsigned long, idx1, unsigned long, idx2)
350 #endif
351 
352 /*
353  * It is assumed that struct statx is architecture independent.
354  */
355 #if defined(TARGET_NR_statx) && defined(__NR_statx)
356 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
357           unsigned int, mask, struct target_statx *, statxbuf)
358 #endif
359 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
360 _syscall2(int, membarrier, int, cmd, int, flags)
361 #endif
362 
363 static bitmask_transtbl fcntl_flags_tbl[] = {
364   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
365   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
366   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
367   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
368   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
369   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
370   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
371   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
372   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
373   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
374   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
375   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
376   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
377 #if defined(O_DIRECT)
378   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
379 #endif
380 #if defined(O_NOATIME)
381   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
382 #endif
383 #if defined(O_CLOEXEC)
384   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
385 #endif
386 #if defined(O_PATH)
387   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
388 #endif
389 #if defined(O_TMPFILE)
390   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
391 #endif
392   /* Don't terminate the list prematurely on 64-bit host+guest.  */
393 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
394   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
395 #endif
396   { 0, 0, 0, 0 }
397 };
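
/*
 * Each row above is { target_mask, target_bits, host_mask, host_bits }, as
 * consumed by the generic bitmask translators.  A usage sketch (assuming the
 * target_to_host_bitmask()/host_to_target_bitmask() helpers from qemu.h):
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     abi_long guest_flags = host_to_target_bitmask(host_flags,
 *                                                   fcntl_flags_tbl);
 */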
398 
399 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
400 
401 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
402 #if defined(__NR_utimensat)
403 #define __NR_sys_utimensat __NR_utimensat
404 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
405           const struct timespec *,tsp,int,flags)
406 #else
407 static int sys_utimensat(int dirfd, const char *pathname,
408                          const struct timespec times[2], int flags)
409 {
410     errno = ENOSYS;
411     return -1;
412 }
413 #endif
414 #endif /* TARGET_NR_utimensat */
415 
416 #ifdef TARGET_NR_renameat2
417 #if defined(__NR_renameat2)
418 #define __NR_sys_renameat2 __NR_renameat2
419 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
420           const char *, new, unsigned int, flags)
421 #else
422 static int sys_renameat2(int oldfd, const char *old,
423                          int newfd, const char *new, int flags)
424 {
425     if (flags == 0) {
426         return renameat(oldfd, old, newfd, new);
427     }
428     errno = ENOSYS;
429     return -1;
430 }
431 #endif
432 #endif /* TARGET_NR_renameat2 */
433 
434 #ifdef CONFIG_INOTIFY
435 #include <sys/inotify.h>
436 
437 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
438 static int sys_inotify_init(void)
439 {
440   return (inotify_init());
441 }
442 #endif
443 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
444 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
445 {
446   return (inotify_add_watch(fd, pathname, mask));
447 }
448 #endif
449 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
450 static int sys_inotify_rm_watch(int fd, int32_t wd)
451 {
452   return (inotify_rm_watch(fd, wd));
453 }
454 #endif
455 #ifdef CONFIG_INOTIFY1
456 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
457 static int sys_inotify_init1(int flags)
458 {
459   return (inotify_init1(flags));
460 }
461 #endif
462 #endif
463 #else
464 /* Userspace can usually survive runtime without inotify */
465 #undef TARGET_NR_inotify_init
466 #undef TARGET_NR_inotify_init1
467 #undef TARGET_NR_inotify_add_watch
468 #undef TARGET_NR_inotify_rm_watch
469 #endif /* CONFIG_INOTIFY  */
470 
471 #if defined(TARGET_NR_prlimit64)
472 #ifndef __NR_prlimit64
473 # define __NR_prlimit64 -1
474 #endif
475 #define __NR_sys_prlimit64 __NR_prlimit64
476 /* The glibc rlimit structure may not be the one used by the underlying syscall */
477 struct host_rlimit64 {
478     uint64_t rlim_cur;
479     uint64_t rlim_max;
480 };
481 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
482           const struct host_rlimit64 *, new_limit,
483           struct host_rlimit64 *, old_limit)
484 #endif
485 
486 
487 #if defined(TARGET_NR_timer_create)
488 /* Maximum of 32 active POSIX timers allowed at any one time. */
489 static timer_t g_posix_timers[32] = { 0, };
490 
491 static inline int next_free_host_timer(void)
492 {
493     int k;
494     /* FIXME: Does finding the next free slot require a lock? */
495     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
496         if (g_posix_timers[k] == 0) {
497             g_posix_timers[k] = (timer_t) 1;
498             return k;
499         }
500     }
501     return -1;
502 }
503 #endif
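
/*
 * A possible lock-free answer to the FIXME above (sketch only, assuming the
 * qatomic_cmpxchg() helper from qemu/atomic.h is available): claim the slot
 * atomically so two guest threads cannot both see it as free.
 *
 *     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
 *         if (qatomic_cmpxchg(&g_posix_timers[k], NULL, (timer_t)1) == NULL) {
 *             return k;
 *         }
 *     }
 *     return -1;
 */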
504 
505 #define ERRNO_TABLE_SIZE 1200
506 
507 /* target_to_host_errno_table[] is initialized from
508  * host_to_target_errno_table[] in syscall_init(). */
509 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
510 };
511 
512 /*
513  * This list is the union of errno values overridden in asm-<arch>/errno.h
514  * minus the errnos that are not actually generic to all archs.
515  */
516 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
517     [EAGAIN]		= TARGET_EAGAIN,
518     [EIDRM]		= TARGET_EIDRM,
519     [ECHRNG]		= TARGET_ECHRNG,
520     [EL2NSYNC]		= TARGET_EL2NSYNC,
521     [EL3HLT]		= TARGET_EL3HLT,
522     [EL3RST]		= TARGET_EL3RST,
523     [ELNRNG]		= TARGET_ELNRNG,
524     [EUNATCH]		= TARGET_EUNATCH,
525     [ENOCSI]		= TARGET_ENOCSI,
526     [EL2HLT]		= TARGET_EL2HLT,
527     [EDEADLK]		= TARGET_EDEADLK,
528     [ENOLCK]		= TARGET_ENOLCK,
529     [EBADE]		= TARGET_EBADE,
530     [EBADR]		= TARGET_EBADR,
531     [EXFULL]		= TARGET_EXFULL,
532     [ENOANO]		= TARGET_ENOANO,
533     [EBADRQC]		= TARGET_EBADRQC,
534     [EBADSLT]		= TARGET_EBADSLT,
535     [EBFONT]		= TARGET_EBFONT,
536     [ENOSTR]		= TARGET_ENOSTR,
537     [ENODATA]		= TARGET_ENODATA,
538     [ETIME]		= TARGET_ETIME,
539     [ENOSR]		= TARGET_ENOSR,
540     [ENONET]		= TARGET_ENONET,
541     [ENOPKG]		= TARGET_ENOPKG,
542     [EREMOTE]		= TARGET_EREMOTE,
543     [ENOLINK]		= TARGET_ENOLINK,
544     [EADV]		= TARGET_EADV,
545     [ESRMNT]		= TARGET_ESRMNT,
546     [ECOMM]		= TARGET_ECOMM,
547     [EPROTO]		= TARGET_EPROTO,
548     [EDOTDOT]		= TARGET_EDOTDOT,
549     [EMULTIHOP]		= TARGET_EMULTIHOP,
550     [EBADMSG]		= TARGET_EBADMSG,
551     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
552     [EOVERFLOW]		= TARGET_EOVERFLOW,
553     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
554     [EBADFD]		= TARGET_EBADFD,
555     [EREMCHG]		= TARGET_EREMCHG,
556     [ELIBACC]		= TARGET_ELIBACC,
557     [ELIBBAD]		= TARGET_ELIBBAD,
558     [ELIBSCN]		= TARGET_ELIBSCN,
559     [ELIBMAX]		= TARGET_ELIBMAX,
560     [ELIBEXEC]		= TARGET_ELIBEXEC,
561     [EILSEQ]		= TARGET_EILSEQ,
562     [ENOSYS]		= TARGET_ENOSYS,
563     [ELOOP]		= TARGET_ELOOP,
564     [ERESTART]		= TARGET_ERESTART,
565     [ESTRPIPE]		= TARGET_ESTRPIPE,
566     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
567     [EUSERS]		= TARGET_EUSERS,
568     [ENOTSOCK]		= TARGET_ENOTSOCK,
569     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
570     [EMSGSIZE]		= TARGET_EMSGSIZE,
571     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
572     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
573     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
574     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
575     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
576     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
577     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
578     [EADDRINUSE]	= TARGET_EADDRINUSE,
579     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
580     [ENETDOWN]		= TARGET_ENETDOWN,
581     [ENETUNREACH]	= TARGET_ENETUNREACH,
582     [ENETRESET]		= TARGET_ENETRESET,
583     [ECONNABORTED]	= TARGET_ECONNABORTED,
584     [ECONNRESET]	= TARGET_ECONNRESET,
585     [ENOBUFS]		= TARGET_ENOBUFS,
586     [EISCONN]		= TARGET_EISCONN,
587     [ENOTCONN]		= TARGET_ENOTCONN,
588     [EUCLEAN]		= TARGET_EUCLEAN,
589     [ENOTNAM]		= TARGET_ENOTNAM,
590     [ENAVAIL]		= TARGET_ENAVAIL,
591     [EISNAM]		= TARGET_EISNAM,
592     [EREMOTEIO]		= TARGET_EREMOTEIO,
593     [EDQUOT]            = TARGET_EDQUOT,
594     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
595     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
596     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
597     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
598     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
599     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
600     [EALREADY]		= TARGET_EALREADY,
601     [EINPROGRESS]	= TARGET_EINPROGRESS,
602     [ESTALE]		= TARGET_ESTALE,
603     [ECANCELED]		= TARGET_ECANCELED,
604     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
605     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
606 #ifdef ENOKEY
607     [ENOKEY]		= TARGET_ENOKEY,
608 #endif
609 #ifdef EKEYEXPIRED
610     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
611 #endif
612 #ifdef EKEYREVOKED
613     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
614 #endif
615 #ifdef EKEYREJECTED
616     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
617 #endif
618 #ifdef EOWNERDEAD
619     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
620 #endif
621 #ifdef ENOTRECOVERABLE
622     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
623 #endif
624 #ifdef ENOMSG
625     [ENOMSG]            = TARGET_ENOMSG,
626 #endif
627 #ifdef ERFKILL
628     [ERFKILL]           = TARGET_ERFKILL,
629 #endif
630 #ifdef EHWPOISON
631     [EHWPOISON]         = TARGET_EHWPOISON,
632 #endif
633 };
634 
635 static inline int host_to_target_errno(int err)
636 {
637     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
638         host_to_target_errno_table[err]) {
639         return host_to_target_errno_table[err];
640     }
641     return err;
642 }
643 
644 static inline int target_to_host_errno(int err)
645 {
646     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
647         target_to_host_errno_table[err]) {
648         return target_to_host_errno_table[err];
649     }
650     return err;
651 }
652 
653 static inline abi_long get_errno(abi_long ret)
654 {
655     if (ret == -1)
656         return -host_to_target_errno(errno);
657     else
658         return ret;
659 }
660 
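/*
 * Usage sketch for the helpers above (illustrative): a host call is wrapped
 * with get_errno() so that a failure becomes a negative *target* errno,
 * which is_error() later recognises, e.g.
 *
 *     abi_long ret = get_errno(safe_openat(dirfd, path, host_flags, mode));
 *     if (is_error(ret)) {
 *         return ret;              (ret is -TARGET_Exxx here)
 *     }
 *
 * (safe_openat() is generated further below.)
 */
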
661 const char *target_strerror(int err)
662 {
663     if (err == TARGET_ERESTARTSYS) {
664         return "To be restarted";
665     }
666     if (err == TARGET_QEMU_ESIGRETURN) {
667         return "Successful exit from sigreturn";
668     }
669 
670     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
671         return NULL;
672     }
673     return strerror(target_to_host_errno(err));
674 }
675 
676 #define safe_syscall0(type, name) \
677 static type safe_##name(void) \
678 { \
679     return safe_syscall(__NR_##name); \
680 }
681 
682 #define safe_syscall1(type, name, type1, arg1) \
683 static type safe_##name(type1 arg1) \
684 { \
685     return safe_syscall(__NR_##name, arg1); \
686 }
687 
688 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
689 static type safe_##name(type1 arg1, type2 arg2) \
690 { \
691     return safe_syscall(__NR_##name, arg1, arg2); \
692 }
693 
694 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
698 }
699 
700 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
703 { \
704     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
705 }
706 
707 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
708     type4, arg4, type5, arg5) \
709 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
710     type5 arg5) \
711 { \
712     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
713 }
714 
715 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
716     type4, arg4, type5, arg5, type6, arg6) \
717 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
718     type5 arg5, type6 arg6) \
719 { \
720     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
721 }
722 
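/*
 * Note on semantics (see the comments in safe-syscall.inc.S and qemu.h for
 * the authoritative description): safe_syscall() is written so that a guest
 * signal arriving just before the host syscall instruction makes the call
 * fail with TARGET_ERESTARTSYS instead of blocking, letting the main loop
 * deliver the signal and restart the syscall.  So, for example, the
 * safe_read() wrapper generated below behaves like read() plus that
 * restartability guarantee.
 */
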
723 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
724 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
725 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
726               int, flags, mode_t, mode)
727 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
728 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
729               struct rusage *, rusage)
730 #endif
731 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
732               int, options, struct rusage *, rusage)
733 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
734 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
735     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
736 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
737               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
738 #endif
739 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
740 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
741               struct timespec *, tsp, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 #endif
744 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
745               int, maxevents, int, timeout, const sigset_t *, sigmask,
746               size_t, sigsetsize)
747 #if defined(__NR_futex)
748 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
749               const struct timespec *,timeout,int *,uaddr2,int,val3)
750 #endif
751 #if defined(__NR_futex_time64)
752 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
753               const struct timespec *,timeout,int *,uaddr2,int,val3)
754 #endif
755 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
756 safe_syscall2(int, kill, pid_t, pid, int, sig)
757 safe_syscall2(int, tkill, int, tid, int, sig)
758 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
759 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
760 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
761 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
762               unsigned long, pos_l, unsigned long, pos_h)
763 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
764               unsigned long, pos_l, unsigned long, pos_h)
765 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
766               socklen_t, addrlen)
767 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
768               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
769 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
770               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
771 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
772 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
773 safe_syscall2(int, flock, int, fd, int, operation)
774 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
775 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
776               const struct timespec *, uts, size_t, sigsetsize)
777 #endif
778 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
779               int, flags)
780 #if defined(TARGET_NR_nanosleep)
781 safe_syscall2(int, nanosleep, const struct timespec *, req,
782               struct timespec *, rem)
783 #endif
784 #if defined(TARGET_NR_clock_nanosleep) || \
785     defined(TARGET_NR_clock_nanosleep_time64)
786 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
787               const struct timespec *, req, struct timespec *, rem)
788 #endif
789 #ifdef __NR_ipc
790 #ifdef __s390x__
791 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
792               void *, ptr)
793 #else
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 #endif
797 #endif
798 #ifdef __NR_msgsnd
799 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
800               int, flags)
801 #endif
802 #ifdef __NR_msgrcv
803 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
804               long, msgtype, int, flags)
805 #endif
806 #ifdef __NR_semtimedop
807 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
808               unsigned, nsops, const struct timespec *, timeout)
809 #endif
810 #if defined(TARGET_NR_mq_timedsend) || \
811     defined(TARGET_NR_mq_timedsend_time64)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedreceive) || \
816     defined(TARGET_NR_mq_timedreceive_time64)
817 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
818               size_t, len, unsigned *, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
821 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
822               int, outfd, loff_t *, poutoff, size_t, length,
823               unsigned int, flags)
824 #endif
825 
826 /* We do ioctl like this rather than via safe_syscall3 to preserve the
827  * "third argument might be integer or pointer or not present" behaviour of
828  * the libc function.
829  */
830 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
831 /* Similarly for fcntl. Note that callers must always:
832  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
833  *  use the flock64 struct rather than unsuffixed flock
834  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
835  */
836 #ifdef __NR_fcntl64
837 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
838 #else
839 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
840 #endif
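
/*
 * For example (illustrative), a lock request goes through as:
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_SETLK64, &fl64));
 *
 * never as F_SETLK with a plain struct flock.
 */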
841 
842 static inline int host_to_target_sock_type(int host_type)
843 {
844     int target_type;
845 
846     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
847     case SOCK_DGRAM:
848         target_type = TARGET_SOCK_DGRAM;
849         break;
850     case SOCK_STREAM:
851         target_type = TARGET_SOCK_STREAM;
852         break;
853     default:
854         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
855         break;
856     }
857 
858 #if defined(SOCK_CLOEXEC)
859     if (host_type & SOCK_CLOEXEC) {
860         target_type |= TARGET_SOCK_CLOEXEC;
861     }
862 #endif
863 
864 #if defined(SOCK_NONBLOCK)
865     if (host_type & SOCK_NONBLOCK) {
866         target_type |= TARGET_SOCK_NONBLOCK;
867     }
868 #endif
869 
870     return target_type;
871 }
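
/*
 * Example of the mapping above: a host socket created as
 * SOCK_STREAM | SOCK_NONBLOCK is reported back to the guest as
 * TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK; unknown base types are passed
 * through numerically unchanged.
 */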
872 
873 static abi_ulong target_brk;
874 static abi_ulong target_original_brk;
875 static abi_ulong brk_page;
876 
877 void target_set_brk(abi_ulong new_brk)
878 {
879     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
880     brk_page = HOST_PAGE_ALIGN(target_brk);
881 }
882 
883 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
884 #define DEBUGF_BRK(message, args...)
885 
886 /* do_brk() must return target values and target errnos. */
887 abi_long do_brk(abi_ulong new_brk)
888 {
889     abi_long mapped_addr;
890     abi_ulong new_alloc_size;
891 
892     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
893 
894     if (!new_brk) {
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
896         return target_brk;
897     }
898     if (new_brk < target_original_brk) {
899         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
900                    target_brk);
901         return target_brk;
902     }
903 
904     /* If the new brk is less than the highest page reserved to the
905      * target heap allocation, set it and we're almost done...  */
906     if (new_brk <= brk_page) {
907         /* Heap contents are initialized to zero, as for anonymous
908          * mapped pages.  */
909         if (new_brk > target_brk) {
910             memset(g2h(target_brk), 0, new_brk - target_brk);
911         }
912         target_brk = new_brk;
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
914         return target_brk;
915     }
916 
917     /* We need to allocate more memory after the brk... Note that
918      * we don't use MAP_FIXED because that will map over the top of
919      * any existing mapping (like the one with the host libc or qemu
920      * itself); instead we treat "mapped but at wrong address" as
921      * a failure and unmap again.
922      */
923     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
924     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
925                                         PROT_READ|PROT_WRITE,
926                                         MAP_ANON|MAP_PRIVATE, 0, 0));
927 
928     if (mapped_addr == brk_page) {
929         /* Heap contents are initialized to zero, as for anonymous
930          * mapped pages.  Technically the new pages are already
931          * initialized to zero since they *are* anonymous mapped
932          * pages, however we have to take care with the contents that
933          * come from the remaining part of the previous page: it may
934          * contain garbage data left over from an earlier heap use (the
935          * heap may have grown and then shrunk).  */
936         memset(g2h(target_brk), 0, brk_page - target_brk);
937 
938         target_brk = new_brk;
939         brk_page = HOST_PAGE_ALIGN(target_brk);
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
941             target_brk);
942         return target_brk;
943     } else if (mapped_addr != -1) {
944         /* Mapped but at wrong address, meaning there wasn't actually
945          * enough space for this brk.
946          */
947         target_munmap(mapped_addr, new_alloc_size);
948         mapped_addr = -1;
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
950     }
951     else {
952         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
953     }
954 
955 #if defined(TARGET_ALPHA)
956     /* We (partially) emulate OSF/1 on Alpha, which requires we
957        return a proper errno, not an unchanged brk value.  */
958     return -TARGET_ENOMEM;
959 #endif
960     /* For everything else, return the previous break. */
961     return target_brk;
962 }
963 
964 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
965     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
966 static inline abi_long copy_from_user_fdset(fd_set *fds,
967                                             abi_ulong target_fds_addr,
968                                             int n)
969 {
970     int i, nw, j, k;
971     abi_ulong b, *target_fds;
972 
973     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
974     if (!(target_fds = lock_user(VERIFY_READ,
975                                  target_fds_addr,
976                                  sizeof(abi_ulong) * nw,
977                                  1)))
978         return -TARGET_EFAULT;
979 
980     FD_ZERO(fds);
981     k = 0;
982     for (i = 0; i < nw; i++) {
983         /* grab the abi_ulong */
984         __get_user(b, &target_fds[i]);
985         for (j = 0; j < TARGET_ABI_BITS; j++) {
986             /* check the bit inside the abi_ulong */
987             if ((b >> j) & 1)
988                 FD_SET(k, fds);
989             k++;
990         }
991     }
992 
993     unlock_user(target_fds, target_fds_addr, 0);
994 
995     return 0;
996 }
997 
998 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
999                                                  abi_ulong target_fds_addr,
1000                                                  int n)
1001 {
1002     if (target_fds_addr) {
1003         if (copy_from_user_fdset(fds, target_fds_addr, n))
1004             return -TARGET_EFAULT;
1005         *fds_ptr = fds;
1006     } else {
1007         *fds_ptr = NULL;
1008     }
1009     return 0;
1010 }
1011 
1012 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1013                                           const fd_set *fds,
1014                                           int n)
1015 {
1016     int i, nw, j, k;
1017     abi_long v;
1018     abi_ulong *target_fds;
1019 
1020     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1021     if (!(target_fds = lock_user(VERIFY_WRITE,
1022                                  target_fds_addr,
1023                                  sizeof(abi_ulong) * nw,
1024                                  0)))
1025         return -TARGET_EFAULT;
1026 
1027     k = 0;
1028     for (i = 0; i < nw; i++) {
1029         v = 0;
1030         for (j = 0; j < TARGET_ABI_BITS; j++) {
1031             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1032             k++;
1033         }
1034         __put_user(v, &target_fds[i]);
1035     }
1036 
1037     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1038 
1039     return 0;
1040 }
1041 #endif
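
/*
 * Worked example for the fdset conversion helpers above (assuming
 * TARGET_ABI_BITS == 32): guest fd 35 lives in bit 3 of target_fds[1],
 * because 35 == 1 * 32 + 3.  copy_from_user_fdset() reaches it with
 * i == 1, j == 3, k == 35 and calls FD_SET(35, fds);
 * copy_to_user_fdset() performs the mirror-image packing.
 */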
1042 
1043 #if defined(__alpha__)
1044 #define HOST_HZ 1024
1045 #else
1046 #define HOST_HZ 100
1047 #endif
1048 
1049 static inline abi_long host_to_target_clock_t(long ticks)
1050 {
1051 #if HOST_HZ == TARGET_HZ
1052     return ticks;
1053 #else
1054     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1055 #endif
1056 }
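
/*
 * Example of the conversion above: on an Alpha host (HOST_HZ == 1024) with a
 * TARGET_HZ of 100, 2048 host ticks become (2048 * 100) / 1024 == 200 target
 * ticks; when the two rates match, the value is passed through unchanged.
 */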
1057 
1058 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1059                                              const struct rusage *rusage)
1060 {
1061     struct target_rusage *target_rusage;
1062 
1063     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1064         return -TARGET_EFAULT;
1065     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1066     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1067     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1068     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1069     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1070     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1071     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1072     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1073     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1074     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1075     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1076     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1077     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1078     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1079     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1080     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1081     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1082     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1083     unlock_user_struct(target_rusage, target_addr, 1);
1084 
1085     return 0;
1086 }
1087 
1088 #ifdef TARGET_NR_setrlimit
1089 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1090 {
1091     abi_ulong target_rlim_swap;
1092     rlim_t result;
1093 
1094     target_rlim_swap = tswapal(target_rlim);
1095     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1096         return RLIM_INFINITY;
1097 
1098     result = target_rlim_swap;
1099     if (target_rlim_swap != (rlim_t)result)
1100         return RLIM_INFINITY;
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1107 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1108 {
1109     abi_ulong target_rlim_swap;
1110     abi_ulong result;
1111 
1112     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1113         target_rlim_swap = TARGET_RLIM_INFINITY;
1114     else
1115         target_rlim_swap = rlim;
1116     result = tswapal(target_rlim_swap);
1117 
1118     return result;
1119 }
1120 #endif
1121 
1122 static inline int target_to_host_resource(int code)
1123 {
1124     switch (code) {
1125     case TARGET_RLIMIT_AS:
1126         return RLIMIT_AS;
1127     case TARGET_RLIMIT_CORE:
1128         return RLIMIT_CORE;
1129     case TARGET_RLIMIT_CPU:
1130         return RLIMIT_CPU;
1131     case TARGET_RLIMIT_DATA:
1132         return RLIMIT_DATA;
1133     case TARGET_RLIMIT_FSIZE:
1134         return RLIMIT_FSIZE;
1135     case TARGET_RLIMIT_LOCKS:
1136         return RLIMIT_LOCKS;
1137     case TARGET_RLIMIT_MEMLOCK:
1138         return RLIMIT_MEMLOCK;
1139     case TARGET_RLIMIT_MSGQUEUE:
1140         return RLIMIT_MSGQUEUE;
1141     case TARGET_RLIMIT_NICE:
1142         return RLIMIT_NICE;
1143     case TARGET_RLIMIT_NOFILE:
1144         return RLIMIT_NOFILE;
1145     case TARGET_RLIMIT_NPROC:
1146         return RLIMIT_NPROC;
1147     case TARGET_RLIMIT_RSS:
1148         return RLIMIT_RSS;
1149     case TARGET_RLIMIT_RTPRIO:
1150         return RLIMIT_RTPRIO;
1151     case TARGET_RLIMIT_SIGPENDING:
1152         return RLIMIT_SIGPENDING;
1153     case TARGET_RLIMIT_STACK:
1154         return RLIMIT_STACK;
1155     default:
1156         return code;
1157     }
1158 }
1159 
1160 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1161                                               abi_ulong target_tv_addr)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __get_user(tv->tv_sec, &target_tv->tv_sec);
1170     __get_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 0);
1173 
1174     return 0;
1175 }
1176 
1177 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1178                                             const struct timeval *tv)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __put_user(tv->tv_sec, &target_tv->tv_sec);
1187     __put_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 1);
1190 
1191     return 0;
1192 }
1193 
1194 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1195 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1196                                                 abi_ulong target_tv_addr)
1197 {
1198     struct target__kernel_sock_timeval *target_tv;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203 
1204     __get_user(tv->tv_sec, &target_tv->tv_sec);
1205     __get_user(tv->tv_usec, &target_tv->tv_usec);
1206 
1207     unlock_user_struct(target_tv, target_tv_addr, 0);
1208 
1209     return 0;
1210 }
1211 #endif
1212 
1213 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1214                                               const struct timeval *tv)
1215 {
1216     struct target__kernel_sock_timeval *target_tv;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1219         return -TARGET_EFAULT;
1220     }
1221 
1222     __put_user(tv->tv_sec, &target_tv->tv_sec);
1223     __put_user(tv->tv_usec, &target_tv->tv_usec);
1224 
1225     unlock_user_struct(target_tv, target_tv_addr, 1);
1226 
1227     return 0;
1228 }
1229 
1230 #if defined(TARGET_NR_futex) || \
1231     defined(TARGET_NR_rt_sigtimedwait) || \
1232     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1233     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1234     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1235     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1236     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1237     defined(TARGET_NR_timer_settime) || \
1238     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1239 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1240                                                abi_ulong target_addr)
1241 {
1242     struct target_timespec *target_ts;
1243 
1244     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1245         return -TARGET_EFAULT;
1246     }
1247     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1248     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1249     unlock_user_struct(target_ts, target_addr, 0);
1250     return 0;
1251 }
1252 #endif
1253 
1254 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1255     defined(TARGET_NR_timer_settime64) || \
1256     defined(TARGET_NR_mq_timedsend_time64) || \
1257     defined(TARGET_NR_mq_timedreceive_time64) || \
1258     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1259     defined(TARGET_NR_clock_nanosleep_time64) || \
1260     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1261     defined(TARGET_NR_utimensat) || \
1262     defined(TARGET_NR_utimensat_time64) || \
1263     defined(TARGET_NR_semtimedop_time64) || \
1264     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1265 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1266                                                  abi_ulong target_addr)
1267 {
1268     struct target__kernel_timespec *target_ts;
1269 
1270     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1271         return -TARGET_EFAULT;
1272     }
1273     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1274     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1275     /* in 32bit mode, this drops the padding */
1276     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1277     unlock_user_struct(target_ts, target_addr, 0);
1278     return 0;
1279 }
1280 #endif
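
/*
 * Note on the tv_nsec cast above: struct __kernel_timespec always carries a
 * 64-bit tv_nsec slot, but for a 32-bit ABI only the low 32 bits are
 * meaningful and the rest is padding.  Casting through abi_long keeps the
 * low abi_long-sized part (sign-extended) and is a no-op for 64-bit ABIs.
 */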
1281 
1282 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1283                                                struct timespec *host_ts)
1284 {
1285     struct target_timespec *target_ts;
1286 
1287     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1288         return -TARGET_EFAULT;
1289     }
1290     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1291     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1292     unlock_user_struct(target_ts, target_addr, 1);
1293     return 0;
1294 }
1295 
1296 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1297                                                  struct timespec *host_ts)
1298 {
1299     struct target__kernel_timespec *target_ts;
1300 
1301     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1302         return -TARGET_EFAULT;
1303     }
1304     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1305     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1306     unlock_user_struct(target_ts, target_addr, 1);
1307     return 0;
1308 }
1309 
1310 #if defined(TARGET_NR_gettimeofday)
1311 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1312                                              struct timezone *tz)
1313 {
1314     struct target_timezone *target_tz;
1315 
1316     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1317         return -TARGET_EFAULT;
1318     }
1319 
1320     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1321     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1322 
1323     unlock_user_struct(target_tz, target_tz_addr, 1);
1324 
1325     return 0;
1326 }
1327 #endif
1328 
1329 #if defined(TARGET_NR_settimeofday)
1330 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1331                                                abi_ulong target_tz_addr)
1332 {
1333     struct target_timezone *target_tz;
1334 
1335     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1336         return -TARGET_EFAULT;
1337     }
1338 
1339     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1340     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1341 
1342     unlock_user_struct(target_tz, target_tz_addr, 0);
1343 
1344     return 0;
1345 }
1346 #endif
1347 
1348 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1349 #include <mqueue.h>
1350 
1351 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1352                                               abi_ulong target_mq_attr_addr)
1353 {
1354     struct target_mq_attr *target_mq_attr;
1355 
1356     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1357                           target_mq_attr_addr, 1))
1358         return -TARGET_EFAULT;
1359 
1360     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1361     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1362     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1363     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1364 
1365     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1366 
1367     return 0;
1368 }
1369 
1370 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1371                                             const struct mq_attr *attr)
1372 {
1373     struct target_mq_attr *target_mq_attr;
1374 
1375     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1376                           target_mq_attr_addr, 0))
1377         return -TARGET_EFAULT;
1378 
1379     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1380     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1381     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1382     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1383 
1384     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1385 
1386     return 0;
1387 }
1388 #endif
1389 
1390 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1391 /* do_select() must return target values and target errnos. */
1392 static abi_long do_select(int n,
1393                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1394                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1395 {
1396     fd_set rfds, wfds, efds;
1397     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1398     struct timeval tv;
1399     struct timespec ts, *ts_ptr;
1400     abi_long ret;
1401 
1402     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1403     if (ret) {
1404         return ret;
1405     }
1406     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1407     if (ret) {
1408         return ret;
1409     }
1410     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1411     if (ret) {
1412         return ret;
1413     }
1414 
1415     if (target_tv_addr) {
1416         if (copy_from_user_timeval(&tv, target_tv_addr))
1417             return -TARGET_EFAULT;
1418         ts.tv_sec = tv.tv_sec;
1419         ts.tv_nsec = tv.tv_usec * 1000;
1420         ts_ptr = &ts;
1421     } else {
1422         ts_ptr = NULL;
1423     }
1424 
1425     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1426                                   ts_ptr, NULL));
1427 
1428     if (!is_error(ret)) {
1429         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1430             return -TARGET_EFAULT;
1431         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1432             return -TARGET_EFAULT;
1433         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1434             return -TARGET_EFAULT;
1435 
1436         if (target_tv_addr) {
1437             tv.tv_sec = ts.tv_sec;
1438             tv.tv_usec = ts.tv_nsec / 1000;
1439             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1440                 return -TARGET_EFAULT;
1441             }
1442         }
1443     }
1444 
1445     return ret;
1446 }
1447 
1448 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1449 static abi_long do_old_select(abi_ulong arg1)
1450 {
1451     struct target_sel_arg_struct *sel;
1452     abi_ulong inp, outp, exp, tvp;
1453     long nsel;
1454 
1455     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1456         return -TARGET_EFAULT;
1457     }
1458 
1459     nsel = tswapal(sel->n);
1460     inp = tswapal(sel->inp);
1461     outp = tswapal(sel->outp);
1462     exp = tswapal(sel->exp);
1463     tvp = tswapal(sel->tvp);
1464 
1465     unlock_user_struct(sel, arg1, 0);
1466 
1467     return do_select(nsel, inp, outp, exp, tvp);
1468 }
1469 #endif
1470 #endif
1471 
1472 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1473 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1474                             abi_long arg4, abi_long arg5, abi_long arg6,
1475                             bool time64)
1476 {
1477     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1478     fd_set rfds, wfds, efds;
1479     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1480     struct timespec ts, *ts_ptr;
1481     abi_long ret;
1482 
1483     /*
1484      * The 6th arg is actually two args smashed together,
1485      * so we cannot use the C library.
1486      */
1487     sigset_t set;
1488     struct {
1489         sigset_t *set;
1490         size_t size;
1491     } sig, *sig_ptr;
1492 
1493     abi_ulong arg_sigset, arg_sigsize, *arg7;
1494     target_sigset_t *target_sigset;
1495 
1496     n = arg1;
1497     rfd_addr = arg2;
1498     wfd_addr = arg3;
1499     efd_addr = arg4;
1500     ts_addr = arg5;
1501 
1502     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1503     if (ret) {
1504         return ret;
1505     }
1506     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1507     if (ret) {
1508         return ret;
1509     }
1510     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1511     if (ret) {
1512         return ret;
1513     }
1514 
1515     /*
1516      * This takes a timespec, and not a timeval, so we cannot
1517      * use the do_select() helper ...
1518      */
1519     if (ts_addr) {
1520         if (time64) {
1521             if (target_to_host_timespec64(&ts, ts_addr)) {
1522                 return -TARGET_EFAULT;
1523             }
1524         } else {
1525             if (target_to_host_timespec(&ts, ts_addr)) {
1526                 return -TARGET_EFAULT;
1527             }
1528         }
1529         ts_ptr = &ts;
1530     } else {
1531         ts_ptr = NULL;
1532     }
1533 
1534     /* Extract the two packed args for the sigset */
1535     if (arg6) {
1536         sig_ptr = &sig;
1537         sig.size = SIGSET_T_SIZE;
1538 
1539         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1540         if (!arg7) {
1541             return -TARGET_EFAULT;
1542         }
1543         arg_sigset = tswapal(arg7[0]);
1544         arg_sigsize = tswapal(arg7[1]);
1545         unlock_user(arg7, arg6, 0);
1546 
1547         if (arg_sigset) {
1548             sig.set = &set;
1549             if (arg_sigsize != sizeof(*target_sigset)) {
1550                 /* Like the kernel, we enforce correct size sigsets */
1551                 return -TARGET_EINVAL;
1552             }
1553             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1554                                       sizeof(*target_sigset), 1);
1555             if (!target_sigset) {
1556                 return -TARGET_EFAULT;
1557             }
1558             target_to_host_sigset(&set, target_sigset);
1559             unlock_user(target_sigset, arg_sigset, 0);
1560         } else {
1561             sig.set = NULL;
1562         }
1563     } else {
1564         sig_ptr = NULL;
1565     }
1566 
1567     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1568                                   ts_ptr, sig_ptr));
1569 
1570     if (!is_error(ret)) {
1571         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1572             return -TARGET_EFAULT;
1573         }
1574         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1575             return -TARGET_EFAULT;
1576         }
1577         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1578             return -TARGET_EFAULT;
1579         }
1580         if (time64) {
1581             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1582                 return -TARGET_EFAULT;
1583             }
1584         } else {
1585             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1586                 return -TARGET_EFAULT;
1587             }
1588         }
1589     }
1590     return ret;
1591 }
1592 #endif
1593 
1594 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1595     defined(TARGET_NR_ppoll_time64)
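/*
 * Common helper for poll, ppoll and ppoll_time64.  Roughly, a guest
 * ppoll(fds, nfds, &ts, &sigmask, sizeof(sigmask)) arrives here as
 * arg1..arg5 with ppoll == true; plain poll() instead passes its
 * millisecond timeout in arg3, which is converted to a timespec below.
 * On success the revents are swapped back into the guest pollfd array.
 */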
1596 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1597                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1598 {
1599     struct target_pollfd *target_pfd;
1600     unsigned int nfds = arg2;
1601     struct pollfd *pfd;
1602     unsigned int i;
1603     abi_long ret;
1604 
1605     pfd = NULL;
1606     target_pfd = NULL;
1607     if (nfds) {
1608         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1609             return -TARGET_EINVAL;
1610         }
1611         target_pfd = lock_user(VERIFY_WRITE, arg1,
1612                                sizeof(struct target_pollfd) * nfds, 1);
1613         if (!target_pfd) {
1614             return -TARGET_EFAULT;
1615         }
1616 
1617         pfd = alloca(sizeof(struct pollfd) * nfds);
1618         for (i = 0; i < nfds; i++) {
1619             pfd[i].fd = tswap32(target_pfd[i].fd);
1620             pfd[i].events = tswap16(target_pfd[i].events);
1621         }
1622     }
1623     if (ppoll) {
1624         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1625         target_sigset_t *target_set;
1626         sigset_t _set, *set = &_set;
1627 
1628         if (arg3) {
1629             if (time64) {
1630                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1631                     unlock_user(target_pfd, arg1, 0);
1632                     return -TARGET_EFAULT;
1633                 }
1634             } else {
1635                 if (target_to_host_timespec(timeout_ts, arg3)) {
1636                     unlock_user(target_pfd, arg1, 0);
1637                     return -TARGET_EFAULT;
1638                 }
1639             }
1640         } else {
1641             timeout_ts = NULL;
1642         }
1643 
1644         if (arg4) {
1645             if (arg5 != sizeof(target_sigset_t)) {
1646                 unlock_user(target_pfd, arg1, 0);
1647                 return -TARGET_EINVAL;
1648             }
1649 
1650             target_set = lock_user(VERIFY_READ, arg4,
1651                                    sizeof(target_sigset_t), 1);
1652             if (!target_set) {
1653                 unlock_user(target_pfd, arg1, 0);
1654                 return -TARGET_EFAULT;
1655             }
1656             target_to_host_sigset(set, target_set);
1657         } else {
1658             set = NULL;
1659         }
1660 
1661         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1662                                    set, SIGSET_T_SIZE));
1663 
1664         if (!is_error(ret) && arg3) {
1665             if (time64) {
1666                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1667                     return -TARGET_EFAULT;
1668                 }
1669             } else {
1670                 if (host_to_target_timespec(arg3, timeout_ts)) {
1671                     return -TARGET_EFAULT;
1672                 }
1673             }
1674         }
1675         if (arg4) {
1676             unlock_user(target_set, arg4, 0);
1677         }
1678     } else {
1679         struct timespec ts, *pts;
1680 
1681         if (arg3 >= 0) {
1682             /* Convert ms to secs, ns */
1683             ts.tv_sec = arg3 / 1000;
1684             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1685             pts = &ts;
1686         } else {
1687             /* A negative poll() timeout means "infinite" */
1688             pts = NULL;
1689         }
1690         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1691     }
1692 
1693     if (!is_error(ret)) {
1694         for (i = 0; i < nfds; i++) {
1695             target_pfd[i].revents = tswap16(pfd[i].revents);
1696         }
1697     }
1698     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1699     return ret;
1700 }
1701 #endif
1702 
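/*
 * pipe()/pipe2() helper.  Several targets (Alpha, MIPS, SH4, SPARC)
 * return the two descriptors of a plain pipe() in CPU registers (fd[0]
 * as the syscall result, fd[1] in a second register) rather than
 * through the user-supplied array; that quirk is handled below for the
 * non-pipe2 case only.
 */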
1703 static abi_long do_pipe2(int host_pipe[], int flags)
1704 {
1705 #ifdef CONFIG_PIPE2
1706     return pipe2(host_pipe, flags);
1707 #else
1708     return -ENOSYS;
1709 #endif
1710 }
1711 
1712 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1713                         int flags, int is_pipe2)
1714 {
1715     int host_pipe[2];
1716     abi_long ret;
1717     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1718 
1719     if (is_error(ret))
1720         return get_errno(ret);
1721 
1722     /* Several targets have special calling conventions for the original
1723        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1724     if (!is_pipe2) {
1725 #if defined(TARGET_ALPHA)
1726         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1727         return host_pipe[0];
1728 #elif defined(TARGET_MIPS)
1729         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1730         return host_pipe[0];
1731 #elif defined(TARGET_SH4)
1732         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1733         return host_pipe[0];
1734 #elif defined(TARGET_SPARC)
1735         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1736         return host_pipe[0];
1737 #endif
1738     }
1739 
1740     if (put_user_s32(host_pipe[0], pipedes)
1741         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1742         return -TARGET_EFAULT;
1743     return get_errno(ret);
1744 }
1745 
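/*
 * Convert a guest ip_mreq/ip_mreqn multicast request into the host
 * ip_mreqn used by IP_ADD_MEMBERSHIP and friends; the ifindex field is
 * only present (and byte-swapped) for the longer ip_mreqn layout.
 */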
1746 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1747                                               abi_ulong target_addr,
1748                                               socklen_t len)
1749 {
1750     struct target_ip_mreqn *target_smreqn;
1751 
1752     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1753     if (!target_smreqn)
1754         return -TARGET_EFAULT;
1755     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1756     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1757     if (len == sizeof(struct target_ip_mreqn))
1758         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1759     unlock_user(target_smreqn, target_addr, 0);
1760 
1761     return 0;
1762 }
1763 
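/*
 * Copy a guest sockaddr into host form.  The address family is
 * byte-swapped, an AF_UNIX length is extended to include the
 * terminating NUL when the guest under-counted it, and the
 * AF_NETLINK/AF_PACKET families have their 32/16-bit fields swapped
 * individually.
 */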
1764 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1765                                                abi_ulong target_addr,
1766                                                socklen_t len)
1767 {
1768     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1769     sa_family_t sa_family;
1770     struct target_sockaddr *target_saddr;
1771 
1772     if (fd_trans_target_to_host_addr(fd)) {
1773         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1774     }
1775 
1776     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1777     if (!target_saddr)
1778         return -TARGET_EFAULT;
1779 
1780     sa_family = tswap16(target_saddr->sa_family);
1781 
1782     /* Oops. The caller might send an incomplete sun_path; sun_path
1783      * must be terminated by \0 (see the manual page), but
1784      * unfortunately it is quite common to specify sockaddr_un
1785      * length as "strlen(x->sun_path)" while it should be
1786      * "strlen(...) + 1". We'll fix that here if needed.
1787      * The Linux kernel has a similar feature.
1788      */
1789 
1790     if (sa_family == AF_UNIX) {
1791         if (len < unix_maxlen && len > 0) {
1792             char *cp = (char *)target_saddr;
1793 
1794             if (cp[len - 1] && !cp[len])
1795                 len++;
1796         }
1797         if (len > unix_maxlen)
1798             len = unix_maxlen;
1799     }
1800 
1801     memcpy(addr, target_saddr, len);
1802     addr->sa_family = sa_family;
1803     if (sa_family == AF_NETLINK) {
1804         struct sockaddr_nl *nladdr;
1805 
1806         nladdr = (struct sockaddr_nl *)addr;
1807         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1808         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1809     } else if (sa_family == AF_PACKET) {
1810         struct target_sockaddr_ll *lladdr;
1811 
1812         lladdr = (struct target_sockaddr_ll *)addr;
1813         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1814         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1815     }
1816     unlock_user(target_saddr, target_addr, 0);
1817 
1818     return 0;
1819 }
1820 
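/*
 * Reverse of the conversion above: copy a host sockaddr back into guest
 * memory, swapping the family and the family-specific fields (netlink
 * pid/groups, packet ifindex/hatype, IPv6 scope id).
 */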
1821 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1822                                                struct sockaddr *addr,
1823                                                socklen_t len)
1824 {
1825     struct target_sockaddr *target_saddr;
1826 
1827     if (len == 0) {
1828         return 0;
1829     }
1830     assert(addr);
1831 
1832     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1833     if (!target_saddr)
1834         return -TARGET_EFAULT;
1835     memcpy(target_saddr, addr, len);
1836     if (len >= offsetof(struct target_sockaddr, sa_family) +
1837         sizeof(target_saddr->sa_family)) {
1838         target_saddr->sa_family = tswap16(addr->sa_family);
1839     }
1840     if (addr->sa_family == AF_NETLINK &&
1841         len >= sizeof(struct target_sockaddr_nl)) {
1842         struct target_sockaddr_nl *target_nl =
1843                (struct target_sockaddr_nl *)target_saddr;
1844         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1845         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1846     } else if (addr->sa_family == AF_PACKET) {
1847         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1848         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1849         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1850     } else if (addr->sa_family == AF_INET6 &&
1851                len >= sizeof(struct target_sockaddr_in6)) {
1852         struct target_sockaddr_in6 *target_in6 =
1853                (struct target_sockaddr_in6 *)target_saddr;
1854         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1855     }
1856     unlock_user(target_saddr, target_addr, len);
1857 
1858     return 0;
1859 }
1860 
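/*
 * Convert the ancillary data (control messages) of a guest sendmsg()
 * into host form.  Only SCM_RIGHTS and SCM_CREDENTIALS are converted
 * field by field; anything else is copied through verbatim and logged
 * as unsupported.
 */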
1861 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1862                                            struct target_msghdr *target_msgh)
1863 {
1864     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1865     abi_long msg_controllen;
1866     abi_ulong target_cmsg_addr;
1867     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1868     socklen_t space = 0;
1869 
1870     msg_controllen = tswapal(target_msgh->msg_controllen);
1871     if (msg_controllen < sizeof (struct target_cmsghdr))
1872         goto the_end;
1873     target_cmsg_addr = tswapal(target_msgh->msg_control);
1874     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1875     target_cmsg_start = target_cmsg;
1876     if (!target_cmsg)
1877         return -TARGET_EFAULT;
1878 
1879     while (cmsg && target_cmsg) {
1880         void *data = CMSG_DATA(cmsg);
1881         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1882 
1883         int len = tswapal(target_cmsg->cmsg_len)
1884             - sizeof(struct target_cmsghdr);
1885 
1886         space += CMSG_SPACE(len);
1887         if (space > msgh->msg_controllen) {
1888             space -= CMSG_SPACE(len);
1889             /* This is a QEMU bug, since we allocated the payload
1890              * area ourselves (unlike overflow in host-to-target
1891              * conversion, which is just the guest giving us a buffer
1892              * that's too small). It can't happen for the payload types
1893              * we currently support; if it becomes an issue in future
1894              * we would need to improve our allocation strategy to
1895              * something more intelligent than "twice the size of the
1896              * target buffer we're reading from".
1897              */
1898             qemu_log_mask(LOG_UNIMP,
1899                           ("Unsupported ancillary data %d/%d: "
1900                            "unhandled msg size\n"),
1901                           tswap32(target_cmsg->cmsg_level),
1902                           tswap32(target_cmsg->cmsg_type));
1903             break;
1904         }
1905 
1906         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1907             cmsg->cmsg_level = SOL_SOCKET;
1908         } else {
1909             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1910         }
1911         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1912         cmsg->cmsg_len = CMSG_LEN(len);
1913 
1914         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1915             int *fd = (int *)data;
1916             int *target_fd = (int *)target_data;
1917             int i, numfds = len / sizeof(int);
1918 
1919             for (i = 0; i < numfds; i++) {
1920                 __get_user(fd[i], target_fd + i);
1921             }
1922         } else if (cmsg->cmsg_level == SOL_SOCKET
1923                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1924             struct ucred *cred = (struct ucred *)data;
1925             struct target_ucred *target_cred =
1926                 (struct target_ucred *)target_data;
1927 
1928             __get_user(cred->pid, &target_cred->pid);
1929             __get_user(cred->uid, &target_cred->uid);
1930             __get_user(cred->gid, &target_cred->gid);
1931         } else {
1932             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1933                           cmsg->cmsg_level, cmsg->cmsg_type);
1934             memcpy(data, target_data, len);
1935         }
1936 
1937         cmsg = CMSG_NXTHDR(msgh, cmsg);
1938         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1939                                          target_cmsg_start);
1940     }
1941     unlock_user(target_cmsg, target_cmsg_addr, 0);
1942  the_end:
1943     msgh->msg_controllen = space;
1944     return 0;
1945 }
1946 
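/*
 * Convert received (host) control messages back into guest form.  If
 * the guest buffer is too small the data is truncated and MSG_CTRUNC is
 * reported, mirroring the kernel's put_cmsg() behaviour; payloads whose
 * target layout differs in size (e.g. SO_TIMESTAMP's timeval) adjust
 * tgt_len accordingly.
 */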
1947 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1948                                            struct msghdr *msgh)
1949 {
1950     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1951     abi_long msg_controllen;
1952     abi_ulong target_cmsg_addr;
1953     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1954     socklen_t space = 0;
1955 
1956     msg_controllen = tswapal(target_msgh->msg_controllen);
1957     if (msg_controllen < sizeof (struct target_cmsghdr))
1958         goto the_end;
1959     target_cmsg_addr = tswapal(target_msgh->msg_control);
1960     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1961     target_cmsg_start = target_cmsg;
1962     if (!target_cmsg)
1963         return -TARGET_EFAULT;
1964 
1965     while (cmsg && target_cmsg) {
1966         void *data = CMSG_DATA(cmsg);
1967         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1968 
1969         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1970         int tgt_len, tgt_space;
1971 
1972         /* We never copy a half-header but may copy half-data;
1973          * this is Linux's behaviour in put_cmsg(). Note that
1974          * truncation here is a guest problem (which we report
1975          * to the guest via the CTRUNC bit), unlike truncation
1976          * in target_to_host_cmsg, which is a QEMU bug.
1977          */
1978         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1979             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1980             break;
1981         }
1982 
1983         if (cmsg->cmsg_level == SOL_SOCKET) {
1984             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1985         } else {
1986             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1987         }
1988         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1989 
1990         /* Payload types which need a different size of payload on
1991          * the target must adjust tgt_len here.
1992          */
1993         tgt_len = len;
1994         switch (cmsg->cmsg_level) {
1995         case SOL_SOCKET:
1996             switch (cmsg->cmsg_type) {
1997             case SO_TIMESTAMP:
1998                 tgt_len = sizeof(struct target_timeval);
1999                 break;
2000             default:
2001                 break;
2002             }
2003             break;
2004         default:
2005             break;
2006         }
2007 
2008         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2009             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2010             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2011         }
2012 
2013         /* We must now copy-and-convert len bytes of payload
2014          * into tgt_len bytes of destination space. Bear in mind
2015          * that in both source and destination we may be dealing
2016          * with a truncated value!
2017          */
2018         switch (cmsg->cmsg_level) {
2019         case SOL_SOCKET:
2020             switch (cmsg->cmsg_type) {
2021             case SCM_RIGHTS:
2022             {
2023                 int *fd = (int *)data;
2024                 int *target_fd = (int *)target_data;
2025                 int i, numfds = tgt_len / sizeof(int);
2026 
2027                 for (i = 0; i < numfds; i++) {
2028                     __put_user(fd[i], target_fd + i);
2029                 }
2030                 break;
2031             }
2032             case SO_TIMESTAMP:
2033             {
2034                 struct timeval *tv = (struct timeval *)data;
2035                 struct target_timeval *target_tv =
2036                     (struct target_timeval *)target_data;
2037 
2038                 if (len != sizeof(struct timeval) ||
2039                     tgt_len != sizeof(struct target_timeval)) {
2040                     goto unimplemented;
2041                 }
2042 
2043                 /* copy struct timeval to target */
2044                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2045                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2046                 break;
2047             }
2048             case SCM_CREDENTIALS:
2049             {
2050                 struct ucred *cred = (struct ucred *)data;
2051                 struct target_ucred *target_cred =
2052                     (struct target_ucred *)target_data;
2053 
2054                 __put_user(cred->pid, &target_cred->pid);
2055                 __put_user(cred->uid, &target_cred->uid);
2056                 __put_user(cred->gid, &target_cred->gid);
2057                 break;
2058             }
2059             default:
2060                 goto unimplemented;
2061             }
2062             break;
2063 
2064         case SOL_IP:
2065             switch (cmsg->cmsg_type) {
2066             case IP_TTL:
2067             {
2068                 uint32_t *v = (uint32_t *)data;
2069                 uint32_t *t_int = (uint32_t *)target_data;
2070 
2071                 if (len != sizeof(uint32_t) ||
2072                     tgt_len != sizeof(uint32_t)) {
2073                     goto unimplemented;
2074                 }
2075                 __put_user(*v, t_int);
2076                 break;
2077             }
2078             case IP_RECVERR:
2079             {
2080                 struct errhdr_t {
2081                    struct sock_extended_err ee;
2082                    struct sockaddr_in offender;
2083                 };
2084                 struct errhdr_t *errh = (struct errhdr_t *)data;
2085                 struct errhdr_t *target_errh =
2086                     (struct errhdr_t *)target_data;
2087 
2088                 if (len != sizeof(struct errhdr_t) ||
2089                     tgt_len != sizeof(struct errhdr_t)) {
2090                     goto unimplemented;
2091                 }
2092                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2093                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2094                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2095                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2096                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2097                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2098                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2099                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2100                     (void *) &errh->offender, sizeof(errh->offender));
2101                 break;
2102             }
2103             default:
2104                 goto unimplemented;
2105             }
2106             break;
2107 
2108         case SOL_IPV6:
2109             switch (cmsg->cmsg_type) {
2110             case IPV6_HOPLIMIT:
2111             {
2112                 uint32_t *v = (uint32_t *)data;
2113                 uint32_t *t_int = (uint32_t *)target_data;
2114 
2115                 if (len != sizeof(uint32_t) ||
2116                     tgt_len != sizeof(uint32_t)) {
2117                     goto unimplemented;
2118                 }
2119                 __put_user(*v, t_int);
2120                 break;
2121             }
2122             case IPV6_RECVERR:
2123             {
2124                 struct errhdr6_t {
2125                    struct sock_extended_err ee;
2126                    struct sockaddr_in6 offender;
2127                 };
2128                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2129                 struct errhdr6_t *target_errh =
2130                     (struct errhdr6_t *)target_data;
2131 
2132                 if (len != sizeof(struct errhdr6_t) ||
2133                     tgt_len != sizeof(struct errhdr6_t)) {
2134                     goto unimplemented;
2135                 }
2136                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2137                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2138                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2139                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2140                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2141                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2142                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2143                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2144                     (void *) &errh->offender, sizeof(errh->offender));
2145                 break;
2146             }
2147             default:
2148                 goto unimplemented;
2149             }
2150             break;
2151 
2152         default:
2153         unimplemented:
2154             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2155                           cmsg->cmsg_level, cmsg->cmsg_type);
2156             memcpy(target_data, data, MIN(len, tgt_len));
2157             if (tgt_len > len) {
2158                 memset(target_data + len, 0, tgt_len - len);
2159             }
2160         }
2161 
2162         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2163         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2164         if (msg_controllen < tgt_space) {
2165             tgt_space = msg_controllen;
2166         }
2167         msg_controllen -= tgt_space;
2168         space += tgt_space;
2169         cmsg = CMSG_NXTHDR(msgh, cmsg);
2170         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2171                                          target_cmsg_start);
2172     }
2173     unlock_user(target_cmsg, target_cmsg_addr, space);
2174  the_end:
2175     target_msgh->msg_controllen = tswapal(space);
2176     return 0;
2177 }
2178 
2179 /* do_setsockopt() must return target values and target errnos. */
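/*
 * For the common integer-valued options this is little more than a
 * byte-swapped pass-through: for example, a guest
 * setsockopt(fd, SOL_SOCKET, TARGET_SO_KEEPALIVE, &one, 4) is read with
 * get_user_u32() and re-issued as
 * setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)).
 * Structured options (timeouts, multicast requests, BPF filters, ...)
 * are converted field by field below.
 */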
2180 static abi_long do_setsockopt(int sockfd, int level, int optname,
2181                               abi_ulong optval_addr, socklen_t optlen)
2182 {
2183     abi_long ret;
2184     int val;
2185     struct ip_mreqn *ip_mreq;
2186     struct ip_mreq_source *ip_mreq_source;
2187 
2188     switch(level) {
2189     case SOL_TCP:
2190     case SOL_UDP:
2191         /* TCP and UDP options all take an 'int' value.  */
2192         if (optlen < sizeof(uint32_t))
2193             return -TARGET_EINVAL;
2194 
2195         if (get_user_u32(val, optval_addr))
2196             return -TARGET_EFAULT;
2197         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2198         break;
2199     case SOL_IP:
2200         switch(optname) {
2201         case IP_TOS:
2202         case IP_TTL:
2203         case IP_HDRINCL:
2204         case IP_ROUTER_ALERT:
2205         case IP_RECVOPTS:
2206         case IP_RETOPTS:
2207         case IP_PKTINFO:
2208         case IP_MTU_DISCOVER:
2209         case IP_RECVERR:
2210         case IP_RECVTTL:
2211         case IP_RECVTOS:
2212 #ifdef IP_FREEBIND
2213         case IP_FREEBIND:
2214 #endif
2215         case IP_MULTICAST_TTL:
2216         case IP_MULTICAST_LOOP:
2217             val = 0;
2218             if (optlen >= sizeof(uint32_t)) {
2219                 if (get_user_u32(val, optval_addr))
2220                     return -TARGET_EFAULT;
2221             } else if (optlen >= 1) {
2222                 if (get_user_u8(val, optval_addr))
2223                     return -TARGET_EFAULT;
2224             }
2225             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2226             break;
2227         case IP_ADD_MEMBERSHIP:
2228         case IP_DROP_MEMBERSHIP:
2229             if (optlen < sizeof (struct target_ip_mreq) ||
2230                 optlen > sizeof (struct target_ip_mreqn))
2231                 return -TARGET_EINVAL;
2232 
2233             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2234             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2235             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2236             break;
2237 
2238         case IP_BLOCK_SOURCE:
2239         case IP_UNBLOCK_SOURCE:
2240         case IP_ADD_SOURCE_MEMBERSHIP:
2241         case IP_DROP_SOURCE_MEMBERSHIP:
2242             if (optlen != sizeof (struct target_ip_mreq_source))
2243                 return -TARGET_EINVAL;
2244 
2245             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2246             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2247             unlock_user (ip_mreq_source, optval_addr, 0);
2248             break;
2249 
2250         default:
2251             goto unimplemented;
2252         }
2253         break;
2254     case SOL_IPV6:
2255         switch (optname) {
2256         case IPV6_MTU_DISCOVER:
2257         case IPV6_MTU:
2258         case IPV6_V6ONLY:
2259         case IPV6_RECVPKTINFO:
2260         case IPV6_UNICAST_HOPS:
2261         case IPV6_MULTICAST_HOPS:
2262         case IPV6_MULTICAST_LOOP:
2263         case IPV6_RECVERR:
2264         case IPV6_RECVHOPLIMIT:
2265         case IPV6_2292HOPLIMIT:
2266         case IPV6_CHECKSUM:
2267         case IPV6_ADDRFORM:
2268         case IPV6_2292PKTINFO:
2269         case IPV6_RECVTCLASS:
2270         case IPV6_RECVRTHDR:
2271         case IPV6_2292RTHDR:
2272         case IPV6_RECVHOPOPTS:
2273         case IPV6_2292HOPOPTS:
2274         case IPV6_RECVDSTOPTS:
2275         case IPV6_2292DSTOPTS:
2276         case IPV6_TCLASS:
2277         case IPV6_ADDR_PREFERENCES:
2278 #ifdef IPV6_RECVPATHMTU
2279         case IPV6_RECVPATHMTU:
2280 #endif
2281 #ifdef IPV6_TRANSPARENT
2282         case IPV6_TRANSPARENT:
2283 #endif
2284 #ifdef IPV6_FREEBIND
2285         case IPV6_FREEBIND:
2286 #endif
2287 #ifdef IPV6_RECVORIGDSTADDR
2288         case IPV6_RECVORIGDSTADDR:
2289 #endif
2290             val = 0;
2291             if (optlen < sizeof(uint32_t)) {
2292                 return -TARGET_EINVAL;
2293             }
2294             if (get_user_u32(val, optval_addr)) {
2295                 return -TARGET_EFAULT;
2296             }
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        &val, sizeof(val)));
2299             break;
2300         case IPV6_PKTINFO:
2301         {
2302             struct in6_pktinfo pki;
2303 
2304             if (optlen < sizeof(pki)) {
2305                 return -TARGET_EINVAL;
2306             }
2307 
2308             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2309                 return -TARGET_EFAULT;
2310             }
2311 
2312             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2313 
2314             ret = get_errno(setsockopt(sockfd, level, optname,
2315                                        &pki, sizeof(pki)));
2316             break;
2317         }
2318         case IPV6_ADD_MEMBERSHIP:
2319         case IPV6_DROP_MEMBERSHIP:
2320         {
2321             struct ipv6_mreq ipv6mreq;
2322 
2323             if (optlen < sizeof(ipv6mreq)) {
2324                 return -TARGET_EINVAL;
2325             }
2326 
2327             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2328                 return -TARGET_EFAULT;
2329             }
2330 
2331             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2332 
2333             ret = get_errno(setsockopt(sockfd, level, optname,
2334                                        &ipv6mreq, sizeof(ipv6mreq)));
2335             break;
2336         }
2337         default:
2338             goto unimplemented;
2339         }
2340         break;
2341     case SOL_ICMPV6:
2342         switch (optname) {
2343         case ICMPV6_FILTER:
2344         {
2345             struct icmp6_filter icmp6f;
2346 
2347             if (optlen > sizeof(icmp6f)) {
2348                 optlen = sizeof(icmp6f);
2349             }
2350 
2351             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2352                 return -TARGET_EFAULT;
2353             }
2354 
2355             for (val = 0; val < 8; val++) {
2356                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2357             }
2358 
2359             ret = get_errno(setsockopt(sockfd, level, optname,
2360                                        &icmp6f, optlen));
2361             break;
2362         }
2363         default:
2364             goto unimplemented;
2365         }
2366         break;
2367     case SOL_RAW:
2368         switch (optname) {
2369         case ICMP_FILTER:
2370         case IPV6_CHECKSUM:
2371             /* these take a u32 value */
2372             if (optlen < sizeof(uint32_t)) {
2373                 return -TARGET_EINVAL;
2374             }
2375 
2376             if (get_user_u32(val, optval_addr)) {
2377                 return -TARGET_EFAULT;
2378             }
2379             ret = get_errno(setsockopt(sockfd, level, optname,
2380                                        &val, sizeof(val)));
2381             break;
2382 
2383         default:
2384             goto unimplemented;
2385         }
2386         break;
2387 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2388     case SOL_ALG:
2389         switch (optname) {
2390         case ALG_SET_KEY:
2391         {
2392             char *alg_key = g_try_malloc(optlen);
2393 
2394             if (!alg_key) {
2395                 return -TARGET_ENOMEM;
2396             }
2397             if (copy_from_user(alg_key, optval_addr, optlen)) {
2398                 g_free(alg_key);
2399                 return -TARGET_EFAULT;
2400             }
2401             ret = get_errno(setsockopt(sockfd, level, optname,
2402                                        alg_key, optlen));
2403             g_free(alg_key);
2404             break;
2405         }
2406         case ALG_SET_AEAD_AUTHSIZE:
2407         {
2408             ret = get_errno(setsockopt(sockfd, level, optname,
2409                                        NULL, optlen));
2410             break;
2411         }
2412         default:
2413             goto unimplemented;
2414         }
2415         break;
2416 #endif
2417     case TARGET_SOL_SOCKET:
2418         switch (optname) {
2419         case TARGET_SO_RCVTIMEO:
2420         {
2421                 struct timeval tv;
2422 
2423                 optname = SO_RCVTIMEO;
2424 
2425 set_timeout:
2426                 if (optlen != sizeof(struct target_timeval)) {
2427                     return -TARGET_EINVAL;
2428                 }
2429 
2430                 if (copy_from_user_timeval(&tv, optval_addr)) {
2431                     return -TARGET_EFAULT;
2432                 }
2433 
2434                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2435                                 &tv, sizeof(tv)));
2436                 return ret;
2437         }
2438         case TARGET_SO_SNDTIMEO:
2439                 optname = SO_SNDTIMEO;
2440                 goto set_timeout;
2441         case TARGET_SO_ATTACH_FILTER:
2442         {
2443                 struct target_sock_fprog *tfprog;
2444                 struct target_sock_filter *tfilter;
2445                 struct sock_fprog fprog;
2446                 struct sock_filter *filter;
2447                 int i;
2448 
2449                 if (optlen != sizeof(*tfprog)) {
2450                     return -TARGET_EINVAL;
2451                 }
2452                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2453                     return -TARGET_EFAULT;
2454                 }
2455                 if (!lock_user_struct(VERIFY_READ, tfilter,
2456                                       tswapal(tfprog->filter), 0)) {
2457                     unlock_user_struct(tfprog, optval_addr, 1);
2458                     return -TARGET_EFAULT;
2459                 }
2460 
2461                 fprog.len = tswap16(tfprog->len);
2462                 filter = g_try_new(struct sock_filter, fprog.len);
2463                 if (filter == NULL) {
2464                     unlock_user_struct(tfilter, tfprog->filter, 1);
2465                     unlock_user_struct(tfprog, optval_addr, 1);
2466                     return -TARGET_ENOMEM;
2467                 }
2468                 for (i = 0; i < fprog.len; i++) {
2469                     filter[i].code = tswap16(tfilter[i].code);
2470                     filter[i].jt = tfilter[i].jt;
2471                     filter[i].jf = tfilter[i].jf;
2472                     filter[i].k = tswap32(tfilter[i].k);
2473                 }
2474                 fprog.filter = filter;
2475 
2476                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2477                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2478                 g_free(filter);
2479 
2480                 unlock_user_struct(tfilter, tfprog->filter, 1);
2481                 unlock_user_struct(tfprog, optval_addr, 1);
2482                 return ret;
2483         }
2484         case TARGET_SO_BINDTODEVICE:
2485         {
2486                 char *dev_ifname, *addr_ifname;
2487 
2488                 if (optlen > IFNAMSIZ - 1) {
2489                     optlen = IFNAMSIZ - 1;
2490                 }
2491                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2492                 if (!dev_ifname) {
2493                     return -TARGET_EFAULT;
2494                 }
2495                 optname = SO_BINDTODEVICE;
2496                 addr_ifname = alloca(IFNAMSIZ);
2497                 memcpy(addr_ifname, dev_ifname, optlen);
2498                 addr_ifname[optlen] = 0;
2499                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2500                                            addr_ifname, optlen));
2501                 unlock_user(dev_ifname, optval_addr, 0);
2502                 return ret;
2503         }
2504         case TARGET_SO_LINGER:
2505         {
2506                 struct linger lg;
2507                 struct target_linger *tlg;
2508 
2509                 if (optlen != sizeof(struct target_linger)) {
2510                     return -TARGET_EINVAL;
2511                 }
2512                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2513                     return -TARGET_EFAULT;
2514                 }
2515                 __get_user(lg.l_onoff, &tlg->l_onoff);
2516                 __get_user(lg.l_linger, &tlg->l_linger);
2517                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2518                                 &lg, sizeof(lg)));
2519                 unlock_user_struct(tlg, optval_addr, 0);
2520                 return ret;
2521         }
2522             /* Options with 'int' argument.  */
2523         case TARGET_SO_DEBUG:
2524                 optname = SO_DEBUG;
2525                 break;
2526         case TARGET_SO_REUSEADDR:
2527                 optname = SO_REUSEADDR;
2528                 break;
2529 #ifdef SO_REUSEPORT
2530         case TARGET_SO_REUSEPORT:
2531                 optname = SO_REUSEPORT;
2532                 break;
2533 #endif
2534         case TARGET_SO_TYPE:
2535                 optname = SO_TYPE;
2536                 break;
2537         case TARGET_SO_ERROR:
2538                 optname = SO_ERROR;
2539                 break;
2540         case TARGET_SO_DONTROUTE:
2541                 optname = SO_DONTROUTE;
2542                 break;
2543         case TARGET_SO_BROADCAST:
2544                 optname = SO_BROADCAST;
2545                 break;
2546         case TARGET_SO_SNDBUF:
2547                 optname = SO_SNDBUF;
2548                 break;
2549         case TARGET_SO_SNDBUFFORCE:
2550                 optname = SO_SNDBUFFORCE;
2551                 break;
2552         case TARGET_SO_RCVBUF:
2553                 optname = SO_RCVBUF;
2554                 break;
2555         case TARGET_SO_RCVBUFFORCE:
2556                 optname = SO_RCVBUFFORCE;
2557                 break;
2558         case TARGET_SO_KEEPALIVE:
2559                 optname = SO_KEEPALIVE;
2560                 break;
2561         case TARGET_SO_OOBINLINE:
2562                 optname = SO_OOBINLINE;
2563                 break;
2564         case TARGET_SO_NO_CHECK:
2565                 optname = SO_NO_CHECK;
2566                 break;
2567         case TARGET_SO_PRIORITY:
2568                 optname = SO_PRIORITY;
2569                 break;
2570 #ifdef SO_BSDCOMPAT
2571         case TARGET_SO_BSDCOMPAT:
2572                 optname = SO_BSDCOMPAT;
2573                 break;
2574 #endif
2575         case TARGET_SO_PASSCRED:
2576                 optname = SO_PASSCRED;
2577                 break;
2578         case TARGET_SO_PASSSEC:
2579                 optname = SO_PASSSEC;
2580                 break;
2581         case TARGET_SO_TIMESTAMP:
2582                 optname = SO_TIMESTAMP;
2583                 break;
2584         case TARGET_SO_RCVLOWAT:
2585                 optname = SO_RCVLOWAT;
2586                 break;
2587         default:
2588             goto unimplemented;
2589         }
2590         if (optlen < sizeof(uint32_t))
2591             return -TARGET_EINVAL;
2592 
2593         if (get_user_u32(val, optval_addr))
2594             return -TARGET_EFAULT;
2595         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2596         break;
2597 #ifdef SOL_NETLINK
2598     case SOL_NETLINK:
2599         switch (optname) {
2600         case NETLINK_PKTINFO:
2601         case NETLINK_ADD_MEMBERSHIP:
2602         case NETLINK_DROP_MEMBERSHIP:
2603         case NETLINK_BROADCAST_ERROR:
2604         case NETLINK_NO_ENOBUFS:
2605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2606         case NETLINK_LISTEN_ALL_NSID:
2607         case NETLINK_CAP_ACK:
2608 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2609 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2610         case NETLINK_EXT_ACK:
2611 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2612 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2613         case NETLINK_GET_STRICT_CHK:
2614 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2615             break;
2616         default:
2617             goto unimplemented;
2618         }
2619         val = 0;
2620         if (optlen < sizeof(uint32_t)) {
2621             return -TARGET_EINVAL;
2622         }
2623         if (get_user_u32(val, optval_addr)) {
2624             return -TARGET_EFAULT;
2625         }
2626         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2627                                    sizeof(val)));
2628         break;
2629 #endif /* SOL_NETLINK */
2630     default:
2631     unimplemented:
2632         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2633                       level, optname);
2634         ret = -TARGET_ENOPROTOOPT;
2635     }
2636     return ret;
2637 }
2638 
2639 /* do_getsockopt() must return target values and target errnos. */
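/*
 * Most options funnel through the shared 'int_case' path below: the
 * guest's optlen is read with get_user_u32(), the host value is fetched
 * with getsockopt(), and roughly either one or four bytes are written
 * back depending on how much room the guest asked for.
 */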
2640 static abi_long do_getsockopt(int sockfd, int level, int optname,
2641                               abi_ulong optval_addr, abi_ulong optlen)
2642 {
2643     abi_long ret;
2644     int len, val;
2645     socklen_t lv;
2646 
2647     switch(level) {
2648     case TARGET_SOL_SOCKET:
2649         level = SOL_SOCKET;
2650         switch (optname) {
2651         /* These don't just return a single integer */
2652         case TARGET_SO_PEERNAME:
2653             goto unimplemented;
2654         case TARGET_SO_RCVTIMEO: {
2655             struct timeval tv;
2656             socklen_t tvlen;
2657 
2658             optname = SO_RCVTIMEO;
2659 
2660 get_timeout:
2661             if (get_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             if (len < 0) {
2665                 return -TARGET_EINVAL;
2666             }
2667 
2668             tvlen = sizeof(tv);
2669             ret = get_errno(getsockopt(sockfd, level, optname,
2670                                        &tv, &tvlen));
2671             if (ret < 0) {
2672                 return ret;
2673             }
2674             if (len > sizeof(struct target_timeval)) {
2675                 len = sizeof(struct target_timeval);
2676             }
2677             if (copy_to_user_timeval(optval_addr, &tv)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             if (put_user_u32(len, optlen)) {
2681                 return -TARGET_EFAULT;
2682             }
2683             break;
2684         }
2685         case TARGET_SO_SNDTIMEO:
2686             optname = SO_SNDTIMEO;
2687             goto get_timeout;
2688         case TARGET_SO_PEERCRED: {
2689             struct ucred cr;
2690             socklen_t crlen;
2691             struct target_ucred *tcr;
2692 
2693             if (get_user_u32(len, optlen)) {
2694                 return -TARGET_EFAULT;
2695             }
2696             if (len < 0) {
2697                 return -TARGET_EINVAL;
2698             }
2699 
2700             crlen = sizeof(cr);
2701             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2702                                        &cr, &crlen));
2703             if (ret < 0) {
2704                 return ret;
2705             }
2706             if (len > crlen) {
2707                 len = crlen;
2708             }
2709             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2710                 return -TARGET_EFAULT;
2711             }
2712             __put_user(cr.pid, &tcr->pid);
2713             __put_user(cr.uid, &tcr->uid);
2714             __put_user(cr.gid, &tcr->gid);
2715             unlock_user_struct(tcr, optval_addr, 1);
2716             if (put_user_u32(len, optlen)) {
2717                 return -TARGET_EFAULT;
2718             }
2719             break;
2720         }
2721         case TARGET_SO_PEERSEC: {
2722             char *name;
2723 
2724             if (get_user_u32(len, optlen)) {
2725                 return -TARGET_EFAULT;
2726             }
2727             if (len < 0) {
2728                 return -TARGET_EINVAL;
2729             }
2730             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2731             if (!name) {
2732                 return -TARGET_EFAULT;
2733             }
2734             lv = len;
2735             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2736                                        name, &lv));
2737             if (put_user_u32(lv, optlen)) {
2738                 ret = -TARGET_EFAULT;
2739             }
2740             unlock_user(name, optval_addr, lv);
2741             break;
2742         }
2743         case TARGET_SO_LINGER:
2744         {
2745             struct linger lg;
2746             socklen_t lglen;
2747             struct target_linger *tlg;
2748 
2749             if (get_user_u32(len, optlen)) {
2750                 return -TARGET_EFAULT;
2751             }
2752             if (len < 0) {
2753                 return -TARGET_EINVAL;
2754             }
2755 
2756             lglen = sizeof(lg);
2757             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2758                                        &lg, &lglen));
2759             if (ret < 0) {
2760                 return ret;
2761             }
2762             if (len > lglen) {
2763                 len = lglen;
2764             }
2765             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2766                 return -TARGET_EFAULT;
2767             }
2768             __put_user(lg.l_onoff, &tlg->l_onoff);
2769             __put_user(lg.l_linger, &tlg->l_linger);
2770             unlock_user_struct(tlg, optval_addr, 1);
2771             if (put_user_u32(len, optlen)) {
2772                 return -TARGET_EFAULT;
2773             }
2774             break;
2775         }
2776         /* Options with 'int' argument.  */
2777         case TARGET_SO_DEBUG:
2778             optname = SO_DEBUG;
2779             goto int_case;
2780         case TARGET_SO_REUSEADDR:
2781             optname = SO_REUSEADDR;
2782             goto int_case;
2783 #ifdef SO_REUSEPORT
2784         case TARGET_SO_REUSEPORT:
2785             optname = SO_REUSEPORT;
2786             goto int_case;
2787 #endif
2788         case TARGET_SO_TYPE:
2789             optname = SO_TYPE;
2790             goto int_case;
2791         case TARGET_SO_ERROR:
2792             optname = SO_ERROR;
2793             goto int_case;
2794         case TARGET_SO_DONTROUTE:
2795             optname = SO_DONTROUTE;
2796             goto int_case;
2797         case TARGET_SO_BROADCAST:
2798             optname = SO_BROADCAST;
2799             goto int_case;
2800         case TARGET_SO_SNDBUF:
2801             optname = SO_SNDBUF;
2802             goto int_case;
2803         case TARGET_SO_RCVBUF:
2804             optname = SO_RCVBUF;
2805             goto int_case;
2806         case TARGET_SO_KEEPALIVE:
2807             optname = SO_KEEPALIVE;
2808             goto int_case;
2809         case TARGET_SO_OOBINLINE:
2810             optname = SO_OOBINLINE;
2811             goto int_case;
2812         case TARGET_SO_NO_CHECK:
2813             optname = SO_NO_CHECK;
2814             goto int_case;
2815         case TARGET_SO_PRIORITY:
2816             optname = SO_PRIORITY;
2817             goto int_case;
2818 #ifdef SO_BSDCOMPAT
2819         case TARGET_SO_BSDCOMPAT:
2820             optname = SO_BSDCOMPAT;
2821             goto int_case;
2822 #endif
2823         case TARGET_SO_PASSCRED:
2824             optname = SO_PASSCRED;
2825             goto int_case;
2826         case TARGET_SO_TIMESTAMP:
2827             optname = SO_TIMESTAMP;
2828             goto int_case;
2829         case TARGET_SO_RCVLOWAT:
2830             optname = SO_RCVLOWAT;
2831             goto int_case;
2832         case TARGET_SO_ACCEPTCONN:
2833             optname = SO_ACCEPTCONN;
2834             goto int_case;
2835         default:
2836             goto int_case;
2837         }
2838         break;
2839     case SOL_TCP:
2840     case SOL_UDP:
2841         /* TCP and UDP options all take an 'int' value.  */
2842     int_case:
2843         if (get_user_u32(len, optlen))
2844             return -TARGET_EFAULT;
2845         if (len < 0)
2846             return -TARGET_EINVAL;
2847         lv = sizeof(lv);
2848         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2849         if (ret < 0)
2850             return ret;
2851         if (optname == SO_TYPE) {
2852             val = host_to_target_sock_type(val);
2853         }
2854         if (len > lv)
2855             len = lv;
2856         if (len == 4) {
2857             if (put_user_u32(val, optval_addr))
2858                 return -TARGET_EFAULT;
2859         } else {
2860             if (put_user_u8(val, optval_addr))
2861                 return -TARGET_EFAULT;
2862         }
2863         if (put_user_u32(len, optlen))
2864             return -TARGET_EFAULT;
2865         break;
2866     case SOL_IP:
2867         switch(optname) {
2868         case IP_TOS:
2869         case IP_TTL:
2870         case IP_HDRINCL:
2871         case IP_ROUTER_ALERT:
2872         case IP_RECVOPTS:
2873         case IP_RETOPTS:
2874         case IP_PKTINFO:
2875         case IP_MTU_DISCOVER:
2876         case IP_RECVERR:
2877         case IP_RECVTOS:
2878 #ifdef IP_FREEBIND
2879         case IP_FREEBIND:
2880 #endif
2881         case IP_MULTICAST_TTL:
2882         case IP_MULTICAST_LOOP:
2883             if (get_user_u32(len, optlen))
2884                 return -TARGET_EFAULT;
2885             if (len < 0)
2886                 return -TARGET_EINVAL;
2887             lv = sizeof(lv);
2888             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2889             if (ret < 0)
2890                 return ret;
2891             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2892                 len = 1;
2893                 if (put_user_u32(len, optlen)
2894                     || put_user_u8(val, optval_addr))
2895                     return -TARGET_EFAULT;
2896             } else {
2897                 if (len > sizeof(int))
2898                     len = sizeof(int);
2899                 if (put_user_u32(len, optlen)
2900                     || put_user_u32(val, optval_addr))
2901                     return -TARGET_EFAULT;
2902             }
2903             break;
2904         default:
2905             ret = -TARGET_ENOPROTOOPT;
2906             break;
2907         }
2908         break;
2909     case SOL_IPV6:
2910         switch (optname) {
2911         case IPV6_MTU_DISCOVER:
2912         case IPV6_MTU:
2913         case IPV6_V6ONLY:
2914         case IPV6_RECVPKTINFO:
2915         case IPV6_UNICAST_HOPS:
2916         case IPV6_MULTICAST_HOPS:
2917         case IPV6_MULTICAST_LOOP:
2918         case IPV6_RECVERR:
2919         case IPV6_RECVHOPLIMIT:
2920         case IPV6_2292HOPLIMIT:
2921         case IPV6_CHECKSUM:
2922         case IPV6_ADDRFORM:
2923         case IPV6_2292PKTINFO:
2924         case IPV6_RECVTCLASS:
2925         case IPV6_RECVRTHDR:
2926         case IPV6_2292RTHDR:
2927         case IPV6_RECVHOPOPTS:
2928         case IPV6_2292HOPOPTS:
2929         case IPV6_RECVDSTOPTS:
2930         case IPV6_2292DSTOPTS:
2931         case IPV6_TCLASS:
2932         case IPV6_ADDR_PREFERENCES:
2933 #ifdef IPV6_RECVPATHMTU
2934         case IPV6_RECVPATHMTU:
2935 #endif
2936 #ifdef IPV6_TRANSPARENT
2937         case IPV6_TRANSPARENT:
2938 #endif
2939 #ifdef IPV6_FREEBIND
2940         case IPV6_FREEBIND:
2941 #endif
2942 #ifdef IPV6_RECVORIGDSTADDR
2943         case IPV6_RECVORIGDSTADDR:
2944 #endif
2945             if (get_user_u32(len, optlen))
2946                 return -TARGET_EFAULT;
2947             if (len < 0)
2948                 return -TARGET_EINVAL;
2949             lv = sizeof(lv);
2950             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2951             if (ret < 0)
2952                 return ret;
2953             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2954                 len = 1;
2955                 if (put_user_u32(len, optlen)
2956                     || put_user_u8(val, optval_addr))
2957                     return -TARGET_EFAULT;
2958             } else {
2959                 if (len > sizeof(int))
2960                     len = sizeof(int);
2961                 if (put_user_u32(len, optlen)
2962                     || put_user_u32(val, optval_addr))
2963                     return -TARGET_EFAULT;
2964             }
2965             break;
2966         default:
2967             ret = -TARGET_ENOPROTOOPT;
2968             break;
2969         }
2970         break;
2971 #ifdef SOL_NETLINK
2972     case SOL_NETLINK:
2973         switch (optname) {
2974         case NETLINK_PKTINFO:
2975         case NETLINK_BROADCAST_ERROR:
2976         case NETLINK_NO_ENOBUFS:
2977 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2978         case NETLINK_LISTEN_ALL_NSID:
2979         case NETLINK_CAP_ACK:
2980 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2981 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2982         case NETLINK_EXT_ACK:
2983 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2984 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2985         case NETLINK_GET_STRICT_CHK:
2986 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2987             if (get_user_u32(len, optlen)) {
2988                 return -TARGET_EFAULT;
2989             }
2990             if (len != sizeof(val)) {
2991                 return -TARGET_EINVAL;
2992             }
2993             lv = len;
2994             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2995             if (ret < 0) {
2996                 return ret;
2997             }
2998             if (put_user_u32(lv, optlen)
2999                 || put_user_u32(val, optval_addr)) {
3000                 return -TARGET_EFAULT;
3001             }
3002             break;
3003 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3004         case NETLINK_LIST_MEMBERSHIPS:
3005         {
3006             uint32_t *results;
3007             int i;
3008             if (get_user_u32(len, optlen)) {
3009                 return -TARGET_EFAULT;
3010             }
3011             if (len < 0) {
3012                 return -TARGET_EINVAL;
3013             }
3014             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3015             if (!results) {
3016                 return -TARGET_EFAULT;
3017             }
3018             lv = len;
3019             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3020             if (ret < 0) {
3021                 unlock_user(results, optval_addr, 0);
3022                 return ret;
3023             }
3024             /* swap host endianness to target endianness. */
3025             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3026                 results[i] = tswap32(results[i]);
3027             }
3028             if (put_user_u32(lv, optlen)) {
3029                 return -TARGET_EFAULT;
3030             }
3031             unlock_user(results, optval_addr, 0);
3032             break;
3033         }
3034 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3035         default:
3036             goto unimplemented;
3037         }
3038         break;
3039 #endif /* SOL_NETLINK */
3040     default:
3041     unimplemented:
3042         qemu_log_mask(LOG_UNIMP,
3043                       "getsockopt level=%d optname=%d not yet supported\n",
3044                       level, optname);
3045         ret = -TARGET_EOPNOTSUPP;
3046         break;
3047     }
3048     return ret;
3049 }
3050 
3051 /* Convert target low/high pair representing file offset into the host
3052  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3053  * as the kernel doesn't handle them either.
3054  */
3055 static void target_to_host_low_high(abi_ulong tlow,
3056                                     abi_ulong thigh,
3057                                     unsigned long *hlow,
3058                                     unsigned long *hhigh)
3059 {
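    /*
     * Note: shifting in two half-width steps avoids a shift by the full
     * width of the type (undefined behaviour in C) when TARGET_LONG_BITS
     * or HOST_LONG_BITS is 64; in that case the high part is simply
     * discarded, matching the comment above about offsets wider than
     * 64 bits not being handled.
     */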
3060     uint64_t off = tlow |
3061         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3062         TARGET_LONG_BITS / 2;
3063 
3064     *hlow = off;
3065     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3066 }
3067 
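/*
 * Lock a guest iovec array and convert it into a host struct iovec array.
 * Returns NULL with errno set on error (errno is cleared to 0 for a zero
 * count); on success the result must be released with unlock_iovec().
 */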
3068 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3069                                 abi_ulong count, int copy)
3070 {
3071     struct target_iovec *target_vec;
3072     struct iovec *vec;
3073     abi_ulong total_len, max_len;
3074     int i;
3075     int err = 0;
3076     bool bad_address = false;
3077 
3078     if (count == 0) {
3079         errno = 0;
3080         return NULL;
3081     }
3082     if (count > IOV_MAX) {
3083         errno = EINVAL;
3084         return NULL;
3085     }
3086 
3087     vec = g_try_new0(struct iovec, count);
3088     if (vec == NULL) {
3089         errno = ENOMEM;
3090         return NULL;
3091     }
3092 
3093     target_vec = lock_user(VERIFY_READ, target_addr,
3094                            count * sizeof(struct target_iovec), 1);
3095     if (target_vec == NULL) {
3096         err = EFAULT;
3097         goto fail2;
3098     }
3099 
3100     /* ??? If host page size > target page size, this will result in a
3101        value larger than what we can actually support.  */
3102     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3103     total_len = 0;
3104 
3105     for (i = 0; i < count; i++) {
3106         abi_ulong base = tswapal(target_vec[i].iov_base);
3107         abi_long len = tswapal(target_vec[i].iov_len);
3108 
3109         if (len < 0) {
3110             err = EINVAL;
3111             goto fail;
3112         } else if (len == 0) {
3113             /* Zero length pointer is ignored.  */
3114             vec[i].iov_base = 0;
3115         } else {
3116             vec[i].iov_base = lock_user(type, base, len, copy);
3117             /* If the first buffer pointer is bad, this is a fault.  But
3118              * subsequent bad buffers will result in a partial write; this
3119              * is realized by filling the vector with null pointers and
3120              * zero lengths. */
3121             if (!vec[i].iov_base) {
3122                 if (i == 0) {
3123                     err = EFAULT;
3124                     goto fail;
3125                 } else {
3126                     bad_address = true;
3127                 }
3128             }
3129             if (bad_address) {
3130                 len = 0;
3131             }
3132             if (len > max_len - total_len) {
3133                 len = max_len - total_len;
3134             }
3135         }
3136         vec[i].iov_len = len;
3137         total_len += len;
3138     }
3139 
3140     unlock_user(target_vec, target_addr, 0);
3141     return vec;
3142 
3143  fail:
3144     while (--i >= 0) {
3145         if (tswapal(target_vec[i].iov_len) > 0) {
3146             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3147         }
3148     }
3149     unlock_user(target_vec, target_addr, 0);
3150  fail2:
3151     g_free(vec);
3152     errno = err;
3153     return NULL;
3154 }
3155 
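/*
 * Release an iovec set up by lock_iovec(), copying buffer contents back
 * to guest memory when 'copy' is non-zero, and free the host array.
 */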
3156 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3157                          abi_ulong count, int copy)
3158 {
3159     struct target_iovec *target_vec;
3160     int i;
3161 
3162     target_vec = lock_user(VERIFY_READ, target_addr,
3163                            count * sizeof(struct target_iovec), 1);
3164     if (target_vec) {
3165         for (i = 0; i < count; i++) {
3166             abi_ulong base = tswapal(target_vec[i].iov_base);
3167             abi_long len = tswapal(target_vec[i].iov_len);
3168             if (len < 0) {
3169                 break;
3170             }
3171             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3172         }
3173         unlock_user(target_vec, target_addr, 0);
3174     }
3175 
3176     g_free(vec);
3177 }
3178 
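/*
 * Translate the target socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK
 * flags into host values; returns -TARGET_EINVAL if a requested flag
 * cannot be expressed on this host.
 */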
3179 static inline int target_to_host_sock_type(int *type)
3180 {
3181     int host_type = 0;
3182     int target_type = *type;
3183 
3184     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3185     case TARGET_SOCK_DGRAM:
3186         host_type = SOCK_DGRAM;
3187         break;
3188     case TARGET_SOCK_STREAM:
3189         host_type = SOCK_STREAM;
3190         break;
3191     default:
3192         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3193         break;
3194     }
3195     if (target_type & TARGET_SOCK_CLOEXEC) {
3196 #if defined(SOCK_CLOEXEC)
3197         host_type |= SOCK_CLOEXEC;
3198 #else
3199         return -TARGET_EINVAL;
3200 #endif
3201     }
3202     if (target_type & TARGET_SOCK_NONBLOCK) {
3203 #if defined(SOCK_NONBLOCK)
3204         host_type |= SOCK_NONBLOCK;
3205 #elif !defined(O_NONBLOCK)
3206         return -TARGET_EINVAL;
3207 #endif
3208     }
3209     *type = host_type;
3210     return 0;
3211 }
3212 
3213 /* Try to emulate socket type flags after socket creation.  */
3214 static int sock_flags_fixup(int fd, int target_type)
3215 {
3216 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3217     if (target_type & TARGET_SOCK_NONBLOCK) {
3218         int flags = fcntl(fd, F_GETFL);
3219         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3220             close(fd);
3221             return -TARGET_EINVAL;
3222         }
3223     }
3224 #endif
3225     return fd;
3226 }
3227 
3228 /* do_socket() Must return target values and target errnos. */
3229 static abi_long do_socket(int domain, int type, int protocol)
3230 {
3231     int target_type = type;
3232     int ret;
3233 
3234     ret = target_to_host_sock_type(&type);
3235     if (ret) {
3236         return ret;
3237     }
3238 
3239     if (domain == PF_NETLINK && !(
3240 #ifdef CONFIG_RTNETLINK
3241          protocol == NETLINK_ROUTE ||
3242 #endif
3243          protocol == NETLINK_KOBJECT_UEVENT ||
3244          protocol == NETLINK_AUDIT)) {
3245         return -TARGET_EPROTONOSUPPORT;
3246     }
3247 
3248     if (domain == AF_PACKET ||
3249         (domain == AF_INET && type == SOCK_PACKET)) {
3250         protocol = tswap16(protocol);
3251     }
3252 
3253     ret = get_errno(socket(domain, type, protocol));
3254     if (ret >= 0) {
3255         ret = sock_flags_fixup(ret, target_type);
3256         if (type == SOCK_PACKET) {
3257             /* Handle an obsolete case:
3258              * if the socket type is SOCK_PACKET, bind by name.
3259              */
3260             fd_trans_register(ret, &target_packet_trans);
3261         } else if (domain == PF_NETLINK) {
3262             switch (protocol) {
3263 #ifdef CONFIG_RTNETLINK
3264             case NETLINK_ROUTE:
3265                 fd_trans_register(ret, &target_netlink_route_trans);
3266                 break;
3267 #endif
3268             case NETLINK_KOBJECT_UEVENT:
3269                 /* nothing to do: messages are strings */
3270                 break;
3271             case NETLINK_AUDIT:
3272                 fd_trans_register(ret, &target_netlink_audit_trans);
3273                 break;
3274             default:
3275                 g_assert_not_reached();
3276             }
3277         }
3278     }
3279     return ret;
3280 }
3281 
3282 /* do_bind() Must return target values and target errnos. */
3283 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3284                         socklen_t addrlen)
3285 {
3286     void *addr;
3287     abi_long ret;
3288 
3289     if ((int)addrlen < 0) {
3290         return -TARGET_EINVAL;
3291     }
3292 
3293     addr = alloca(addrlen+1);
3294 
3295     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3296     if (ret)
3297         return ret;
3298 
3299     return get_errno(bind(sockfd, addr, addrlen));
3300 }
3301 
3302 /* do_connect() Must return target values and target errnos. */
3303 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3304                            socklen_t addrlen)
3305 {
3306     void *addr;
3307     abi_long ret;
3308 
3309     if ((int)addrlen < 0) {
3310         return -TARGET_EINVAL;
3311     }
3312 
3313     addr = alloca(addrlen+1);
3314 
3315     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3316     if (ret)
3317         return ret;
3318 
3319     return get_errno(safe_connect(sockfd, addr, addrlen));
3320 }
3321 
3322 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3323 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3324                                       int flags, int send)
3325 {
3326     abi_long ret, len;
3327     struct msghdr msg;
3328     abi_ulong count;
3329     struct iovec *vec;
3330     abi_ulong target_vec;
3331 
3332     if (msgp->msg_name) {
3333         msg.msg_namelen = tswap32(msgp->msg_namelen);
3334         msg.msg_name = alloca(msg.msg_namelen+1);
3335         ret = target_to_host_sockaddr(fd, msg.msg_name,
3336                                       tswapal(msgp->msg_name),
3337                                       msg.msg_namelen);
3338         if (ret == -TARGET_EFAULT) {
3339             /* For connected sockets msg_name and msg_namelen must
3340              * be ignored, so returning EFAULT immediately is wrong.
3341              * Instead, pass a bad msg_name to the host kernel, and
3342              * let it decide whether to return EFAULT or not.
3343              */
3344             msg.msg_name = (void *)-1;
3345         } else if (ret) {
3346             goto out2;
3347         }
3348     } else {
3349         msg.msg_name = NULL;
3350         msg.msg_namelen = 0;
3351     }
3352     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3353     msg.msg_control = alloca(msg.msg_controllen);
3354     memset(msg.msg_control, 0, msg.msg_controllen);
3355 
3356     msg.msg_flags = tswap32(msgp->msg_flags);
3357 
3358     count = tswapal(msgp->msg_iovlen);
3359     target_vec = tswapal(msgp->msg_iov);
3360 
3361     if (count > IOV_MAX) {
3362         /* sendmsg/recvmsg return a different errno for this condition than
3363          * readv/writev, so we must catch it here before lock_iovec() does.
3364          */
3365         ret = -TARGET_EMSGSIZE;
3366         goto out2;
3367     }
3368 
3369     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3370                      target_vec, count, send);
3371     if (vec == NULL) {
3372         ret = -host_to_target_errno(errno);
3373         goto out2;
3374     }
3375     msg.msg_iovlen = count;
3376     msg.msg_iov = vec;
3377 
3378     if (send) {
3379         if (fd_trans_target_to_host_data(fd)) {
3380             void *host_msg;
3381 
3382             host_msg = g_malloc(msg.msg_iov->iov_len);
3383             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3384             ret = fd_trans_target_to_host_data(fd)(host_msg,
3385                                                    msg.msg_iov->iov_len);
3386             if (ret >= 0) {
3387                 msg.msg_iov->iov_base = host_msg;
3388                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3389             }
3390             g_free(host_msg);
3391         } else {
3392             ret = target_to_host_cmsg(&msg, msgp);
3393             if (ret == 0) {
3394                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3395             }
3396         }
3397     } else {
3398         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3399         if (!is_error(ret)) {
3400             len = ret;
3401             if (fd_trans_host_to_target_data(fd)) {
3402                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3403                                                MIN(msg.msg_iov->iov_len, len));
3404             } else {
3405                 ret = host_to_target_cmsg(msgp, &msg);
3406             }
3407             if (!is_error(ret)) {
3408                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3409                 msgp->msg_flags = tswap32(msg.msg_flags);
3410                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3411                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3412                                     msg.msg_name, msg.msg_namelen);
3413                     if (ret) {
3414                         goto out;
3415                     }
3416                 }
3417 
3418                 ret = len;
3419             }
3420         }
3421     }
3422 
3423 out:
3424     unlock_iovec(vec, target_vec, count, !send);
3425 out2:
3426     return ret;
3427 }
3428 
3429 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3430                                int flags, int send)
3431 {
3432     abi_long ret;
3433     struct target_msghdr *msgp;
3434 
3435     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3436                           msgp,
3437                           target_msg,
3438                           send ? 1 : 0)) {
3439         return -TARGET_EFAULT;
3440     }
3441     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3442     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3443     return ret;
3444 }
3445 
3446 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3447  * so it might not have this *mmsg-specific flag either.
3448  */
3449 #ifndef MSG_WAITFORONE
3450 #define MSG_WAITFORONE 0x10000
3451 #endif
3452 
3453 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3454                                 unsigned int vlen, unsigned int flags,
3455                                 int send)
3456 {
3457     struct target_mmsghdr *mmsgp;
3458     abi_long ret = 0;
3459     int i;
3460 
3461     if (vlen > UIO_MAXIOV) {
3462         vlen = UIO_MAXIOV;
3463     }
3464 
3465     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3466     if (!mmsgp) {
3467         return -TARGET_EFAULT;
3468     }
3469 
3470     for (i = 0; i < vlen; i++) {
3471         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3472         if (is_error(ret)) {
3473             break;
3474         }
3475         mmsgp[i].msg_len = tswap32(ret);
3476         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3477         if (flags & MSG_WAITFORONE) {
3478             flags |= MSG_DONTWAIT;
3479         }
3480     }
3481 
3482     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3483 
3484     /* Return number of datagrams sent if we sent any at all;
3485      * otherwise return the error.
3486      */
3487     if (i) {
3488         return i;
3489     }
3490     return ret;
3491 }
3492 
3493 /* do_accept4() Must return target values and target errnos. */
3494 static abi_long do_accept4(int fd, abi_ulong target_addr,
3495                            abi_ulong target_addrlen_addr, int flags)
3496 {
3497     socklen_t addrlen, ret_addrlen;
3498     void *addr;
3499     abi_long ret;
3500     int host_flags;
3501 
3502     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3503 
3504     if (target_addr == 0) {
3505         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3506     }
3507 
3508     /* Linux returns EFAULT if the addrlen pointer is invalid */
3509     if (get_user_u32(addrlen, target_addrlen_addr))
3510         return -TARGET_EFAULT;
3511 
3512     if ((int)addrlen < 0) {
3513         return -TARGET_EINVAL;
3514     }
3515 
3516     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3517         return -TARGET_EFAULT;
3518 
3519     addr = alloca(addrlen);
3520 
3521     ret_addrlen = addrlen;
3522     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3523     if (!is_error(ret)) {
3524         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3525         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3526             ret = -TARGET_EFAULT;
3527         }
3528     }
3529     return ret;
3530 }
3531 
3532 /* do_getpeername() Must return target values and target errnos. */
3533 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3534                                abi_ulong target_addrlen_addr)
3535 {
3536     socklen_t addrlen, ret_addrlen;
3537     void *addr;
3538     abi_long ret;
3539 
3540     if (get_user_u32(addrlen, target_addrlen_addr))
3541         return -TARGET_EFAULT;
3542 
3543     if ((int)addrlen < 0) {
3544         return -TARGET_EINVAL;
3545     }
3546 
3547     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3548         return -TARGET_EFAULT;
3549 
3550     addr = alloca(addrlen);
3551 
3552     ret_addrlen = addrlen;
3553     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3554     if (!is_error(ret)) {
3555         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3556         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3557             ret = -TARGET_EFAULT;
3558         }
3559     }
3560     return ret;
3561 }
3562 
3563 /* do_getsockname() Must return target values and target errnos. */
3564 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3565                                abi_ulong target_addrlen_addr)
3566 {
3567     socklen_t addrlen, ret_addrlen;
3568     void *addr;
3569     abi_long ret;
3570 
3571     if (get_user_u32(addrlen, target_addrlen_addr))
3572         return -TARGET_EFAULT;
3573 
3574     if ((int)addrlen < 0) {
3575         return -TARGET_EINVAL;
3576     }
3577 
3578     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3579         return -TARGET_EFAULT;
3580 
3581     addr = alloca(addrlen);
3582 
3583     ret_addrlen = addrlen;
3584     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3585     if (!is_error(ret)) {
3586         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3587         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3588             ret = -TARGET_EFAULT;
3589         }
3590     }
3591     return ret;
3592 }
3593 
3594 /* do_socketpair() Must return target values and target errnos. */
3595 static abi_long do_socketpair(int domain, int type, int protocol,
3596                               abi_ulong target_tab_addr)
3597 {
3598     int tab[2];
3599     abi_long ret;
3600 
3601     target_to_host_sock_type(&type);
3602 
3603     ret = get_errno(socketpair(domain, type, protocol, tab));
3604     if (!is_error(ret)) {
3605         if (put_user_s32(tab[0], target_tab_addr)
3606             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3607             ret = -TARGET_EFAULT;
3608     }
3609     return ret;
3610 }
3611 
3612 /* do_sendto() Must return target values and target errnos. */
3613 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3614                           abi_ulong target_addr, socklen_t addrlen)
3615 {
3616     void *addr;
3617     void *host_msg;
3618     void *copy_msg = NULL;
3619     abi_long ret;
3620 
3621     if ((int)addrlen < 0) {
3622         return -TARGET_EINVAL;
3623     }
3624 
3625     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3626     if (!host_msg)
3627         return -TARGET_EFAULT;
3628     if (fd_trans_target_to_host_data(fd)) {
3629         copy_msg = host_msg;
3630         host_msg = g_malloc(len);
3631         memcpy(host_msg, copy_msg, len);
3632         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3633         if (ret < 0) {
3634             goto fail;
3635         }
3636     }
3637     if (target_addr) {
3638         addr = alloca(addrlen+1);
3639         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3640         if (ret) {
3641             goto fail;
3642         }
3643         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3644     } else {
3645         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3646     }
3647 fail:
3648     if (copy_msg) {
3649         g_free(host_msg);
3650         host_msg = copy_msg;
3651     }
3652     unlock_user(host_msg, msg, 0);
3653     return ret;
3654 }
3655 
3656 /* do_recvfrom() Must return target values and target errnos. */
3657 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3658                             abi_ulong target_addr,
3659                             abi_ulong target_addrlen)
3660 {
3661     socklen_t addrlen, ret_addrlen;
3662     void *addr;
3663     void *host_msg;
3664     abi_long ret;
3665 
3666     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3667     if (!host_msg)
3668         return -TARGET_EFAULT;
3669     if (target_addr) {
3670         if (get_user_u32(addrlen, target_addrlen)) {
3671             ret = -TARGET_EFAULT;
3672             goto fail;
3673         }
3674         if ((int)addrlen < 0) {
3675             ret = -TARGET_EINVAL;
3676             goto fail;
3677         }
3678         addr = alloca(addrlen);
3679         ret_addrlen = addrlen;
3680         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3681                                       addr, &ret_addrlen));
3682     } else {
3683         addr = NULL; /* To keep compiler quiet.  */
3684         addrlen = 0; /* To keep compiler quiet.  */
3685         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3686     }
3687     if (!is_error(ret)) {
3688         if (fd_trans_host_to_target_data(fd)) {
3689             abi_long trans;
3690             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3691             if (is_error(trans)) {
3692                 ret = trans;
3693                 goto fail;
3694             }
3695         }
3696         if (target_addr) {
3697             host_to_target_sockaddr(target_addr, addr,
3698                                     MIN(addrlen, ret_addrlen));
3699             if (put_user_u32(ret_addrlen, target_addrlen)) {
3700                 ret = -TARGET_EFAULT;
3701                 goto fail;
3702             }
3703         }
3704         unlock_user(host_msg, msg, len);
3705     } else {
3706 fail:
3707         unlock_user(host_msg, msg, 0);
3708     }
3709     return ret;
3710 }
3711 
3712 #ifdef TARGET_NR_socketcall
3713 /* do_socketcall() must return target values and target errnos. */
3714 static abi_long do_socketcall(int num, abi_ulong vptr)
3715 {
3716     static const unsigned nargs[] = { /* number of arguments per operation */
3717         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3718         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3719         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3720         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3721         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3722         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3723         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3724         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3725         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3726         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3727         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3728         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3729         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3730         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3731         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3732         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3733         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3734         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3735         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3736         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3737     };
3738     abi_long a[6]; /* max 6 args */
3739     unsigned i;
3740 
3741     /* check the range of the first argument num */
3742     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3743     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3744         return -TARGET_EINVAL;
3745     }
3746     /* ensure we have space for args */
3747     if (nargs[num] > ARRAY_SIZE(a)) {
3748         return -TARGET_EINVAL;
3749     }
3750     /* collect the arguments in a[] according to nargs[] */
3751     for (i = 0; i < nargs[num]; ++i) {
3752         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3753             return -TARGET_EFAULT;
3754         }
3755     }
3756     /* now when we have the args, invoke the appropriate underlying function */
3757     switch (num) {
3758     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3759         return do_socket(a[0], a[1], a[2]);
3760     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3761         return do_bind(a[0], a[1], a[2]);
3762     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3763         return do_connect(a[0], a[1], a[2]);
3764     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3765         return get_errno(listen(a[0], a[1]));
3766     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3767         return do_accept4(a[0], a[1], a[2], 0);
3768     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3769         return do_getsockname(a[0], a[1], a[2]);
3770     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3771         return do_getpeername(a[0], a[1], a[2]);
3772     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3773         return do_socketpair(a[0], a[1], a[2], a[3]);
3774     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3775         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3776     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3777         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3778     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3779         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3780     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3781         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3782     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3783         return get_errno(shutdown(a[0], a[1]));
3784     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3785         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3786     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3787         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3788     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3789         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3790     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3791         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3792     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3793         return do_accept4(a[0], a[1], a[2], a[3]);
3794     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3795         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3796     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3797         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3798     default:
3799         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3800         return -TARGET_EINVAL;
3801     }
3802 }
3803 #endif
3804 
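/*
 * Track the guest address and size of attached SysV shared memory
 * segments so that do_shmdt() can clear the guest page flags for the
 * correct range when a segment is detached.
 */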
3805 #define N_SHM_REGIONS	32
3806 
3807 static struct shm_region {
3808     abi_ulong start;
3809     abi_ulong size;
3810     bool in_use;
3811 } shm_regions[N_SHM_REGIONS];
3812 
3813 #ifndef TARGET_SEMID64_DS
3814 /* asm-generic version of this struct */
3815 struct target_semid64_ds
3816 {
3817   struct target_ipc_perm sem_perm;
3818   abi_ulong sem_otime;
3819 #if TARGET_ABI_BITS == 32
3820   abi_ulong __unused1;
3821 #endif
3822   abi_ulong sem_ctime;
3823 #if TARGET_ABI_BITS == 32
3824   abi_ulong __unused2;
3825 #endif
3826   abi_ulong sem_nsems;
3827   abi_ulong __unused3;
3828   abi_ulong __unused4;
3829 };
3830 #endif
3831 
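/*
 * Convert the ipc_perm structure embedded at the start of a target SysV
 * IPC descriptor into the host representation, byte-swapping each field.
 */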
3832 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3833                                                abi_ulong target_addr)
3834 {
3835     struct target_ipc_perm *target_ip;
3836     struct target_semid64_ds *target_sd;
3837 
3838     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3839         return -TARGET_EFAULT;
3840     target_ip = &(target_sd->sem_perm);
3841     host_ip->__key = tswap32(target_ip->__key);
3842     host_ip->uid = tswap32(target_ip->uid);
3843     host_ip->gid = tswap32(target_ip->gid);
3844     host_ip->cuid = tswap32(target_ip->cuid);
3845     host_ip->cgid = tswap32(target_ip->cgid);
3846 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3847     host_ip->mode = tswap32(target_ip->mode);
3848 #else
3849     host_ip->mode = tswap16(target_ip->mode);
3850 #endif
3851 #if defined(TARGET_PPC)
3852     host_ip->__seq = tswap32(target_ip->__seq);
3853 #else
3854     host_ip->__seq = tswap16(target_ip->__seq);
3855 #endif
3856     unlock_user_struct(target_sd, target_addr, 0);
3857     return 0;
3858 }
3859 
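/*
 * Convert a host ipc_perm structure back into the target representation
 * embedded at the start of the target SysV IPC descriptor.
 */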
3860 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3861                                                struct ipc_perm *host_ip)
3862 {
3863     struct target_ipc_perm *target_ip;
3864     struct target_semid64_ds *target_sd;
3865 
3866     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3867         return -TARGET_EFAULT;
3868     target_ip = &(target_sd->sem_perm);
3869     target_ip->__key = tswap32(host_ip->__key);
3870     target_ip->uid = tswap32(host_ip->uid);
3871     target_ip->gid = tswap32(host_ip->gid);
3872     target_ip->cuid = tswap32(host_ip->cuid);
3873     target_ip->cgid = tswap32(host_ip->cgid);
3874 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3875     target_ip->mode = tswap32(host_ip->mode);
3876 #else
3877     target_ip->mode = tswap16(host_ip->mode);
3878 #endif
3879 #if defined(TARGET_PPC)
3880     target_ip->__seq = tswap32(host_ip->__seq);
3881 #else
3882     target_ip->__seq = tswap16(host_ip->__seq);
3883 #endif
3884     unlock_user_struct(target_sd, target_addr, 1);
3885     return 0;
3886 }
3887 
3888 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3889                                                abi_ulong target_addr)
3890 {
3891     struct target_semid64_ds *target_sd;
3892 
3893     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3894         return -TARGET_EFAULT;
3895     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3896         return -TARGET_EFAULT;
3897     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3898     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3899     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3900     unlock_user_struct(target_sd, target_addr, 0);
3901     return 0;
3902 }
3903 
3904 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3905                                                struct semid_ds *host_sd)
3906 {
3907     struct target_semid64_ds *target_sd;
3908 
3909     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3910         return -TARGET_EFAULT;
3911     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3912         return -TARGET_EFAULT;
3913     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3914     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3915     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3916     unlock_user_struct(target_sd, target_addr, 1);
3917     return 0;
3918 }
3919 
3920 struct target_seminfo {
3921     int semmap;
3922     int semmni;
3923     int semmns;
3924     int semmnu;
3925     int semmsl;
3926     int semopm;
3927     int semume;
3928     int semusz;
3929     int semvmx;
3930     int semaem;
3931 };
3932 
3933 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3934                                               struct seminfo *host_seminfo)
3935 {
3936     struct target_seminfo *target_seminfo;
3937     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3938         return -TARGET_EFAULT;
3939     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3940     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3941     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3942     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3943     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3944     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3945     __put_user(host_seminfo->semume, &target_seminfo->semume);
3946     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3947     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3948     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3949     unlock_user_struct(target_seminfo, target_addr, 1);
3950     return 0;
3951 }
3952 
3953 union semun {
3954     int val;
3955     struct semid_ds *buf;
3956     unsigned short *array;
3957     struct seminfo *__buf;
3958 };
3959
3960 union target_semun {
3961     int val;
3962     abi_ulong buf;
3963     abi_ulong array;
3964     abi_ulong __buf;
3965 };
3966 
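/*
 * Used by GETALL/SETALL: query the number of semaphores in the set with
 * IPC_STAT and copy the target value array into a newly allocated host
 * array, which host_to_target_semarray() later copies back and frees.
 */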
3967 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3968                                                abi_ulong target_addr)
3969 {
3970     int nsems;
3971     unsigned short *array;
3972     union semun semun;
3973     struct semid_ds semid_ds;
3974     int i, ret;
3975 
3976     semun.buf = &semid_ds;
3977 
3978     ret = semctl(semid, 0, IPC_STAT, semun);
3979     if (ret == -1)
3980         return get_errno(ret);
3981 
3982     nsems = semid_ds.sem_nsems;
3983 
3984     *host_array = g_try_new(unsigned short, nsems);
3985     if (!*host_array) {
3986         return -TARGET_ENOMEM;
3987     }
3988     array = lock_user(VERIFY_READ, target_addr,
3989                       nsems*sizeof(unsigned short), 1);
3990     if (!array) {
3991         g_free(*host_array);
3992         return -TARGET_EFAULT;
3993     }
3994 
3995     for (i = 0; i < nsems; i++) {
3996         __get_user((*host_array)[i], &array[i]);
3997     }
3998     unlock_user(array, target_addr, 0);
3999 
4000     return 0;
4001 }
4002 
4003 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4004                                                unsigned short **host_array)
4005 {
4006     int nsems;
4007     unsigned short *array;
4008     union semun semun;
4009     struct semid_ds semid_ds;
4010     int i, ret;
4011 
4012     semun.buf = &semid_ds;
4013 
4014     ret = semctl(semid, 0, IPC_STAT, semun);
4015     if (ret == -1)
4016         return get_errno(ret);
4017 
4018     nsems = semid_ds.sem_nsems;
4019 
4020     array = lock_user(VERIFY_WRITE, target_addr,
4021                       nsems*sizeof(unsigned short), 0);
4022     if (!array)
4023         return -TARGET_EFAULT;
4024 
4025     for (i = 0; i < nsems; i++) {
4026         __put_user((*host_array)[i], &array[i]);
4027     }
4028     g_free(*host_array);
4029     unlock_user(array, target_addr, 1);
4030 
4031     return 0;
4032 }
4033 
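/* do_semctl() must return target values and target errnos. */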
4034 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4035                                  abi_ulong target_arg)
4036 {
4037     union target_semun target_su = { .buf = target_arg };
4038     union semun arg;
4039     struct semid_ds dsarg;
4040     unsigned short *array = NULL;
4041     struct seminfo seminfo;
4042     abi_long ret = -TARGET_EINVAL;
4043     abi_long err;
4044     cmd &= 0xff;
4045 
4046     switch (cmd) {
4047     case GETVAL:
4048     case SETVAL:
4049         /* In 64-bit cross-endian situations, we will erroneously pick up
4050          * the wrong half of the union for the "val" element.  To rectify
4051          * this, the entire 8-byte structure is byteswapped, followed by
4052          * a swap of the 4-byte val field.  In other cases, the data is
4053          * already in proper host byte order. */
4054         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4055             target_su.buf = tswapal(target_su.buf);
4056             arg.val = tswap32(target_su.val);
4057         } else {
4058             arg.val = target_su.val;
4059         }
4060         ret = get_errno(semctl(semid, semnum, cmd, arg));
4061         break;
4062     case GETALL:
4063     case SETALL:
4064         err = target_to_host_semarray(semid, &array, target_su.array);
4065         if (err)
4066             return err;
4067         arg.array = array;
4068         ret = get_errno(semctl(semid, semnum, cmd, arg));
4069         err = host_to_target_semarray(semid, target_su.array, &array);
4070         if (err)
4071             return err;
4072         break;
4073     case IPC_STAT:
4074     case IPC_SET:
4075     case SEM_STAT:
4076         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4077         if (err)
4078             return err;
4079         arg.buf = &dsarg;
4080         ret = get_errno(semctl(semid, semnum, cmd, arg));
4081         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4082         if (err)
4083             return err;
4084         break;
4085     case IPC_INFO:
4086     case SEM_INFO:
4087         arg.__buf = &seminfo;
4088         ret = get_errno(semctl(semid, semnum, cmd, arg));
4089         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4090         if (err)
4091             return err;
4092         break;
4093     case IPC_RMID:
4094     case GETPID:
4095     case GETNCNT:
4096     case GETZCNT:
4097         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4098         break;
4099     }
4100 
4101     return ret;
4102 }
4103 
4104 struct target_sembuf {
4105     unsigned short sem_num;
4106     short sem_op;
4107     short sem_flg;
4108 };
4109 
4110 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4111                                              abi_ulong target_addr,
4112                                              unsigned nsops)
4113 {
4114     struct target_sembuf *target_sembuf;
4115     int i;
4116 
4117     target_sembuf = lock_user(VERIFY_READ, target_addr,
4118                               nsops*sizeof(struct target_sembuf), 1);
4119     if (!target_sembuf)
4120         return -TARGET_EFAULT;
4121 
4122     for (i = 0; i < nsops; i++) {
4123         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4124         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4125         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4126     }
4127 
4128     unlock_user(target_sembuf, target_addr, 0);
4129 
4130     return 0;
4131 }
4132 
4133 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4134     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4135 
4136 /*
4137  * This macro is required to handle the s390 variant, which passes the
4138  * arguments in a different order than the default.
4139  */
4140 #ifdef __s390x__
4141 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4142   (__nsops), (__timeout), (__sops)
4143 #else
4144 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4145   (__nsops), 0, (__sops), (__timeout)
4146 #endif
4147 
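/*
 * Handle semop() and semtimedop() for the guest; must return target
 * values and target errnos.  'time64' selects the 64-bit timespec layout.
 */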
4148 static inline abi_long do_semtimedop(int semid,
4149                                      abi_long ptr,
4150                                      unsigned nsops,
4151                                      abi_long timeout, bool time64)
4152 {
4153     struct sembuf *sops;
4154     struct timespec ts, *pts = NULL;
4155     abi_long ret;
4156 
4157     if (timeout) {
4158         pts = &ts;
4159         if (time64) {
4160             if (target_to_host_timespec64(pts, timeout)) {
4161                 return -TARGET_EFAULT;
4162             }
4163         } else {
4164             if (target_to_host_timespec(pts, timeout)) {
4165                 return -TARGET_EFAULT;
4166             }
4167         }
4168     }
4169 
4170     if (nsops > TARGET_SEMOPM) {
4171         return -TARGET_E2BIG;
4172     }
4173 
4174     sops = g_new(struct sembuf, nsops);
4175 
4176     if (target_to_host_sembuf(sops, ptr, nsops)) {
4177         g_free(sops);
4178         return -TARGET_EFAULT;
4179     }
4180 
4181     ret = -TARGET_ENOSYS;
4182 #ifdef __NR_semtimedop
4183     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4184 #endif
4185 #ifdef __NR_ipc
4186     if (ret == -TARGET_ENOSYS) {
4187         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4188                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4189     }
4190 #endif
4191     g_free(sops);
4192     return ret;
4193 }
4194 #endif
4195 
4196 struct target_msqid_ds
4197 {
4198     struct target_ipc_perm msg_perm;
4199     abi_ulong msg_stime;
4200 #if TARGET_ABI_BITS == 32
4201     abi_ulong __unused1;
4202 #endif
4203     abi_ulong msg_rtime;
4204 #if TARGET_ABI_BITS == 32
4205     abi_ulong __unused2;
4206 #endif
4207     abi_ulong msg_ctime;
4208 #if TARGET_ABI_BITS == 32
4209     abi_ulong __unused3;
4210 #endif
4211     abi_ulong __msg_cbytes;
4212     abi_ulong msg_qnum;
4213     abi_ulong msg_qbytes;
4214     abi_ulong msg_lspid;
4215     abi_ulong msg_lrpid;
4216     abi_ulong __unused4;
4217     abi_ulong __unused5;
4218 };
4219 
4220 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4221                                                abi_ulong target_addr)
4222 {
4223     struct target_msqid_ds *target_md;
4224 
4225     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4226         return -TARGET_EFAULT;
4227     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4228         return -TARGET_EFAULT;
4229     host_md->msg_stime = tswapal(target_md->msg_stime);
4230     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4231     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4232     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4233     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4234     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4235     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4236     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4237     unlock_user_struct(target_md, target_addr, 0);
4238     return 0;
4239 }
4240 
4241 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4242                                                struct msqid_ds *host_md)
4243 {
4244     struct target_msqid_ds *target_md;
4245 
4246     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4247         return -TARGET_EFAULT;
4248     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4249         return -TARGET_EFAULT;
4250     target_md->msg_stime = tswapal(host_md->msg_stime);
4251     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4252     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4253     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4254     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4255     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4256     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4257     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4258     unlock_user_struct(target_md, target_addr, 1);
4259     return 0;
4260 }
4261 
4262 struct target_msginfo {
4263     int msgpool;
4264     int msgmap;
4265     int msgmax;
4266     int msgmnb;
4267     int msgmni;
4268     int msgssz;
4269     int msgtql;
4270     unsigned short int msgseg;
4271 };
4272 
4273 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4274                                               struct msginfo *host_msginfo)
4275 {
4276     struct target_msginfo *target_msginfo;
4277     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4278         return -TARGET_EFAULT;
4279     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4280     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4281     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4282     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4283     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4284     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4285     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4286     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4287     unlock_user_struct(target_msginfo, target_addr, 1);
4288     return 0;
4289 }
4290 
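/* do_msgctl() must return target values and target errnos. */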
4291 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4292 {
4293     struct msqid_ds dsarg;
4294     struct msginfo msginfo;
4295     abi_long ret = -TARGET_EINVAL;
4296 
4297     cmd &= 0xff;
4298 
4299     switch (cmd) {
4300     case IPC_STAT:
4301     case IPC_SET:
4302     case MSG_STAT:
4303         if (target_to_host_msqid_ds(&dsarg,ptr))
4304             return -TARGET_EFAULT;
4305         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4306         if (host_to_target_msqid_ds(ptr,&dsarg))
4307             return -TARGET_EFAULT;
4308         break;
4309     case IPC_RMID:
4310         ret = get_errno(msgctl(msgid, cmd, NULL));
4311         break;
4312     case IPC_INFO:
4313     case MSG_INFO:
4314         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4315         if (host_to_target_msginfo(ptr, &msginfo))
4316             return -TARGET_EFAULT;
4317         break;
4318     }
4319 
4320     return ret;
4321 }
4322 
4323 struct target_msgbuf {
4324     abi_long mtype;
4325     char mtext[1];
4326 };
4327 
4328 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4329                                  ssize_t msgsz, int msgflg)
4330 {
4331     struct target_msgbuf *target_mb;
4332     struct msgbuf *host_mb;
4333     abi_long ret = 0;
4334 
4335     if (msgsz < 0) {
4336         return -TARGET_EINVAL;
4337     }
4338 
4339     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4340         return -TARGET_EFAULT;
4341     host_mb = g_try_malloc(msgsz + sizeof(long));
4342     if (!host_mb) {
4343         unlock_user_struct(target_mb, msgp, 0);
4344         return -TARGET_ENOMEM;
4345     }
4346     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4347     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4348     ret = -TARGET_ENOSYS;
4349 #ifdef __NR_msgsnd
4350     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4351 #endif
4352 #ifdef __NR_ipc
4353     if (ret == -TARGET_ENOSYS) {
4354 #ifdef __s390x__
4355         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4356                                  host_mb));
4357 #else
4358         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4359                                  host_mb, 0));
4360 #endif
4361     }
4362 #endif
4363     g_free(host_mb);
4364     unlock_user_struct(target_mb, msgp, 0);
4365 
4366     return ret;
4367 }
4368 
4369 #ifdef __NR_ipc
4370 #if defined(__sparc__)
4371 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4372 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4373 #elif defined(__s390x__)
4374 /* The s390 sys_ipc variant has only five parameters.  */
4375 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4376     ((long int[]){(long int)__msgp, __msgtyp})
4377 #else
4378 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4379     ((long int[]){(long int)__msgp, __msgtyp}), 0
4380 #endif
4381 #endif
4382 
4383 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4384                                  ssize_t msgsz, abi_long msgtyp,
4385                                  int msgflg)
4386 {
4387     struct target_msgbuf *target_mb;
4388     char *target_mtext;
4389     struct msgbuf *host_mb;
4390     abi_long ret = 0;
4391 
4392     if (msgsz < 0) {
4393         return -TARGET_EINVAL;
4394     }
4395 
4396     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4397         return -TARGET_EFAULT;
4398 
4399     host_mb = g_try_malloc(msgsz + sizeof(long));
4400     if (!host_mb) {
4401         ret = -TARGET_ENOMEM;
4402         goto end;
4403     }
4404     ret = -TARGET_ENOSYS;
4405 #ifdef __NR_msgrcv
4406     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4407 #endif
4408 #ifdef __NR_ipc
4409     if (ret == -TARGET_ENOSYS) {
4410         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4411                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4412     }
4413 #endif
4414 
4415     if (ret > 0) {
4416         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4417         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4418         if (!target_mtext) {
4419             ret = -TARGET_EFAULT;
4420             goto end;
4421         }
4422         memcpy(target_mb->mtext, host_mb->mtext, ret);
4423         unlock_user(target_mtext, target_mtext_addr, ret);
4424     }
4425 
4426     target_mb->mtype = tswapal(host_mb->mtype);
4427 
4428 end:
4429     if (target_mb)
4430         unlock_user_struct(target_mb, msgp, 1);
4431     g_free(host_mb);
4432     return ret;
4433 }
4434 
4435 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4436                                                abi_ulong target_addr)
4437 {
4438     struct target_shmid_ds *target_sd;
4439 
4440     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4441         return -TARGET_EFAULT;
4442     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4443         return -TARGET_EFAULT;
4444     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4445     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4446     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4447     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4448     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4449     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4450     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4451     unlock_user_struct(target_sd, target_addr, 0);
4452     return 0;
4453 }
4454 
4455 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4456                                                struct shmid_ds *host_sd)
4457 {
4458     struct target_shmid_ds *target_sd;
4459 
4460     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4461         return -TARGET_EFAULT;
4462     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4463         return -TARGET_EFAULT;
4464     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4465     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4466     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4467     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4468     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4469     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4470     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4471     unlock_user_struct(target_sd, target_addr, 1);
4472     return 0;
4473 }
4474 
4475 struct  target_shminfo {
4476     abi_ulong shmmax;
4477     abi_ulong shmmin;
4478     abi_ulong shmmni;
4479     abi_ulong shmseg;
4480     abi_ulong shmall;
4481 };
4482 
4483 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4484                                               struct shminfo *host_shminfo)
4485 {
4486     struct target_shminfo *target_shminfo;
4487     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4488         return -TARGET_EFAULT;
4489     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4490     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4491     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4492     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4493     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4494     unlock_user_struct(target_shminfo, target_addr, 1);
4495     return 0;
4496 }
4497 
4498 struct target_shm_info {
4499     int used_ids;
4500     abi_ulong shm_tot;
4501     abi_ulong shm_rss;
4502     abi_ulong shm_swp;
4503     abi_ulong swap_attempts;
4504     abi_ulong swap_successes;
4505 };
4506 
4507 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4508                                                struct shm_info *host_shm_info)
4509 {
4510     struct target_shm_info *target_shm_info;
4511     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4512         return -TARGET_EFAULT;
4513     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4514     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4515     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4516     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4517     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4518     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4519     unlock_user_struct(target_shm_info, target_addr, 1);
4520     return 0;
4521 }
4522 
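/* do_shmctl() must return target values and target errnos. */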
4523 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4524 {
4525     struct shmid_ds dsarg;
4526     struct shminfo shminfo;
4527     struct shm_info shm_info;
4528     abi_long ret = -TARGET_EINVAL;
4529 
4530     cmd &= 0xff;
4531 
4532     switch(cmd) {
4533     case IPC_STAT:
4534     case IPC_SET:
4535     case SHM_STAT:
4536         if (target_to_host_shmid_ds(&dsarg, buf))
4537             return -TARGET_EFAULT;
4538         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4539         if (host_to_target_shmid_ds(buf, &dsarg))
4540             return -TARGET_EFAULT;
4541         break;
4542     case IPC_INFO:
4543         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4544         if (host_to_target_shminfo(buf, &shminfo))
4545             return -TARGET_EFAULT;
4546         break;
4547     case SHM_INFO:
4548         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4549         if (host_to_target_shm_info(buf, &shm_info))
4550             return -TARGET_EFAULT;
4551         break;
4552     case IPC_RMID:
4553     case SHM_LOCK:
4554     case SHM_UNLOCK:
4555         ret = get_errno(shmctl(shmid, cmd, NULL));
4556         break;
4557     }
4558 
4559     return ret;
4560 }
4561 
4562 #ifndef TARGET_FORCE_SHMLBA
4563 /* For most architectures, SHMLBA is the same as the page size;
4564  * some architectures have larger values, in which case they should
4565  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4566  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4567  * and defining its own value for SHMLBA.
4568  *
4569  * The kernel also permits SHMLBA to be set by the architecture to a
4570  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4571  * this means that addresses are rounded to the large size if
4572  * SHM_RND is set but addresses not aligned to that size are not rejected
4573  * as long as they are at least page-aligned. Since the only architecture
4574  * which uses this is ia64 this code doesn't provide for that oddity.
4575  * which uses this is ia64, this code doesn't provide for that oddity.
4576 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4577 {
4578     return TARGET_PAGE_SIZE;
4579 }
4580 #endif
4581 
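/*
 * Attach a SysV shared memory segment at a guest address, honouring the
 * target SHMLBA alignment, record the mapping in shm_regions[] and mark
 * the guest pages as valid; returns the guest address or a target errno.
 */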
4582 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4583                                  int shmid, abi_ulong shmaddr, int shmflg)
4584 {
4585     abi_long raddr;
4586     void *host_raddr;
4587     struct shmid_ds shm_info;
4588     int i, ret;
4589     abi_ulong shmlba;
4590 
4591     /* find out the length of the shared memory segment */
4592     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4593     if (is_error(ret)) {
4594         /* can't get length, bail out */
4595         return ret;
4596     }
4597 
4598     shmlba = target_shmlba(cpu_env);
4599 
4600     if (shmaddr & (shmlba - 1)) {
4601         if (shmflg & SHM_RND) {
4602             shmaddr &= ~(shmlba - 1);
4603         } else {
4604             return -TARGET_EINVAL;
4605         }
4606     }
4607     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4608         return -TARGET_EINVAL;
4609     }
4610 
4611     mmap_lock();
4612 
4613     if (shmaddr)
4614         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4615     else {
4616         abi_ulong mmap_start;
4617 
4618         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4619         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4620 
4621         if (mmap_start == -1) {
4622             errno = ENOMEM;
4623             host_raddr = (void *)-1;
4624         } else
4625             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4626     }
4627 
4628     if (host_raddr == (void *)-1) {
4629         mmap_unlock();
4630         return get_errno((long)host_raddr);
4631     }
4632     raddr = h2g((unsigned long)host_raddr);
4633 
4634     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4635                    PAGE_VALID | PAGE_READ |
4636                    ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
4637 
4638     for (i = 0; i < N_SHM_REGIONS; i++) {
4639         if (!shm_regions[i].in_use) {
4640             shm_regions[i].in_use = true;
4641             shm_regions[i].start = raddr;
4642             shm_regions[i].size = shm_info.shm_segsz;
4643             break;
4644         }
4645     }
4646 
4647     mmap_unlock();
4648     return raddr;
4649 
4650 }
4651 
4652 static inline abi_long do_shmdt(abi_ulong shmaddr)
4653 {
4654     int i;
4655     abi_long rv;
4656 
4657     mmap_lock();
4658 
4659     for (i = 0; i < N_SHM_REGIONS; ++i) {
4660         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4661             shm_regions[i].in_use = false;
4662             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4663             break;
4664         }
4665     }
4666     rv = get_errno(shmdt(g2h(shmaddr)));
4667 
4668     mmap_unlock();
4669 
4670     return rv;
4671 }
4672 
4673 #ifdef TARGET_NR_ipc
4674 /* ??? This only works with linear mappings.  */
4675 /* do_ipc() must return target values and target errnos. */
4676 static abi_long do_ipc(CPUArchState *cpu_env,
4677                        unsigned int call, abi_long first,
4678                        abi_long second, abi_long third,
4679                        abi_long ptr, abi_long fifth)
4680 {
4681     int version;
4682     abi_long ret = 0;
4683 
4684     version = call >> 16;
4685     call &= 0xffff;
4686 
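    /*
     * The multiplexed ipc() syscall encodes the operation in the low 16 bits
     * of 'call' and a structure-layout version in the high 16 bits; the
     * version only matters for a few legacy sub-calls such as IPCOP_msgrcv
     * and IPCOP_shmat below.
     */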
4687     switch (call) {
4688     case IPCOP_semop:
4689         ret = do_semtimedop(first, ptr, second, 0, false);
4690         break;
4691     case IPCOP_semtimedop:
4692     /*
4693      * The s390 sys_ipc variant has only five parameters instead of six
4694      * (as in the default variant); the only difference is the handling of
4695      * SEMTIMEDOP, where s390 uses the third parameter as a pointer to a
4696      * struct timespec while the generic variant uses the fifth parameter.
4697      */
4698 #if defined(TARGET_S390X)
4699         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4700 #else
4701         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4702 #endif
4703         break;
4704 
4705     case IPCOP_semget:
4706         ret = get_errno(semget(first, second, third));
4707         break;
4708 
4709     case IPCOP_semctl: {
4710         /* The semun argument to semctl is passed by value, so dereference the
4711          * ptr argument. */
4712         abi_ulong atptr;
4713         get_user_ual(atptr, ptr);
4714         ret = do_semctl(first, second, third, atptr);
4715         break;
4716     }
4717 
4718     case IPCOP_msgget:
4719         ret = get_errno(msgget(first, second));
4720         break;
4721 
4722     case IPCOP_msgsnd:
4723         ret = do_msgsnd(first, ptr, second, third);
4724         break;
4725 
4726     case IPCOP_msgctl:
4727         ret = do_msgctl(first, second, ptr);
4728         break;
4729 
4730     case IPCOP_msgrcv:
4731         switch (version) {
4732         case 0:
4733             {
4734                 struct target_ipc_kludge {
4735                     abi_long msgp;
4736                     abi_long msgtyp;
4737                 } *tmp;
4738 
4739                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4740                     ret = -TARGET_EFAULT;
4741                     break;
4742                 }
4743 
4744                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4745 
4746                 unlock_user_struct(tmp, ptr, 0);
4747                 break;
4748             }
4749         default:
4750             ret = do_msgrcv(first, ptr, second, fifth, third);
4751         }
4752         break;
4753 
4754     case IPCOP_shmat:
4755         switch (version) {
4756         default:
4757         {
4758             abi_ulong raddr;
4759             raddr = do_shmat(cpu_env, first, ptr, second);
4760             if (is_error(raddr))
4761                 return get_errno(raddr);
4762             if (put_user_ual(raddr, third))
4763                 return -TARGET_EFAULT;
4764             break;
4765         }
4766         case 1:
4767             ret = -TARGET_EINVAL;
4768             break;
4769         }
4770         break;
4771     case IPCOP_shmdt:
4772         ret = do_shmdt(ptr);
4773         break;
4774 
4775     case IPCOP_shmget:
4776         /* IPC_* flag values are the same on all Linux platforms */
4777         ret = get_errno(shmget(first, second, third));
4778         break;
4779 
4780     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4781     case IPCOP_shmctl:
4782         ret = do_shmctl(first, second, ptr);
4783         break;
4784     default:
4785         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4786                       call, version);
4787         ret = -TARGET_ENOSYS;
4788         break;
4789     }
4790     return ret;
4791 }
4792 #endif
4793 
4794 /* kernel structure types definitions */
4795 
4796 #define STRUCT(name, ...) STRUCT_ ## name,
4797 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4798 enum {
4799 #include "syscall_types.h"
4800 STRUCT_MAX
4801 };
4802 #undef STRUCT
4803 #undef STRUCT_SPECIAL
4804 
4805 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4806 #define STRUCT_SPECIAL(name)
4807 #include "syscall_types.h"
4808 #undef STRUCT
4809 #undef STRUCT_SPECIAL
4810 
4811 #define MAX_STRUCT_SIZE 4096
4812 
4813 #ifdef CONFIG_FIEMAP
4814 /* Cap the extent count so fiemap access checks don't overflow on 32-bit systems.
4815  * This is very slightly smaller than the limit imposed by
4816  * the underlying kernel.
4817  */
4818 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4819                             / sizeof(struct fiemap_extent))
4820 
4821 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4822                                        int fd, int cmd, abi_long arg)
4823 {
4824     /* The parameter for this ioctl is a struct fiemap followed
4825      * by an array of struct fiemap_extent whose size is set
4826      * in fiemap->fm_extent_count. The array is filled in by the
4827      * ioctl.
4828      */
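    /*
     * Sketch of the guest buffer this handler converts:
     *
     *     struct fiemap hdr;                          in/out header
     *     struct fiemap_extent ext[fm_extent_count];  filled in by the ioctl
     */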
4829     int target_size_in, target_size_out;
4830     struct fiemap *fm;
4831     const argtype *arg_type = ie->arg_type;
4832     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4833     void *argptr, *p;
4834     abi_long ret;
4835     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4836     uint32_t outbufsz;
4837     int free_fm = 0;
4838 
4839     assert(arg_type[0] == TYPE_PTR);
4840     assert(ie->access == IOC_RW);
4841     arg_type++;
4842     target_size_in = thunk_type_size(arg_type, 0);
4843     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4844     if (!argptr) {
4845         return -TARGET_EFAULT;
4846     }
4847     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4848     unlock_user(argptr, arg, 0);
4849     fm = (struct fiemap *)buf_temp;
4850     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4851         return -TARGET_EINVAL;
4852     }
4853 
4854     outbufsz = sizeof (*fm) +
4855         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4856 
4857     if (outbufsz > MAX_STRUCT_SIZE) {
4858         /* We can't fit all the extents into the fixed size buffer.
4859          * Allocate one that is large enough and use it instead.
4860          */
4861         fm = g_try_malloc(outbufsz);
4862         if (!fm) {
4863             return -TARGET_ENOMEM;
4864         }
4865         memcpy(fm, buf_temp, sizeof(struct fiemap));
4866         free_fm = 1;
4867     }
4868     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4869     if (!is_error(ret)) {
4870         target_size_out = target_size_in;
4871         /* An extent_count of 0 means we were only counting the extents
4872          * so there are no structs to copy
4873          */
4874         if (fm->fm_extent_count != 0) {
4875             target_size_out += fm->fm_mapped_extents * extent_size;
4876         }
4877         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4878         if (!argptr) {
4879             ret = -TARGET_EFAULT;
4880         } else {
4881             /* Convert the struct fiemap */
4882             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4883             if (fm->fm_extent_count != 0) {
4884                 p = argptr + target_size_in;
4885                 /* ...and then all the struct fiemap_extents */
4886                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4887                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4888                                   THUNK_TARGET);
4889                     p += extent_size;
4890                 }
4891             }
4892             unlock_user(argptr, arg, target_size_out);
4893         }
4894     }
4895     if (free_fm) {
4896         g_free(fm);
4897     }
4898     return ret;
4899 }
4900 #endif
4901 
4902 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4903                                 int fd, int cmd, abi_long arg)
4904 {
4905     const argtype *arg_type = ie->arg_type;
4906     int target_size;
4907     void *argptr;
4908     int ret;
4909     struct ifconf *host_ifconf;
4910     uint32_t outbufsz;
4911     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4912     int target_ifreq_size;
4913     int nb_ifreq;
4914     int free_buf = 0;
4915     int i;
4916     int target_ifc_len;
4917     abi_long target_ifc_buf;
4918     int host_ifc_len;
4919     char *host_ifc_buf;
4920 
4921     assert(arg_type[0] == TYPE_PTR);
4922     assert(ie->access == IOC_RW);
4923 
4924     arg_type++;
4925     target_size = thunk_type_size(arg_type, 0);
4926 
4927     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4928     if (!argptr)
4929         return -TARGET_EFAULT;
4930     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4931     unlock_user(argptr, arg, 0);
4932 
4933     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4934     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4935     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4936 
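    /*
     * The target's struct ifreq need not have the same size as the host's
     * (the embedded sockaddr can be padded differently), so the length the
     * guest supplied is recomputed below in units of host-sized entries.
     */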
4937     if (target_ifc_buf != 0) {
4938         target_ifc_len = host_ifconf->ifc_len;
4939         nb_ifreq = target_ifc_len / target_ifreq_size;
4940         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4941 
4942         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4943         if (outbufsz > MAX_STRUCT_SIZE) {
4944             /*
4945              * We can't fit all the ifreq entries into the fixed-size buffer.
4946              * Allocate one that is large enough and use it instead.
4947              */
4948             host_ifconf = g_try_malloc(outbufsz);
4949             if (!host_ifconf) {
4950                 return -TARGET_ENOMEM;
4951             }
4952             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4953             free_buf = 1;
4954         }
4955         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4956 
4957         host_ifconf->ifc_len = host_ifc_len;
4958     } else {
4959         host_ifc_buf = NULL;
4960     }
4961     host_ifconf->ifc_buf = host_ifc_buf;
4962 
4963     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4964     if (!is_error(ret)) {
4965         /* convert host ifc_len to target ifc_len */
4966 
4967         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4968         target_ifc_len = nb_ifreq * target_ifreq_size;
4969         host_ifconf->ifc_len = target_ifc_len;
4970 
4971         /* restore target ifc_buf */
4972 
4973         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4974 
4975         /* copy struct ifconf to target user */
4976 
4977         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4978         if (!argptr)
4979             return -TARGET_EFAULT;
4980         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4981         unlock_user(argptr, arg, target_size);
4982 
4983         if (target_ifc_buf != 0) {
4984             /* copy ifreq[] to target user */
4985             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4986             for (i = 0; i < nb_ifreq ; i++) {
4987                 thunk_convert(argptr + i * target_ifreq_size,
4988                               host_ifc_buf + i * sizeof(struct ifreq),
4989                               ifreq_arg_type, THUNK_TARGET);
4990             }
4991             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4992         }
4993     }
4994 
4995     if (free_buf) {
4996         g_free(host_ifconf);
4997     }
4998 
4999     return ret;
5000 }
5001 
5002 #if defined(CONFIG_USBFS)
5003 #if HOST_LONG_BITS > 64
5004 #error USBDEVFS thunks do not support >64 bit hosts yet.
5005 #endif
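/*
 * Book-keeping wrapper for asynchronous USB requests: the kernel is handed
 * the address of host_urb, so when USBDEVFS_REAPURB returns that pointer the
 * enclosing live_urb (and with it the guest URB and buffer addresses) can be
 * recovered with the offsetof() subtraction in do_ioctl_usbdevfs_reapurb().
 */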
5006 struct live_urb {
5007     uint64_t target_urb_adr;
5008     uint64_t target_buf_adr;
5009     char *target_buf_ptr;
5010     struct usbdevfs_urb host_urb;
5011 };
5012 
5013 static GHashTable *usbdevfs_urb_hashtable(void)
5014 {
5015     static GHashTable *urb_hashtable;
5016 
5017     if (!urb_hashtable) {
5018         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5019     }
5020     return urb_hashtable;
5021 }
5022 
5023 static void urb_hashtable_insert(struct live_urb *urb)
5024 {
5025     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5026     g_hash_table_insert(urb_hashtable, urb, urb);
5027 }
5028 
5029 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5030 {
5031     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5032     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5033 }
5034 
5035 static void urb_hashtable_remove(struct live_urb *urb)
5036 {
5037     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5038     g_hash_table_remove(urb_hashtable, urb);
5039 }
5040 
5041 static abi_long
5042 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5043                           int fd, int cmd, abi_long arg)
5044 {
5045     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5046     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5047     struct live_urb *lurb;
5048     void *argptr;
5049     uint64_t hurb;
5050     int target_size;
5051     uintptr_t target_urb_adr;
5052     abi_long ret;
5053 
5054     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5055 
5056     memset(buf_temp, 0, sizeof(uint64_t));
5057     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5058     if (is_error(ret)) {
5059         return ret;
5060     }
5061 
5062     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5063     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5064     if (!lurb->target_urb_adr) {
5065         return -TARGET_EFAULT;
5066     }
5067     urb_hashtable_remove(lurb);
5068     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5069         lurb->host_urb.buffer_length);
5070     lurb->target_buf_ptr = NULL;
5071 
5072     /* restore the guest buffer pointer */
5073     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5074 
5075     /* update the guest urb struct */
5076     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5077     if (!argptr) {
5078         g_free(lurb);
5079         return -TARGET_EFAULT;
5080     }
5081     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5082     unlock_user(argptr, lurb->target_urb_adr, target_size);
5083 
5084     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5085     /* write back the urb handle */
5086     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5087     if (!argptr) {
5088         g_free(lurb);
5089         return -TARGET_EFAULT;
5090     }
5091 
5092     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5093     target_urb_adr = lurb->target_urb_adr;
5094     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5095     unlock_user(argptr, arg, target_size);
5096 
5097     g_free(lurb);
5098     return ret;
5099 }
5100 
5101 static abi_long
5102 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5103                              uint8_t *buf_temp __attribute__((unused)),
5104                              int fd, int cmd, abi_long arg)
5105 {
5106     struct live_urb *lurb;
5107 
5108     /* map target address back to host URB with metadata. */
5109     lurb = urb_hashtable_lookup(arg);
5110     if (!lurb) {
5111         return -TARGET_EFAULT;
5112     }
5113     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5114 }
5115 
5116 static abi_long
5117 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5118                             int fd, int cmd, abi_long arg)
5119 {
5120     const argtype *arg_type = ie->arg_type;
5121     int target_size;
5122     abi_long ret;
5123     void *argptr;
5124     int rw_dir;
5125     struct live_urb *lurb;
5126 
5127     /*
5128      * Each submitted URB needs to map to a unique ID for the
5129      * kernel, and that unique ID needs to be a pointer to
5130      * host memory.  Hence, we need to malloc for each URB.
5131      * Isochronous transfers have a variable-length struct.
5132      */
5133     arg_type++;
5134     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5135 
5136     /* construct host copy of urb and metadata */
5137     lurb = g_try_malloc0(sizeof(struct live_urb));
5138     if (!lurb) {
5139         return -TARGET_ENOMEM;
5140     }
5141 
5142     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5143     if (!argptr) {
5144         g_free(lurb);
5145         return -TARGET_EFAULT;
5146     }
5147     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5148     unlock_user(argptr, arg, 0);
5149 
5150     lurb->target_urb_adr = arg;
5151     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5152 
5153     /* buffer space used depends on endpoint type so lock the entire buffer */
5154     /* control type urbs should check the buffer contents for true direction */
5155     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5156     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5157         lurb->host_urb.buffer_length, 1);
5158     if (lurb->target_buf_ptr == NULL) {
5159         g_free(lurb);
5160         return -TARGET_EFAULT;
5161     }
5162 
5163     /* update buffer pointer in host copy */
5164     lurb->host_urb.buffer = lurb->target_buf_ptr;
5165 
5166     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5167     if (is_error(ret)) {
5168         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5169         g_free(lurb);
5170     } else {
5171         urb_hashtable_insert(lurb);
5172     }
5173 
5174     return ret;
5175 }
5176 #endif /* CONFIG_USBFS */
5177 
5178 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5179                             int cmd, abi_long arg)
5180 {
5181     void *argptr;
5182     struct dm_ioctl *host_dm;
5183     abi_long guest_data;
5184     uint32_t guest_data_size;
5185     int target_size;
5186     const argtype *arg_type = ie->arg_type;
5187     abi_long ret;
5188     void *big_buf = NULL;
5189     char *host_data;
5190 
5191     arg_type++;
5192     target_size = thunk_type_size(arg_type, 0);
5193     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5194     if (!argptr) {
5195         ret = -TARGET_EFAULT;
5196         goto out;
5197     }
5198     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5199     unlock_user(argptr, arg, 0);
5200 
5201     /* buf_temp is too small, so fetch things into a bigger buffer */
5202     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5203     memcpy(big_buf, buf_temp, target_size);
5204     buf_temp = big_buf;
5205     host_dm = big_buf;
5206 
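    /*
     * The dm_ioctl header is followed by a command-specific payload starting
     * data_start bytes into the same buffer, both in guest memory and in the
     * host copy built here.
     */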
5207     guest_data = arg + host_dm->data_start;
5208     if ((guest_data - arg) < 0) {
5209         ret = -TARGET_EINVAL;
5210         goto out;
5211     }
5212     guest_data_size = host_dm->data_size - host_dm->data_start;
5213     host_data = (char*)host_dm + host_dm->data_start;
5214 
5215     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5216     if (!argptr) {
5217         ret = -TARGET_EFAULT;
5218         goto out;
5219     }
5220 
5221     switch (ie->host_cmd) {
5222     case DM_REMOVE_ALL:
5223     case DM_LIST_DEVICES:
5224     case DM_DEV_CREATE:
5225     case DM_DEV_REMOVE:
5226     case DM_DEV_SUSPEND:
5227     case DM_DEV_STATUS:
5228     case DM_DEV_WAIT:
5229     case DM_TABLE_STATUS:
5230     case DM_TABLE_CLEAR:
5231     case DM_TABLE_DEPS:
5232     case DM_LIST_VERSIONS:
5233         /* no input data */
5234         break;
5235     case DM_DEV_RENAME:
5236     case DM_DEV_SET_GEOMETRY:
5237         /* data contains only strings */
5238         memcpy(host_data, argptr, guest_data_size);
5239         break;
5240     case DM_TARGET_MSG:
5241         memcpy(host_data, argptr, guest_data_size);
5242         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5243         break;
5244     case DM_TABLE_LOAD:
5245     {
5246         void *gspec = argptr;
5247         void *cur_data = host_data;
5248         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5249         int spec_size = thunk_type_size(arg_type, 0);
5250         int i;
5251 
5252         for (i = 0; i < host_dm->target_count; i++) {
5253             struct dm_target_spec *spec = cur_data;
5254             uint32_t next;
5255             int slen;
5256 
5257             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5258             slen = strlen((char*)gspec + spec_size) + 1;
5259             next = spec->next;
5260             spec->next = sizeof(*spec) + slen;
5261             strcpy((char*)&spec[1], gspec + spec_size);
5262             gspec += next;
5263             cur_data += spec->next;
5264         }
5265         break;
5266     }
5267     default:
5268         ret = -TARGET_EINVAL;
5269         unlock_user(argptr, guest_data, 0);
5270         goto out;
5271     }
5272     unlock_user(argptr, guest_data, 0);
5273 
5274     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5275     if (!is_error(ret)) {
5276         guest_data = arg + host_dm->data_start;
5277         guest_data_size = host_dm->data_size - host_dm->data_start;
5278         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5279         switch (ie->host_cmd) {
5280         case DM_REMOVE_ALL:
5281         case DM_DEV_CREATE:
5282         case DM_DEV_REMOVE:
5283         case DM_DEV_RENAME:
5284         case DM_DEV_SUSPEND:
5285         case DM_DEV_STATUS:
5286         case DM_TABLE_LOAD:
5287         case DM_TABLE_CLEAR:
5288         case DM_TARGET_MSG:
5289         case DM_DEV_SET_GEOMETRY:
5290             /* no return data */
5291             break;
5292         case DM_LIST_DEVICES:
5293         {
5294             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5295             uint32_t remaining_data = guest_data_size;
5296             void *cur_data = argptr;
5297             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5298             int nl_size = 12; /* can't use thunk_size due to alignment */
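            /* (dev is 8 bytes and next is 4, so the name string begins at
             *  offset 12; sizeof() would include tail padding and overshoot.) */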
5299 
5300             while (1) {
5301                 uint32_t next = nl->next;
5302                 if (next) {
5303                     nl->next = nl_size + (strlen(nl->name) + 1);
5304                 }
5305                 if (remaining_data < nl->next) {
5306                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5307                     break;
5308                 }
5309                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5310                 strcpy(cur_data + nl_size, nl->name);
5311                 cur_data += nl->next;
5312                 remaining_data -= nl->next;
5313                 if (!next) {
5314                     break;
5315                 }
5316                 nl = (void*)nl + next;
5317             }
5318             break;
5319         }
5320         case DM_DEV_WAIT:
5321         case DM_TABLE_STATUS:
5322         {
5323             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5324             void *cur_data = argptr;
5325             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5326             int spec_size = thunk_type_size(arg_type, 0);
5327             int i;
5328 
5329             for (i = 0; i < host_dm->target_count; i++) {
5330                 uint32_t next = spec->next;
5331                 int slen = strlen((char*)&spec[1]) + 1;
5332                 spec->next = (cur_data - argptr) + spec_size + slen;
5333                 if (guest_data_size < spec->next) {
5334                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5335                     break;
5336                 }
5337                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5338                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5339                 cur_data = argptr + spec->next;
5340                 spec = (void*)host_dm + host_dm->data_start + next;
5341             }
5342             break;
5343         }
5344         case DM_TABLE_DEPS:
5345         {
5346             void *hdata = (void*)host_dm + host_dm->data_start;
5347             int count = *(uint32_t*)hdata;
5348             uint64_t *hdev = hdata + 8;
5349             uint64_t *gdev = argptr + 8;
5350             int i;
5351 
5352             *(uint32_t*)argptr = tswap32(count);
5353             for (i = 0; i < count; i++) {
5354                 *gdev = tswap64(*hdev);
5355                 gdev++;
5356                 hdev++;
5357             }
5358             break;
5359         }
5360         case DM_LIST_VERSIONS:
5361         {
5362             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5363             uint32_t remaining_data = guest_data_size;
5364             void *cur_data = argptr;
5365             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5366             int vers_size = thunk_type_size(arg_type, 0);
5367 
5368             while (1) {
5369                 uint32_t next = vers->next;
5370                 if (next) {
5371                     vers->next = vers_size + (strlen(vers->name) + 1);
5372                 }
5373                 if (remaining_data < vers->next) {
5374                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5375                     break;
5376                 }
5377                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5378                 strcpy(cur_data + vers_size, vers->name);
5379                 cur_data += vers->next;
5380                 remaining_data -= vers->next;
5381                 if (!next) {
5382                     break;
5383                 }
5384                 vers = (void*)vers + next;
5385             }
5386             break;
5387         }
5388         default:
5389             unlock_user(argptr, guest_data, 0);
5390             ret = -TARGET_EINVAL;
5391             goto out;
5392         }
5393         unlock_user(argptr, guest_data, guest_data_size);
5394 
5395         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5396         if (!argptr) {
5397             ret = -TARGET_EFAULT;
5398             goto out;
5399         }
5400         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5401         unlock_user(argptr, arg, target_size);
5402     }
5403 out:
5404     g_free(big_buf);
5405     return ret;
5406 }
5407 
5408 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5409                                int cmd, abi_long arg)
5410 {
5411     void *argptr;
5412     int target_size;
5413     const argtype *arg_type = ie->arg_type;
5414     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5415     abi_long ret;
5416 
5417     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5418     struct blkpg_partition host_part;
5419 
5420     /* Read and convert blkpg */
5421     arg_type++;
5422     target_size = thunk_type_size(arg_type, 0);
5423     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5424     if (!argptr) {
5425         ret = -TARGET_EFAULT;
5426         goto out;
5427     }
5428     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5429     unlock_user(argptr, arg, 0);
5430 
5431     switch (host_blkpg->op) {
5432     case BLKPG_ADD_PARTITION:
5433     case BLKPG_DEL_PARTITION:
5434         /* payload is struct blkpg_partition */
5435         break;
5436     default:
5437         /* Unknown opcode */
5438         ret = -TARGET_EINVAL;
5439         goto out;
5440     }
5441 
5442     /* Read and convert blkpg->data */
5443     arg = (abi_long)(uintptr_t)host_blkpg->data;
5444     target_size = thunk_type_size(part_arg_type, 0);
5445     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5446     if (!argptr) {
5447         ret = -TARGET_EFAULT;
5448         goto out;
5449     }
5450     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5451     unlock_user(argptr, arg, 0);
5452 
5453     /* Swizzle the data pointer to our local copy and call! */
5454     host_blkpg->data = &host_part;
5455     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5456 
5457 out:
5458     return ret;
5459 }
5460 
5461 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5462                                 int fd, int cmd, abi_long arg)
5463 {
5464     const argtype *arg_type = ie->arg_type;
5465     const StructEntry *se;
5466     const argtype *field_types;
5467     const int *dst_offsets, *src_offsets;
5468     int target_size;
5469     void *argptr;
5470     abi_ulong *target_rt_dev_ptr = NULL;
5471     unsigned long *host_rt_dev_ptr = NULL;
5472     abi_long ret;
5473     int i;
5474 
5475     assert(ie->access == IOC_W);
5476     assert(*arg_type == TYPE_PTR);
5477     arg_type++;
5478     assert(*arg_type == TYPE_STRUCT);
5479     target_size = thunk_type_size(arg_type, 0);
5480     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5481     if (!argptr) {
5482         return -TARGET_EFAULT;
5483     }
5484     arg_type++;
5485     assert(*arg_type == (int)STRUCT_rtentry);
5486     se = struct_entries + *arg_type++;
5487     assert(se->convert[0] == NULL);
5488     /* convert struct here to be able to catch rt_dev string */
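    /*
     * rt_dev is a pointer to a device-name string inside struct rtentry, so
     * it cannot simply be byte-swapped like the other fields: the guest
     * string is locked into host memory and its host address stored in the
     * converted structure instead.
     */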
5489     field_types = se->field_types;
5490     dst_offsets = se->field_offsets[THUNK_HOST];
5491     src_offsets = se->field_offsets[THUNK_TARGET];
5492     for (i = 0; i < se->nb_fields; i++) {
5493         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5494             assert(*field_types == TYPE_PTRVOID);
5495             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5496             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5497             if (*target_rt_dev_ptr != 0) {
5498                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5499                                                   tswapal(*target_rt_dev_ptr));
5500                 if (!*host_rt_dev_ptr) {
5501                     unlock_user(argptr, arg, 0);
5502                     return -TARGET_EFAULT;
5503                 }
5504             } else {
5505                 *host_rt_dev_ptr = 0;
5506             }
5507             field_types++;
5508             continue;
5509         }
5510         field_types = thunk_convert(buf_temp + dst_offsets[i],
5511                                     argptr + src_offsets[i],
5512                                     field_types, THUNK_HOST);
5513     }
5514     unlock_user(argptr, arg, 0);
5515 
5516     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5517 
5518     assert(host_rt_dev_ptr != NULL);
5519     assert(target_rt_dev_ptr != NULL);
5520     if (*host_rt_dev_ptr != 0) {
5521         unlock_user((void *)*host_rt_dev_ptr,
5522                     *target_rt_dev_ptr, 0);
5523     }
5524     return ret;
5525 }
5526 
5527 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5528                                      int fd, int cmd, abi_long arg)
5529 {
5530     int sig = target_to_host_signal(arg);
5531     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5532 }
5533 
5534 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5535                                     int fd, int cmd, abi_long arg)
5536 {
5537     struct timeval tv;
5538     abi_long ret;
5539 
5540     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5541     if (is_error(ret)) {
5542         return ret;
5543     }
5544 
5545     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5546         if (copy_to_user_timeval(arg, &tv)) {
5547             return -TARGET_EFAULT;
5548         }
5549     } else {
5550         if (copy_to_user_timeval64(arg, &tv)) {
5551             return -TARGET_EFAULT;
5552         }
5553     }
5554 
5555     return ret;
5556 }
5557 
5558 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5559                                       int fd, int cmd, abi_long arg)
5560 {
5561     struct timespec ts;
5562     abi_long ret;
5563 
5564     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5565     if (is_error(ret)) {
5566         return ret;
5567     }
5568 
5569     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5570         if (host_to_target_timespec(arg, &ts)) {
5571             return -TARGET_EFAULT;
5572         }
5573     } else {
5574         if (host_to_target_timespec64(arg, &ts)) {
5575             return -TARGET_EFAULT;
5576         }
5577     }
5578 
5579     return ret;
5580 }
5581 
5582 #ifdef TIOCGPTPEER
5583 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5584                                      int fd, int cmd, abi_long arg)
5585 {
5586     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5587     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5588 }
5589 #endif
5590 
5591 #ifdef HAVE_DRM_H
5592 
5593 static void unlock_drm_version(struct drm_version *host_ver,
5594                                struct target_drm_version *target_ver,
5595                                bool copy)
5596 {
5597     unlock_user(host_ver->name, target_ver->name,
5598                                 copy ? host_ver->name_len : 0);
5599     unlock_user(host_ver->date, target_ver->date,
5600                                 copy ? host_ver->date_len : 0);
5601     unlock_user(host_ver->desc, target_ver->desc,
5602                                 copy ? host_ver->desc_len : 0);
5603 }
5604 
5605 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5606                                           struct target_drm_version *target_ver)
5607 {
5608     memset(host_ver, 0, sizeof(*host_ver));
5609 
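    /*
     * DRM_IOCTL_VERSION fills three caller-supplied string buffers; lock each
     * guest buffer with a non-zero length so the host ioctl can write into
     * guest memory directly, and rely on unlock_drm_version() to release them.
     */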
5610     __get_user(host_ver->name_len, &target_ver->name_len);
5611     if (host_ver->name_len) {
5612         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5613                                    target_ver->name_len, 0);
5614         if (!host_ver->name) {
5615             return -EFAULT;
5616         }
5617     }
5618 
5619     __get_user(host_ver->date_len, &target_ver->date_len);
5620     if (host_ver->date_len) {
5621         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5622                                    target_ver->date_len, 0);
5623         if (!host_ver->date) {
5624             goto err;
5625         }
5626     }
5627 
5628     __get_user(host_ver->desc_len, &target_ver->desc_len);
5629     if (host_ver->desc_len) {
5630         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5631                                    target_ver->desc_len, 0);
5632         if (!host_ver->desc) {
5633             goto err;
5634         }
5635     }
5636 
5637     return 0;
5638 err:
5639     unlock_drm_version(host_ver, target_ver, false);
5640     return -EFAULT;
5641 }
5642 
5643 static inline void host_to_target_drmversion(
5644                                           struct target_drm_version *target_ver,
5645                                           struct drm_version *host_ver)
5646 {
5647     __put_user(host_ver->version_major, &target_ver->version_major);
5648     __put_user(host_ver->version_minor, &target_ver->version_minor);
5649     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5650     __put_user(host_ver->name_len, &target_ver->name_len);
5651     __put_user(host_ver->date_len, &target_ver->date_len);
5652     __put_user(host_ver->desc_len, &target_ver->desc_len);
5653     unlock_drm_version(host_ver, target_ver, true);
5654 }
5655 
5656 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5657                              int fd, int cmd, abi_long arg)
5658 {
5659     struct drm_version *ver;
5660     struct target_drm_version *target_ver;
5661     abi_long ret;
5662 
5663     switch (ie->host_cmd) {
5664     case DRM_IOCTL_VERSION:
5665         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5666             return -TARGET_EFAULT;
5667         }
5668         ver = (struct drm_version *)buf_temp;
5669         ret = target_to_host_drmversion(ver, target_ver);
5670         if (!is_error(ret)) {
5671             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5672             if (is_error(ret)) {
5673                 unlock_drm_version(ver, target_ver, false);
5674             } else {
5675                 host_to_target_drmversion(target_ver, ver);
5676             }
5677         }
5678         unlock_user_struct(target_ver, arg, 0);
5679         return ret;
5680     }
5681     return -TARGET_ENOSYS;
5682 }
5683 
5684 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5685                                            struct drm_i915_getparam *gparam,
5686                                            int fd, abi_long arg)
5687 {
5688     abi_long ret;
5689     int value;
5690     struct target_drm_i915_getparam *target_gparam;
5691 
5692     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5693         return -TARGET_EFAULT;
5694     }
5695 
5696     __get_user(gparam->param, &target_gparam->param);
5697     gparam->value = &value;
5698     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5699     put_user_s32(value, target_gparam->value);
5700 
5701     unlock_user_struct(target_gparam, arg, 0);
5702     return ret;
5703 }
5704 
5705 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5706                                   int fd, int cmd, abi_long arg)
5707 {
5708     switch (ie->host_cmd) {
5709     case DRM_IOCTL_I915_GETPARAM:
5710         return do_ioctl_drm_i915_getparam(ie,
5711                                           (struct drm_i915_getparam *)buf_temp,
5712                                           fd, arg);
5713     default:
5714         return -TARGET_ENOSYS;
5715     }
5716 }
5717 
5718 #endif
5719 
5720 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5721                                         int fd, int cmd, abi_long arg)
5722 {
5723     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5724     struct tun_filter *target_filter;
5725     char *target_addr;
5726 
5727     assert(ie->access == IOC_W);
5728 
5729     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5730     if (!target_filter) {
5731         return -TARGET_EFAULT;
5732     }
5733     filter->flags = tswap16(target_filter->flags);
5734     filter->count = tswap16(target_filter->count);
5735     unlock_user(target_filter, arg, 0);
5736 
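    /* struct tun_filter is followed by 'count' ETH_ALEN-byte MAC addresses;
     * copy them across separately and make sure they still fit in buf_temp. */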
5737     if (filter->count) {
5738         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5739             MAX_STRUCT_SIZE) {
5740             return -TARGET_EFAULT;
5741         }
5742 
5743         target_addr = lock_user(VERIFY_READ,
5744                                 arg + offsetof(struct tun_filter, addr),
5745                                 filter->count * ETH_ALEN, 1);
5746         if (!target_addr) {
5747             return -TARGET_EFAULT;
5748         }
5749         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5750         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5751     }
5752 
5753     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5754 }
5755 
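/*
 * Table of the ioctls we know how to forward, generated from ioctls.h: each
 * entry pairs a target command number with the host command number, the data
 * direction, and (for the awkward cases) a dedicated conversion handler.
 */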
5756 IOCTLEntry ioctl_entries[] = {
5757 #define IOCTL(cmd, access, ...) \
5758     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5759 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5760     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5761 #define IOCTL_IGNORE(cmd) \
5762     { TARGET_ ## cmd, 0, #cmd },
5763 #include "ioctls.h"
5764     { 0, 0, },
5765 };
5766 
5767 /* ??? Implement proper locking for ioctls.  */
5768 /* do_ioctl() Must return target values and target errnos. */
5769 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5770 {
5771     const IOCTLEntry *ie;
5772     const argtype *arg_type;
5773     abi_long ret;
5774     uint8_t buf_temp[MAX_STRUCT_SIZE];
5775     int target_size;
5776     void *argptr;
5777 
5778     ie = ioctl_entries;
5779     for (;;) {
5780         if (ie->target_cmd == 0) {
5781             qemu_log_mask(
5782                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5783             return -TARGET_ENOSYS;
5784         }
5785         if (ie->target_cmd == cmd)
5786             break;
5787         ie++;
5788     }
5789     arg_type = ie->arg_type;
5790     if (ie->do_ioctl) {
5791         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5792     } else if (!ie->host_cmd) {
5793         /* Some architectures define BSD ioctls in their headers
5794            that are not implemented in Linux.  */
5795         return -TARGET_ENOSYS;
5796     }
5797 
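    /*
     * Generic path: use the thunk type description to convert the argument
     * into buf_temp for the host call and, for IOC_R/IOC_RW requests, to
     * convert the result back into the guest buffer afterwards.
     */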
5798     switch (arg_type[0]) {
5799     case TYPE_NULL:
5800         /* no argument */
5801         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5802         break;
5803     case TYPE_PTRVOID:
5804     case TYPE_INT:
5805     case TYPE_LONG:
5806     case TYPE_ULONG:
5807         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5808         break;
5809     case TYPE_PTR:
5810         arg_type++;
5811         target_size = thunk_type_size(arg_type, 0);
5812         switch (ie->access) {
5813         case IOC_R:
5814             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5815             if (!is_error(ret)) {
5816                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5817                 if (!argptr)
5818                     return -TARGET_EFAULT;
5819                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5820                 unlock_user(argptr, arg, target_size);
5821             }
5822             break;
5823         case IOC_W:
5824             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5825             if (!argptr)
5826                 return -TARGET_EFAULT;
5827             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5828             unlock_user(argptr, arg, 0);
5829             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5830             break;
5831         default:
5832         case IOC_RW:
5833             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5834             if (!argptr)
5835                 return -TARGET_EFAULT;
5836             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5837             unlock_user(argptr, arg, 0);
5838             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5839             if (!is_error(ret)) {
5840                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5841                 if (!argptr)
5842                     return -TARGET_EFAULT;
5843                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5844                 unlock_user(argptr, arg, target_size);
5845             }
5846             break;
5847         }
5848         break;
5849     default:
5850         qemu_log_mask(LOG_UNIMP,
5851                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5852                       (long)cmd, arg_type[0]);
5853         ret = -TARGET_ENOSYS;
5854         break;
5855     }
5856     return ret;
5857 }
5858 
5859 static const bitmask_transtbl iflag_tbl[] = {
5860         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5861         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5862         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5863         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5864         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5865         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5866         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5867         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5868         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5869         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5870         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5871         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5872         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5873         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5874         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5875         { 0, 0, 0, 0 }
5876 };
5877 
5878 static const bitmask_transtbl oflag_tbl[] = {
5879 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5880 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5881 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5882 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5883 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5884 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5885 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5886 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5887 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5888 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5889 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5890 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5891 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5892 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5893 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5894 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5895 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5896 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5897 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5898 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5899 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5900 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5901 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5902 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5903 	{ 0, 0, 0, 0 }
5904 };
5905 
5906 static const bitmask_transtbl cflag_tbl[] = {
5907 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5908 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5909 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5910 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5911 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5912 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5913 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5914 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5915 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5916 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5917 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5918 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5919 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5920 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5921 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5922 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5923 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5924 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5925 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5926 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5927 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5928 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5929 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5930 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5931 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5932 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5933 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5934 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5935 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5936 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5937 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5938 	{ 0, 0, 0, 0 }
5939 };
5940 
5941 static const bitmask_transtbl lflag_tbl[] = {
5942   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5943   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5944   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5945   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5946   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5947   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5948   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5949   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5950   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5951   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5952   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5953   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5954   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5955   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5956   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5957   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5958   { 0, 0, 0, 0 }
5959 };
5960 
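/*
 * Termios flag bits and c_cc[] indices are not numerically identical across
 * architectures, so conversion goes through the bitmask tables above and an
 * explicit per-character copy rather than a plain memcpy.
 */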
5961 static void target_to_host_termios (void *dst, const void *src)
5962 {
5963     struct host_termios *host = dst;
5964     const struct target_termios *target = src;
5965 
5966     host->c_iflag =
5967         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5968     host->c_oflag =
5969         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5970     host->c_cflag =
5971         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5972     host->c_lflag =
5973         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5974     host->c_line = target->c_line;
5975 
5976     memset(host->c_cc, 0, sizeof(host->c_cc));
5977     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5978     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5979     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5980     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5981     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5982     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5983     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5984     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5985     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5986     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5987     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5988     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5989     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5990     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5991     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5992     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5993     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5994 }
5995 
5996 static void host_to_target_termios (void *dst, const void *src)
5997 {
5998     struct target_termios *target = dst;
5999     const struct host_termios *host = src;
6000 
6001     target->c_iflag =
6002         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6003     target->c_oflag =
6004         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6005     target->c_cflag =
6006         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6007     target->c_lflag =
6008         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6009     target->c_line = host->c_line;
6010 
6011     memset(target->c_cc, 0, sizeof(target->c_cc));
6012     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6013     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6014     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6015     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6016     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6017     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6018     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6019     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6020     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6021     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6022     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6023     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6024     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6025     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6026     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6027     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6028     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6029 }
6030 
6031 static const StructEntry struct_termios_def = {
6032     .convert = { host_to_target_termios, target_to_host_termios },
6033     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6034     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6035     .print = print_termios,
6036 };
6037 
6038 static const bitmask_transtbl mmap_flags_tbl[] = {
6039     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6040     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6041     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6042     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6043       MAP_ANONYMOUS, MAP_ANONYMOUS },
6044     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6045       MAP_GROWSDOWN, MAP_GROWSDOWN },
6046     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6047       MAP_DENYWRITE, MAP_DENYWRITE },
6048     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6049       MAP_EXECUTABLE, MAP_EXECUTABLE },
6050     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6051     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6052       MAP_NORESERVE, MAP_NORESERVE },
6053     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6054     /* MAP_STACK had been ignored by the kernel for quite some time.
6055        Recognize it for the target insofar as we do not want to pass
6056        it through to the host.  */
6057     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6058     { 0, 0, 0, 0 }
6059 };
6060 
6061 /*
6062  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6063  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6064  */
6065 #if defined(TARGET_I386)
6066 
6067 /* NOTE: there is really one LDT for all the threads */
6068 static uint8_t *ldt_table;
6069 
6070 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6071 {
6072     int size;
6073     void *p;
6074 
6075     if (!ldt_table)
6076         return 0;
6077     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6078     if (size > bytecount)
6079         size = bytecount;
6080     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6081     if (!p)
6082         return -TARGET_EFAULT;
6083     /* ??? Should this be byteswapped?  */
6084     memcpy(p, ldt_table, size);
6085     unlock_user(p, ptr, size);
6086     return size;
6087 }
6088 
6089 /* XXX: add locking support */
6090 static abi_long write_ldt(CPUX86State *env,
6091                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6092 {
6093     struct target_modify_ldt_ldt_s ldt_info;
6094     struct target_modify_ldt_ldt_s *target_ldt_info;
6095     int seg_32bit, contents, read_exec_only, limit_in_pages;
6096     int seg_not_present, useable, lm;
6097     uint32_t *lp, entry_1, entry_2;
6098 
6099     if (bytecount != sizeof(ldt_info))
6100         return -TARGET_EINVAL;
6101     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6102         return -TARGET_EFAULT;
6103     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6104     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6105     ldt_info.limit = tswap32(target_ldt_info->limit);
6106     ldt_info.flags = tswap32(target_ldt_info->flags);
6107     unlock_user_struct(target_ldt_info, ptr, 0);
6108 
6109     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6110         return -TARGET_EINVAL;
6111     seg_32bit = ldt_info.flags & 1;
6112     contents = (ldt_info.flags >> 1) & 3;
6113     read_exec_only = (ldt_info.flags >> 3) & 1;
6114     limit_in_pages = (ldt_info.flags >> 4) & 1;
6115     seg_not_present = (ldt_info.flags >> 5) & 1;
6116     useable = (ldt_info.flags >> 6) & 1;
6117 #ifdef TARGET_ABI32
6118     lm = 0;
6119 #else
6120     lm = (ldt_info.flags >> 7) & 1;
6121 #endif
6122     if (contents == 3) {
6123         if (oldmode)
6124             return -TARGET_EINVAL;
6125         if (seg_not_present == 0)
6126             return -TARGET_EINVAL;
6127     }
6128     /* allocate the LDT */
6129     if (!ldt_table) {
6130         env->ldt.base = target_mmap(0,
6131                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6132                                     PROT_READ|PROT_WRITE,
6133                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6134         if (env->ldt.base == -1)
6135             return -TARGET_ENOMEM;
6136         memset(g2h(env->ldt.base), 0,
6137                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6138         env->ldt.limit = 0xffff;
6139         ldt_table = g2h(env->ldt.base);
6140     }
6141 
6142     /* NOTE: same code as Linux kernel */
6143     /* Allow LDTs to be cleared by the user. */
6144     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6145         if (oldmode ||
6146             (contents == 0		&&
6147              read_exec_only == 1	&&
6148              seg_32bit == 0		&&
6149              limit_in_pages == 0	&&
6150              seg_not_present == 1	&&
6151              useable == 0 )) {
6152             entry_1 = 0;
6153             entry_2 = 0;
6154             goto install;
6155         }
6156     }
6157 
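    /*
     * Added note: the fields are packed into the two 32-bit words of an
     * x86 segment descriptor, matching the kernel's modify_ldt() encoding:
     *   entry_1 = base[15:0] << 16 | limit[15:0]
     *   entry_2 = base[31:24] | G (limit_in_pages) | D/B (seg_32bit) |
     *             L (lm) | AVL (useable) | limit[19:16] |
     *             P (!seg_not_present) | DPL=3 and S=1 (the 0x7000) |
     *             type bits (contents, !read_exec_only) | base[23:16]
     */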
6158     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6159         (ldt_info.limit & 0x0ffff);
6160     entry_2 = (ldt_info.base_addr & 0xff000000) |
6161         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6162         (ldt_info.limit & 0xf0000) |
6163         ((read_exec_only ^ 1) << 9) |
6164         (contents << 10) |
6165         ((seg_not_present ^ 1) << 15) |
6166         (seg_32bit << 22) |
6167         (limit_in_pages << 23) |
6168         (lm << 21) |
6169         0x7000;
6170     if (!oldmode)
6171         entry_2 |= (useable << 20);
6172 
6173     /* Install the new entry ...  */
6174 install:
6175     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6176     lp[0] = tswap32(entry_1);
6177     lp[1] = tswap32(entry_2);
6178     return 0;
6179 }
6180 
6181 /* specific and weird i386 syscalls */
6182 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6183                               unsigned long bytecount)
6184 {
6185     abi_long ret;
6186 
6187     switch (func) {
6188     case 0:
6189         ret = read_ldt(ptr, bytecount);
6190         break;
6191     case 1:
6192         ret = write_ldt(env, ptr, bytecount, 1);
6193         break;
6194     case 0x11:
6195         ret = write_ldt(env, ptr, bytecount, 0);
6196         break;
6197     default:
6198         ret = -TARGET_ENOSYS;
6199         break;
6200     }
6201     return ret;
6202 }
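/*
 * Hypothetical guest-side sketch (added for illustration only): a 32-bit
 * guest typically reaches this path with something like
 *
 *     struct user_desc ld = { .entry_number = 0, .base_addr = ...,
 *                             .limit = ..., .seg_32bit = 1 };
 *     syscall(SYS_modify_ldt, 1, &ld, sizeof(ld));
 *
 * func 0 reads the LDT back, while func 1 and 0x11 both write an entry and
 * differ only in the legacy "oldmode" handling inside write_ldt() above.
 */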
6203 
6204 #if defined(TARGET_ABI32)
6205 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6206 {
6207     uint64_t *gdt_table = g2h(env->gdt.base);
6208     struct target_modify_ldt_ldt_s ldt_info;
6209     struct target_modify_ldt_ldt_s *target_ldt_info;
6210     int seg_32bit, contents, read_exec_only, limit_in_pages;
6211     int seg_not_present, useable, lm;
6212     uint32_t *lp, entry_1, entry_2;
6213     int i;
6214 
6215     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6216     if (!target_ldt_info)
6217         return -TARGET_EFAULT;
6218     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6219     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6220     ldt_info.limit = tswap32(target_ldt_info->limit);
6221     ldt_info.flags = tswap32(target_ldt_info->flags);
6222     if (ldt_info.entry_number == -1) {
6223         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6224             if (gdt_table[i] == 0) {
6225                 ldt_info.entry_number = i;
6226                 target_ldt_info->entry_number = tswap32(i);
6227                 break;
6228             }
6229         }
6230     }
6231     unlock_user_struct(target_ldt_info, ptr, 1);
6232 
6233     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6234         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6235         return -TARGET_EINVAL;
6236     seg_32bit = ldt_info.flags & 1;
6237     contents = (ldt_info.flags >> 1) & 3;
6238     read_exec_only = (ldt_info.flags >> 3) & 1;
6239     limit_in_pages = (ldt_info.flags >> 4) & 1;
6240     seg_not_present = (ldt_info.flags >> 5) & 1;
6241     useable = (ldt_info.flags >> 6) & 1;
6242 #ifdef TARGET_ABI32
6243     lm = 0;
6244 #else
6245     lm = (ldt_info.flags >> 7) & 1;
6246 #endif
6247 
6248     if (contents == 3) {
6249         if (seg_not_present == 0)
6250             return -TARGET_EINVAL;
6251     }
6252 
6253     /* NOTE: same code as Linux kernel */
6254     /* Allow LDTs to be cleared by the user. */
6255     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6256         if ((contents == 0             &&
6257              read_exec_only == 1       &&
6258              seg_32bit == 0            &&
6259              limit_in_pages == 0       &&
6260              seg_not_present == 1      &&
6261              useable == 0 )) {
6262             entry_1 = 0;
6263             entry_2 = 0;
6264             goto install;
6265         }
6266     }
6267 
6268     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6269         (ldt_info.limit & 0x0ffff);
6270     entry_2 = (ldt_info.base_addr & 0xff000000) |
6271         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6272         (ldt_info.limit & 0xf0000) |
6273         ((read_exec_only ^ 1) << 9) |
6274         (contents << 10) |
6275         ((seg_not_present ^ 1) << 15) |
6276         (seg_32bit << 22) |
6277         (limit_in_pages << 23) |
6278         (useable << 20) |
6279         (lm << 21) |
6280         0x7000;
6281 
6282     /* Install the new entry ...  */
6283 install:
6284     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6285     lp[0] = tswap32(entry_1);
6286     lp[1] = tswap32(entry_2);
6287     return 0;
6288 }
6289 
6290 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6291 {
6292     struct target_modify_ldt_ldt_s *target_ldt_info;
6293     uint64_t *gdt_table = g2h(env->gdt.base);
6294     uint32_t base_addr, limit, flags;
6295     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6296     int seg_not_present, useable, lm;
6297     uint32_t *lp, entry_1, entry_2;
6298 
6299     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6300     if (!target_ldt_info)
6301         return -TARGET_EFAULT;
6302     idx = tswap32(target_ldt_info->entry_number);
6303     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6304         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6305         unlock_user_struct(target_ldt_info, ptr, 1);
6306         return -TARGET_EINVAL;
6307     }
6308     lp = (uint32_t *)(gdt_table + idx);
6309     entry_1 = tswap32(lp[0]);
6310     entry_2 = tswap32(lp[1]);
6311 
6312     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6313     contents = (entry_2 >> 10) & 3;
6314     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6315     seg_32bit = (entry_2 >> 22) & 1;
6316     limit_in_pages = (entry_2 >> 23) & 1;
6317     useable = (entry_2 >> 20) & 1;
6318 #ifdef TARGET_ABI32
6319     lm = 0;
6320 #else
6321     lm = (entry_2 >> 21) & 1;
6322 #endif
6323     flags = (seg_32bit << 0) | (contents << 1) |
6324         (read_exec_only << 3) | (limit_in_pages << 4) |
6325         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6326     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6327     base_addr = (entry_1 >> 16) |
6328         (entry_2 & 0xff000000) |
6329         ((entry_2 & 0xff) << 16);
6330     target_ldt_info->base_addr = tswapal(base_addr);
6331     target_ldt_info->limit = tswap32(limit);
6332     target_ldt_info->flags = tswap32(flags);
6333     unlock_user_struct(target_ldt_info, ptr, 1);
6334     return 0;
6335 }
6336 
6337 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6338 {
6339     return -TARGET_ENOSYS;
6340 }
6341 #else
6342 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6343 {
6344     abi_long ret = 0;
6345     abi_ulong val;
6346     int idx;
6347 
6348     switch(code) {
6349     case TARGET_ARCH_SET_GS:
6350     case TARGET_ARCH_SET_FS:
6351         if (code == TARGET_ARCH_SET_GS)
6352             idx = R_GS;
6353         else
6354             idx = R_FS;
6355         cpu_x86_load_seg(env, idx, 0);
6356         env->segs[idx].base = addr;
6357         break;
6358     case TARGET_ARCH_GET_GS:
6359     case TARGET_ARCH_GET_FS:
6360         if (code == TARGET_ARCH_GET_GS)
6361             idx = R_GS;
6362         else
6363             idx = R_FS;
6364         val = env->segs[idx].base;
6365         if (put_user(val, addr, abi_ulong))
6366             ret = -TARGET_EFAULT;
6367         break;
6368     default:
6369         ret = -TARGET_EINVAL;
6370         break;
6371     }
6372     return ret;
6373 }
6374 #endif /* defined(TARGET_ABI32) */
6375 
6376 #endif /* defined(TARGET_I386) */
6377 
6378 #define NEW_STACK_SIZE 0x40000
6379 
6380 
6381 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6382 typedef struct {
6383     CPUArchState *env;
6384     pthread_mutex_t mutex;
6385     pthread_cond_t cond;
6386     pthread_t thread;
6387     uint32_t tid;
6388     abi_ulong child_tidptr;
6389     abi_ulong parent_tidptr;
6390     sigset_t sigmask;
6391 } new_thread_info;
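/*
 * Added note: new_thread_info is the rendezvous block between do_fork()
 * and clone_func().  The parent fills in env and the tid pointers, blocks
 * all signals, creates the thread and waits on 'cond'; the child publishes
 * its TID, signals 'cond', and then briefly takes clone_lock so that it
 * cannot enter cpu_loop() before the parent has finished the shared setup.
 */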
6392 
6393 static void *clone_func(void *arg)
6394 {
6395     new_thread_info *info = arg;
6396     CPUArchState *env;
6397     CPUState *cpu;
6398     TaskState *ts;
6399 
6400     rcu_register_thread();
6401     tcg_register_thread();
6402     env = info->env;
6403     cpu = env_cpu(env);
6404     thread_cpu = cpu;
6405     ts = (TaskState *)cpu->opaque;
6406     info->tid = sys_gettid();
6407     task_settid(ts);
6408     if (info->child_tidptr)
6409         put_user_u32(info->tid, info->child_tidptr);
6410     if (info->parent_tidptr)
6411         put_user_u32(info->tid, info->parent_tidptr);
6412     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6413     /* Enable signals.  */
6414     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6415     /* Signal to the parent that we're ready.  */
6416     pthread_mutex_lock(&info->mutex);
6417     pthread_cond_broadcast(&info->cond);
6418     pthread_mutex_unlock(&info->mutex);
6419     /* Wait until the parent has finished initializing the tls state.  */
6420     pthread_mutex_lock(&clone_lock);
6421     pthread_mutex_unlock(&clone_lock);
6422     cpu_loop(env);
6423     /* never exits */
6424     return NULL;
6425 }
6426 
6427 /* do_fork() must return host values and target errnos (unlike most
6428    do_*() functions). */
6429 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6430                    abi_ulong parent_tidptr, target_ulong newtls,
6431                    abi_ulong child_tidptr)
6432 {
6433     CPUState *cpu = env_cpu(env);
6434     int ret;
6435     TaskState *ts;
6436     CPUState *new_cpu;
6437     CPUArchState *new_env;
6438     sigset_t sigmask;
6439 
6440     flags &= ~CLONE_IGNORED_FLAGS;
6441 
6442     /* Emulate vfork() with fork() */
6443     if (flags & CLONE_VFORK)
6444         flags &= ~(CLONE_VFORK | CLONE_VM);
6445 
6446     if (flags & CLONE_VM) {
6447         TaskState *parent_ts = (TaskState *)cpu->opaque;
6448         new_thread_info info;
6449         pthread_attr_t attr;
6450 
6451         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6452             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6453             return -TARGET_EINVAL;
6454         }
6455 
6456         ts = g_new0(TaskState, 1);
6457         init_task_state(ts);
6458 
6459         /* Grab a mutex so that thread setup appears atomic.  */
6460         pthread_mutex_lock(&clone_lock);
6461 
6462         /* we create a new CPU instance. */
6463         new_env = cpu_copy(env);
6464         /* Init regs that differ from the parent.  */
6465         cpu_clone_regs_child(new_env, newsp, flags);
6466         cpu_clone_regs_parent(env, flags);
6467         new_cpu = env_cpu(new_env);
6468         new_cpu->opaque = ts;
6469         ts->bprm = parent_ts->bprm;
6470         ts->info = parent_ts->info;
6471         ts->signal_mask = parent_ts->signal_mask;
6472 
6473         if (flags & CLONE_CHILD_CLEARTID) {
6474             ts->child_tidptr = child_tidptr;
6475         }
6476 
6477         if (flags & CLONE_SETTLS) {
6478             cpu_set_tls (new_env, newtls);
6479         }
6480 
6481         memset(&info, 0, sizeof(info));
6482         pthread_mutex_init(&info.mutex, NULL);
6483         pthread_mutex_lock(&info.mutex);
6484         pthread_cond_init(&info.cond, NULL);
6485         info.env = new_env;
6486         if (flags & CLONE_CHILD_SETTID) {
6487             info.child_tidptr = child_tidptr;
6488         }
6489         if (flags & CLONE_PARENT_SETTID) {
6490             info.parent_tidptr = parent_tidptr;
6491         }
6492 
6493         ret = pthread_attr_init(&attr);
6494         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6495         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6496         /* It is not safe to deliver signals until the child has finished
6497            initializing, so temporarily block all signals.  */
6498         sigfillset(&sigmask);
6499         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6500         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6501 
6502         /* If this is our first additional thread, we need to ensure we
6503          * generate code for parallel execution and flush old translations.
6504          */
6505         if (!parallel_cpus) {
6506             parallel_cpus = true;
6507             tb_flush(cpu);
6508         }
6509 
6510         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6511         /* TODO: Free new CPU state if thread creation failed.  */
6512 
6513         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6514         pthread_attr_destroy(&attr);
6515         if (ret == 0) {
6516             /* Wait for the child to initialize.  */
6517             pthread_cond_wait(&info.cond, &info.mutex);
6518             ret = info.tid;
6519         } else {
6520             ret = -1;
6521         }
6522         pthread_mutex_unlock(&info.mutex);
6523         pthread_cond_destroy(&info.cond);
6524         pthread_mutex_destroy(&info.mutex);
6525         pthread_mutex_unlock(&clone_lock);
6526     } else {
6527         /* if no CLONE_VM, we consider it is a fork */
6528         if (flags & CLONE_INVALID_FORK_FLAGS) {
6529             return -TARGET_EINVAL;
6530         }
6531 
6532         /* We can't support custom termination signals */
6533         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6534             return -TARGET_EINVAL;
6535         }
6536 
6537         if (block_signals()) {
6538             return -TARGET_ERESTARTSYS;
6539         }
6540 
6541         fork_start();
6542         ret = fork();
6543         if (ret == 0) {
6544             /* Child Process.  */
6545             cpu_clone_regs_child(env, newsp, flags);
6546             fork_end(1);
6547             /* There is a race condition here.  The parent process could
6548                theoretically read the TID in the child process before the
6549                child's tid is set.  Closing it would require either ptrace
6550                (not implemented) or having *_tidptr point at a shared memory
6551                mapping.  We can't repeat the spinlock hack used above because
6552                the child process gets its own copy of the lock.  */
6553             if (flags & CLONE_CHILD_SETTID)
6554                 put_user_u32(sys_gettid(), child_tidptr);
6555             if (flags & CLONE_PARENT_SETTID)
6556                 put_user_u32(sys_gettid(), parent_tidptr);
6557             ts = (TaskState *)cpu->opaque;
6558             if (flags & CLONE_SETTLS)
6559                 cpu_set_tls (env, newtls);
6560             if (flags & CLONE_CHILD_CLEARTID)
6561                 ts->child_tidptr = child_tidptr;
6562         } else {
6563             cpu_clone_regs_parent(env, flags);
6564             fork_end(0);
6565         }
6566     }
6567     return ret;
6568 }
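/*
 * Added note: do_fork() therefore has two quite different paths.  With
 * CLONE_VM the guest "clone" becomes a host pthread inside this QEMU
 * process; without CLONE_VM it becomes a plain host fork(), which is also
 * how guest vfork() ends up being handled since CLONE_VFORK|CLONE_VM are
 * stripped above.
 */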
6569 
6570 /* warning: doesn't handle Linux-specific flags... */
6571 static int target_to_host_fcntl_cmd(int cmd)
6572 {
6573     int ret;
6574 
6575     switch(cmd) {
6576     case TARGET_F_DUPFD:
6577     case TARGET_F_GETFD:
6578     case TARGET_F_SETFD:
6579     case TARGET_F_GETFL:
6580     case TARGET_F_SETFL:
6581     case TARGET_F_OFD_GETLK:
6582     case TARGET_F_OFD_SETLK:
6583     case TARGET_F_OFD_SETLKW:
6584         ret = cmd;
6585         break;
6586     case TARGET_F_GETLK:
6587         ret = F_GETLK64;
6588         break;
6589     case TARGET_F_SETLK:
6590         ret = F_SETLK64;
6591         break;
6592     case TARGET_F_SETLKW:
6593         ret = F_SETLKW64;
6594         break;
6595     case TARGET_F_GETOWN:
6596         ret = F_GETOWN;
6597         break;
6598     case TARGET_F_SETOWN:
6599         ret = F_SETOWN;
6600         break;
6601     case TARGET_F_GETSIG:
6602         ret = F_GETSIG;
6603         break;
6604     case TARGET_F_SETSIG:
6605         ret = F_SETSIG;
6606         break;
6607 #if TARGET_ABI_BITS == 32
6608     case TARGET_F_GETLK64:
6609         ret = F_GETLK64;
6610         break;
6611     case TARGET_F_SETLK64:
6612         ret = F_SETLK64;
6613         break;
6614     case TARGET_F_SETLKW64:
6615         ret = F_SETLKW64;
6616         break;
6617 #endif
6618     case TARGET_F_SETLEASE:
6619         ret = F_SETLEASE;
6620         break;
6621     case TARGET_F_GETLEASE:
6622         ret = F_GETLEASE;
6623         break;
6624 #ifdef F_DUPFD_CLOEXEC
6625     case TARGET_F_DUPFD_CLOEXEC:
6626         ret = F_DUPFD_CLOEXEC;
6627         break;
6628 #endif
6629     case TARGET_F_NOTIFY:
6630         ret = F_NOTIFY;
6631         break;
6632 #ifdef F_GETOWN_EX
6633     case TARGET_F_GETOWN_EX:
6634         ret = F_GETOWN_EX;
6635         break;
6636 #endif
6637 #ifdef F_SETOWN_EX
6638     case TARGET_F_SETOWN_EX:
6639         ret = F_SETOWN_EX;
6640         break;
6641 #endif
6642 #ifdef F_SETPIPE_SZ
6643     case TARGET_F_SETPIPE_SZ:
6644         ret = F_SETPIPE_SZ;
6645         break;
6646     case TARGET_F_GETPIPE_SZ:
6647         ret = F_GETPIPE_SZ;
6648         break;
6649 #endif
6650 #ifdef F_ADD_SEALS
6651     case TARGET_F_ADD_SEALS:
6652         ret = F_ADD_SEALS;
6653         break;
6654     case TARGET_F_GET_SEALS:
6655         ret = F_GET_SEALS;
6656         break;
6657 #endif
6658     default:
6659         ret = -TARGET_EINVAL;
6660         break;
6661     }
6662 
6663 #if defined(__powerpc64__)
6664     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6665      * are not supported by the kernel. The glibc fcntl wrapper actually
6666      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6667      * the syscall directly, adjust to what the kernel supports.
6668      */
6669     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6670         ret -= F_GETLK64 - 5;
6671     }
6672 #endif
6673 
6674     return ret;
6675 }
6676 
6677 #define FLOCK_TRANSTBL \
6678     switch (type) { \
6679     TRANSTBL_CONVERT(F_RDLCK); \
6680     TRANSTBL_CONVERT(F_WRLCK); \
6681     TRANSTBL_CONVERT(F_UNLCK); \
6682     }
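/*
 * Added note: FLOCK_TRANSTBL is an X-macro.  Each user defines
 * TRANSTBL_CONVERT before expanding it, so the same list of lock types
 * yields both conversion directions; target_to_host_flock() below expands
 * to
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 */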
6683 
6684 static int target_to_host_flock(int type)
6685 {
6686 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6687     FLOCK_TRANSTBL
6688 #undef  TRANSTBL_CONVERT
6689     return -TARGET_EINVAL;
6690 }
6691 
6692 static int host_to_target_flock(int type)
6693 {
6694 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6695     FLOCK_TRANSTBL
6696 #undef  TRANSTBL_CONVERT
6697     /* if we don't know how to convert the value coming from the
6698      * host, we copy it to the target field as-is
6699      */
6700     return type;
6701 }
6702 
6703 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6704                                             abi_ulong target_flock_addr)
6705 {
6706     struct target_flock *target_fl;
6707     int l_type;
6708 
6709     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6710         return -TARGET_EFAULT;
6711     }
6712 
6713     __get_user(l_type, &target_fl->l_type);
6714     l_type = target_to_host_flock(l_type);
6715     if (l_type < 0) {
6716         return l_type;
6717     }
6718     fl->l_type = l_type;
6719     __get_user(fl->l_whence, &target_fl->l_whence);
6720     __get_user(fl->l_start, &target_fl->l_start);
6721     __get_user(fl->l_len, &target_fl->l_len);
6722     __get_user(fl->l_pid, &target_fl->l_pid);
6723     unlock_user_struct(target_fl, target_flock_addr, 0);
6724     return 0;
6725 }
6726 
6727 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6728                                           const struct flock64 *fl)
6729 {
6730     struct target_flock *target_fl;
6731     short l_type;
6732 
6733     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6734         return -TARGET_EFAULT;
6735     }
6736 
6737     l_type = host_to_target_flock(fl->l_type);
6738     __put_user(l_type, &target_fl->l_type);
6739     __put_user(fl->l_whence, &target_fl->l_whence);
6740     __put_user(fl->l_start, &target_fl->l_start);
6741     __put_user(fl->l_len, &target_fl->l_len);
6742     __put_user(fl->l_pid, &target_fl->l_pid);
6743     unlock_user_struct(target_fl, target_flock_addr, 1);
6744     return 0;
6745 }
6746 
6747 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6748 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6749 
6750 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6751 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6752                                                    abi_ulong target_flock_addr)
6753 {
6754     struct target_oabi_flock64 *target_fl;
6755     int l_type;
6756 
6757     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6758         return -TARGET_EFAULT;
6759     }
6760 
6761     __get_user(l_type, &target_fl->l_type);
6762     l_type = target_to_host_flock(l_type);
6763     if (l_type < 0) {
6764         return l_type;
6765     }
6766     fl->l_type = l_type;
6767     __get_user(fl->l_whence, &target_fl->l_whence);
6768     __get_user(fl->l_start, &target_fl->l_start);
6769     __get_user(fl->l_len, &target_fl->l_len);
6770     __get_user(fl->l_pid, &target_fl->l_pid);
6771     unlock_user_struct(target_fl, target_flock_addr, 0);
6772     return 0;
6773 }
6774 
6775 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6776                                                  const struct flock64 *fl)
6777 {
6778     struct target_oabi_flock64 *target_fl;
6779     short l_type;
6780 
6781     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6782         return -TARGET_EFAULT;
6783     }
6784 
6785     l_type = host_to_target_flock(fl->l_type);
6786     __put_user(l_type, &target_fl->l_type);
6787     __put_user(fl->l_whence, &target_fl->l_whence);
6788     __put_user(fl->l_start, &target_fl->l_start);
6789     __put_user(fl->l_len, &target_fl->l_len);
6790     __put_user(fl->l_pid, &target_fl->l_pid);
6791     unlock_user_struct(target_fl, target_flock_addr, 1);
6792     return 0;
6793 }
6794 #endif
6795 
6796 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6797                                               abi_ulong target_flock_addr)
6798 {
6799     struct target_flock64 *target_fl;
6800     int l_type;
6801 
6802     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6803         return -TARGET_EFAULT;
6804     }
6805 
6806     __get_user(l_type, &target_fl->l_type);
6807     l_type = target_to_host_flock(l_type);
6808     if (l_type < 0) {
6809         return l_type;
6810     }
6811     fl->l_type = l_type;
6812     __get_user(fl->l_whence, &target_fl->l_whence);
6813     __get_user(fl->l_start, &target_fl->l_start);
6814     __get_user(fl->l_len, &target_fl->l_len);
6815     __get_user(fl->l_pid, &target_fl->l_pid);
6816     unlock_user_struct(target_fl, target_flock_addr, 0);
6817     return 0;
6818 }
6819 
6820 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6821                                             const struct flock64 *fl)
6822 {
6823     struct target_flock64 *target_fl;
6824     short l_type;
6825 
6826     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6827         return -TARGET_EFAULT;
6828     }
6829 
6830     l_type = host_to_target_flock(fl->l_type);
6831     __put_user(l_type, &target_fl->l_type);
6832     __put_user(fl->l_whence, &target_fl->l_whence);
6833     __put_user(fl->l_start, &target_fl->l_start);
6834     __put_user(fl->l_len, &target_fl->l_len);
6835     __put_user(fl->l_pid, &target_fl->l_pid);
6836     unlock_user_struct(target_fl, target_flock_addr, 1);
6837     return 0;
6838 }
6839 
6840 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6841 {
6842     struct flock64 fl64;
6843 #ifdef F_GETOWN_EX
6844     struct f_owner_ex fox;
6845     struct target_f_owner_ex *target_fox;
6846 #endif
6847     abi_long ret;
6848     int host_cmd = target_to_host_fcntl_cmd(cmd);
6849 
6850     if (host_cmd == -TARGET_EINVAL)
6851         return host_cmd;
6852 
6853     switch(cmd) {
6854     case TARGET_F_GETLK:
6855         ret = copy_from_user_flock(&fl64, arg);
6856         if (ret) {
6857             return ret;
6858         }
6859         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6860         if (ret == 0) {
6861             ret = copy_to_user_flock(arg, &fl64);
6862         }
6863         break;
6864 
6865     case TARGET_F_SETLK:
6866     case TARGET_F_SETLKW:
6867         ret = copy_from_user_flock(&fl64, arg);
6868         if (ret) {
6869             return ret;
6870         }
6871         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6872         break;
6873 
6874     case TARGET_F_GETLK64:
6875     case TARGET_F_OFD_GETLK:
6876         ret = copy_from_user_flock64(&fl64, arg);
6877         if (ret) {
6878             return ret;
6879         }
6880         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6881         if (ret == 0) {
6882             ret = copy_to_user_flock64(arg, &fl64);
6883         }
6884         break;
6885     case TARGET_F_SETLK64:
6886     case TARGET_F_SETLKW64:
6887     case TARGET_F_OFD_SETLK:
6888     case TARGET_F_OFD_SETLKW:
6889         ret = copy_from_user_flock64(&fl64, arg);
6890         if (ret) {
6891             return ret;
6892         }
6893         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6894         break;
6895 
6896     case TARGET_F_GETFL:
6897         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6898         if (ret >= 0) {
6899             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6900         }
6901         break;
6902 
6903     case TARGET_F_SETFL:
6904         ret = get_errno(safe_fcntl(fd, host_cmd,
6905                                    target_to_host_bitmask(arg,
6906                                                           fcntl_flags_tbl)));
6907         break;
6908 
6909 #ifdef F_GETOWN_EX
6910     case TARGET_F_GETOWN_EX:
6911         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6912         if (ret >= 0) {
6913             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6914                 return -TARGET_EFAULT;
6915             target_fox->type = tswap32(fox.type);
6916             target_fox->pid = tswap32(fox.pid);
6917             unlock_user_struct(target_fox, arg, 1);
6918         }
6919         break;
6920 #endif
6921 
6922 #ifdef F_SETOWN_EX
6923     case TARGET_F_SETOWN_EX:
6924         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6925             return -TARGET_EFAULT;
6926         fox.type = tswap32(target_fox->type);
6927         fox.pid = tswap32(target_fox->pid);
6928         unlock_user_struct(target_fox, arg, 0);
6929         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6930         break;
6931 #endif
6932 
6933     case TARGET_F_SETSIG:
6934         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6935         break;
6936 
6937     case TARGET_F_GETSIG:
6938         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6939         break;
6940 
6941     case TARGET_F_SETOWN:
6942     case TARGET_F_GETOWN:
6943     case TARGET_F_SETLEASE:
6944     case TARGET_F_GETLEASE:
6945     case TARGET_F_SETPIPE_SZ:
6946     case TARGET_F_GETPIPE_SZ:
6947     case TARGET_F_ADD_SEALS:
6948     case TARGET_F_GET_SEALS:
6949         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6950         break;
6951 
6952     default:
6953         ret = get_errno(safe_fcntl(fd, cmd, arg));
6954         break;
6955     }
6956     return ret;
6957 }
6958 
6959 #ifdef USE_UID16
6960 
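/*
 * Added note: the helpers below implement the 16-bit UID/GID ABI.  IDs
 * that do not fit in 16 bits are reported to the guest as the overflow ID
 * 65534, and a 16-bit -1 coming from the guest (conventionally "do not
 * change" for the set*id calls) is widened back to -1 rather than 0xffff.
 */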
6961 static inline int high2lowuid(int uid)
6962 {
6963     if (uid > 65535)
6964         return 65534;
6965     else
6966         return uid;
6967 }
6968 
6969 static inline int high2lowgid(int gid)
6970 {
6971     if (gid > 65535)
6972         return 65534;
6973     else
6974         return gid;
6975 }
6976 
6977 static inline int low2highuid(int uid)
6978 {
6979     if ((int16_t)uid == -1)
6980         return -1;
6981     else
6982         return uid;
6983 }
6984 
6985 static inline int low2highgid(int gid)
6986 {
6987     if ((int16_t)gid == -1)
6988         return -1;
6989     else
6990         return gid;
6991 }
6992 static inline int tswapid(int id)
6993 {
6994     return tswap16(id);
6995 }
6996 
6997 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6998 
6999 #else /* !USE_UID16 */
7000 static inline int high2lowuid(int uid)
7001 {
7002     return uid;
7003 }
7004 static inline int high2lowgid(int gid)
7005 {
7006     return gid;
7007 }
7008 static inline int low2highuid(int uid)
7009 {
7010     return uid;
7011 }
7012 static inline int low2highgid(int gid)
7013 {
7014     return gid;
7015 }
7016 static inline int tswapid(int id)
7017 {
7018     return tswap32(id);
7019 }
7020 
7021 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7022 
7023 #endif /* USE_UID16 */
7024 
7025 /* We must do direct syscalls for setting UID/GID, because we want to
7026  * implement the Linux system call semantics of "change only for this thread",
7027  * not the libc/POSIX semantics of "change for all threads in process".
7028  * (See http://ewontfix.com/17/ for more details.)
7029  * We use the 32-bit version of the syscalls if present; if it is not
7030  * then either the host architecture supports 32-bit UIDs natively with
7031  * the standard syscall, or the 16-bit UID is the best we can do.
7032  */
7033 #ifdef __NR_setuid32
7034 #define __NR_sys_setuid __NR_setuid32
7035 #else
7036 #define __NR_sys_setuid __NR_setuid
7037 #endif
7038 #ifdef __NR_setgid32
7039 #define __NR_sys_setgid __NR_setgid32
7040 #else
7041 #define __NR_sys_setgid __NR_setgid
7042 #endif
7043 #ifdef __NR_setresuid32
7044 #define __NR_sys_setresuid __NR_setresuid32
7045 #else
7046 #define __NR_sys_setresuid __NR_setresuid
7047 #endif
7048 #ifdef __NR_setresgid32
7049 #define __NR_sys_setresgid __NR_setresgid32
7050 #else
7051 #define __NR_sys_setresgid __NR_setresgid
7052 #endif
7053 
7054 _syscall1(int, sys_setuid, uid_t, uid)
7055 _syscall1(int, sys_setgid, gid_t, gid)
7056 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7057 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
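/*
 * Added note: the _syscallN() wrappers above invoke the raw syscall and
 * bypass glibc, so an illustrative later call such as
 *
 *     sys_setuid((uid_t)new_uid);
 *
 * changes credentials only for the calling thread, matching the kernel
 * semantics described above rather than glibc's all-threads behaviour.
 */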
7058 
7059 void syscall_init(void)
7060 {
7061     IOCTLEntry *ie;
7062     const argtype *arg_type;
7063     int size;
7064     int i;
7065 
7066     thunk_init(STRUCT_MAX);
7067 
7068 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7069 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7070 #include "syscall_types.h"
7071 #undef STRUCT
7072 #undef STRUCT_SPECIAL
7073 
7074     /* Build the target_to_host_errno_table[] from
7075      * host_to_target_errno_table[]. */
7076     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7077         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7078     }
7079 
7080     /* We patch the ioctl size if necessary. We rely on the fact that
7081        no ioctl has all bits set to '1' in the size field. */
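    /*
     * Illustrative example (added): an entry whose size field was declared
     * as TARGET_IOC_SIZEMASK (a "fill in at runtime" placeholder) and whose
     * argument type is a pointer to a thunked struct gets the placeholder
     * replaced by the real thunk_type_size() of that struct, so the
     * guest-visible ioctl number carries the correct size bits for this
     * target ABI.
     */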
7082     ie = ioctl_entries;
7083     while (ie->target_cmd != 0) {
7084         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7085             TARGET_IOC_SIZEMASK) {
7086             arg_type = ie->arg_type;
7087             if (arg_type[0] != TYPE_PTR) {
7088                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7089                         ie->target_cmd);
7090                 exit(1);
7091             }
7092             arg_type++;
7093             size = thunk_type_size(arg_type, 0);
7094             ie->target_cmd = (ie->target_cmd &
7095                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7096                 (size << TARGET_IOC_SIZESHIFT);
7097         }
7098 
7099         /* automatic consistency check if same arch */
7100 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7101     (defined(__x86_64__) && defined(TARGET_X86_64))
7102         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7103             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7104                     ie->name, ie->target_cmd, ie->host_cmd);
7105         }
7106 #endif
7107         ie++;
7108     }
7109 }
7110 
7111 #ifdef TARGET_NR_truncate64
7112 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7113                                          abi_long arg2,
7114                                          abi_long arg3,
7115                                          abi_long arg4)
7116 {
7117     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7118         arg2 = arg3;
7119         arg3 = arg4;
7120     }
7121     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7122 }
7123 #endif
7124 
7125 #ifdef TARGET_NR_ftruncate64
7126 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7127                                           abi_long arg2,
7128                                           abi_long arg3,
7129                                           abi_long arg4)
7130 {
7131     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7132         arg2 = arg3;
7133         arg3 = arg4;
7134     }
7135     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7136 }
7137 #endif
7138 
7139 #if defined(TARGET_NR_timer_settime) || \
7140     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7141 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7142                                                  abi_ulong target_addr)
7143 {
7144     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7145                                 offsetof(struct target_itimerspec,
7146                                          it_interval)) ||
7147         target_to_host_timespec(&host_its->it_value, target_addr +
7148                                 offsetof(struct target_itimerspec,
7149                                          it_value))) {
7150         return -TARGET_EFAULT;
7151     }
7152 
7153     return 0;
7154 }
7155 #endif
7156 
7157 #if defined(TARGET_NR_timer_settime64) || \
7158     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7159 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7160                                                    abi_ulong target_addr)
7161 {
7162     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7163                                   offsetof(struct target__kernel_itimerspec,
7164                                            it_interval)) ||
7165         target_to_host_timespec64(&host_its->it_value, target_addr +
7166                                   offsetof(struct target__kernel_itimerspec,
7167                                            it_value))) {
7168         return -TARGET_EFAULT;
7169     }
7170 
7171     return 0;
7172 }
7173 #endif
7174 
7175 #if ((defined(TARGET_NR_timerfd_gettime) || \
7176       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7177       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7178 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7179                                                  struct itimerspec *host_its)
7180 {
7181     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7182                                                        it_interval),
7183                                 &host_its->it_interval) ||
7184         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7185                                                        it_value),
7186                                 &host_its->it_value)) {
7187         return -TARGET_EFAULT;
7188     }
7189     return 0;
7190 }
7191 #endif
7192 
7193 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7194       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7195       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7196 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7197                                                    struct itimerspec *host_its)
7198 {
7199     if (host_to_target_timespec64(target_addr +
7200                                   offsetof(struct target__kernel_itimerspec,
7201                                            it_interval),
7202                                   &host_its->it_interval) ||
7203         host_to_target_timespec64(target_addr +
7204                                   offsetof(struct target__kernel_itimerspec,
7205                                            it_value),
7206                                   &host_its->it_value)) {
7207         return -TARGET_EFAULT;
7208     }
7209     return 0;
7210 }
7211 #endif
7212 
7213 #if defined(TARGET_NR_adjtimex) || \
7214     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7215 static inline abi_long target_to_host_timex(struct timex *host_tx,
7216                                             abi_long target_addr)
7217 {
7218     struct target_timex *target_tx;
7219 
7220     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7221         return -TARGET_EFAULT;
7222     }
7223 
7224     __get_user(host_tx->modes, &target_tx->modes);
7225     __get_user(host_tx->offset, &target_tx->offset);
7226     __get_user(host_tx->freq, &target_tx->freq);
7227     __get_user(host_tx->maxerror, &target_tx->maxerror);
7228     __get_user(host_tx->esterror, &target_tx->esterror);
7229     __get_user(host_tx->status, &target_tx->status);
7230     __get_user(host_tx->constant, &target_tx->constant);
7231     __get_user(host_tx->precision, &target_tx->precision);
7232     __get_user(host_tx->tolerance, &target_tx->tolerance);
7233     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7234     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7235     __get_user(host_tx->tick, &target_tx->tick);
7236     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7237     __get_user(host_tx->jitter, &target_tx->jitter);
7238     __get_user(host_tx->shift, &target_tx->shift);
7239     __get_user(host_tx->stabil, &target_tx->stabil);
7240     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7241     __get_user(host_tx->calcnt, &target_tx->calcnt);
7242     __get_user(host_tx->errcnt, &target_tx->errcnt);
7243     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7244     __get_user(host_tx->tai, &target_tx->tai);
7245 
7246     unlock_user_struct(target_tx, target_addr, 0);
7247     return 0;
7248 }
7249 
7250 static inline abi_long host_to_target_timex(abi_long target_addr,
7251                                             struct timex *host_tx)
7252 {
7253     struct target_timex *target_tx;
7254 
7255     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7256         return -TARGET_EFAULT;
7257     }
7258 
7259     __put_user(host_tx->modes, &target_tx->modes);
7260     __put_user(host_tx->offset, &target_tx->offset);
7261     __put_user(host_tx->freq, &target_tx->freq);
7262     __put_user(host_tx->maxerror, &target_tx->maxerror);
7263     __put_user(host_tx->esterror, &target_tx->esterror);
7264     __put_user(host_tx->status, &target_tx->status);
7265     __put_user(host_tx->constant, &target_tx->constant);
7266     __put_user(host_tx->precision, &target_tx->precision);
7267     __put_user(host_tx->tolerance, &target_tx->tolerance);
7268     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7269     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7270     __put_user(host_tx->tick, &target_tx->tick);
7271     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7272     __put_user(host_tx->jitter, &target_tx->jitter);
7273     __put_user(host_tx->shift, &target_tx->shift);
7274     __put_user(host_tx->stabil, &target_tx->stabil);
7275     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7276     __put_user(host_tx->calcnt, &target_tx->calcnt);
7277     __put_user(host_tx->errcnt, &target_tx->errcnt);
7278     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7279     __put_user(host_tx->tai, &target_tx->tai);
7280 
7281     unlock_user_struct(target_tx, target_addr, 1);
7282     return 0;
7283 }
7284 #endif
7285 
7286 
7287 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7288 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7289                                               abi_long target_addr)
7290 {
7291     struct target__kernel_timex *target_tx;
7292 
7293     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7294                                  offsetof(struct target__kernel_timex,
7295                                           time))) {
7296         return -TARGET_EFAULT;
7297     }
7298 
7299     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7300         return -TARGET_EFAULT;
7301     }
7302 
7303     __get_user(host_tx->modes, &target_tx->modes);
7304     __get_user(host_tx->offset, &target_tx->offset);
7305     __get_user(host_tx->freq, &target_tx->freq);
7306     __get_user(host_tx->maxerror, &target_tx->maxerror);
7307     __get_user(host_tx->esterror, &target_tx->esterror);
7308     __get_user(host_tx->status, &target_tx->status);
7309     __get_user(host_tx->constant, &target_tx->constant);
7310     __get_user(host_tx->precision, &target_tx->precision);
7311     __get_user(host_tx->tolerance, &target_tx->tolerance);
7312     __get_user(host_tx->tick, &target_tx->tick);
7313     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7314     __get_user(host_tx->jitter, &target_tx->jitter);
7315     __get_user(host_tx->shift, &target_tx->shift);
7316     __get_user(host_tx->stabil, &target_tx->stabil);
7317     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7318     __get_user(host_tx->calcnt, &target_tx->calcnt);
7319     __get_user(host_tx->errcnt, &target_tx->errcnt);
7320     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7321     __get_user(host_tx->tai, &target_tx->tai);
7322 
7323     unlock_user_struct(target_tx, target_addr, 0);
7324     return 0;
7325 }
7326 
7327 static inline abi_long host_to_target_timex64(abi_long target_addr,
7328                                               struct timex *host_tx)
7329 {
7330     struct target__kernel_timex *target_tx;
7331 
7332    if (copy_to_user_timeval64(target_addr +
7333                               offsetof(struct target__kernel_timex, time),
7334                               &host_tx->time)) {
7335         return -TARGET_EFAULT;
7336     }
7337 
7338     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7339         return -TARGET_EFAULT;
7340     }
7341 
7342     __put_user(host_tx->modes, &target_tx->modes);
7343     __put_user(host_tx->offset, &target_tx->offset);
7344     __put_user(host_tx->freq, &target_tx->freq);
7345     __put_user(host_tx->maxerror, &target_tx->maxerror);
7346     __put_user(host_tx->esterror, &target_tx->esterror);
7347     __put_user(host_tx->status, &target_tx->status);
7348     __put_user(host_tx->constant, &target_tx->constant);
7349     __put_user(host_tx->precision, &target_tx->precision);
7350     __put_user(host_tx->tolerance, &target_tx->tolerance);
7351     __put_user(host_tx->tick, &target_tx->tick);
7352     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7353     __put_user(host_tx->jitter, &target_tx->jitter);
7354     __put_user(host_tx->shift, &target_tx->shift);
7355     __put_user(host_tx->stabil, &target_tx->stabil);
7356     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7357     __put_user(host_tx->calcnt, &target_tx->calcnt);
7358     __put_user(host_tx->errcnt, &target_tx->errcnt);
7359     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7360     __put_user(host_tx->tai, &target_tx->tai);
7361 
7362     unlock_user_struct(target_tx, target_addr, 1);
7363     return 0;
7364 }
7365 #endif
7366 
7367 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7368                                                abi_ulong target_addr)
7369 {
7370     struct target_sigevent *target_sevp;
7371 
7372     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7373         return -TARGET_EFAULT;
7374     }
7375 
7376     /* This union is awkward on 64 bit systems because it has a 32 bit
7377      * integer and a pointer in it; we follow the conversion approach
7378      * used for handling sigval types in signal.c so the guest should get
7379      * the correct value back even if we did a 64 bit byteswap and it's
7380      * using the 32 bit integer.
7381      */
7382     host_sevp->sigev_value.sival_ptr =
7383         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7384     host_sevp->sigev_signo =
7385         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7386     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7387     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7388 
7389     unlock_user_struct(target_sevp, target_addr, 1);
7390     return 0;
7391 }
7392 
7393 #if defined(TARGET_NR_mlockall)
7394 static inline int target_to_host_mlockall_arg(int arg)
7395 {
7396     int result = 0;
7397 
7398     if (arg & TARGET_MCL_CURRENT) {
7399         result |= MCL_CURRENT;
7400     }
7401     if (arg & TARGET_MCL_FUTURE) {
7402         result |= MCL_FUTURE;
7403     }
7404 #ifdef MCL_ONFAULT
7405     if (arg & TARGET_MCL_ONFAULT) {
7406         result |= MCL_ONFAULT;
7407     }
7408 #endif
7409 
7410     return result;
7411 }
7412 #endif
7413 
7414 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7415      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7416      defined(TARGET_NR_newfstatat))
7417 static inline abi_long host_to_target_stat64(void *cpu_env,
7418                                              abi_ulong target_addr,
7419                                              struct stat *host_st)
7420 {
7421 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7422     if (((CPUARMState *)cpu_env)->eabi) {
7423         struct target_eabi_stat64 *target_st;
7424 
7425         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7426             return -TARGET_EFAULT;
7427         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7428         __put_user(host_st->st_dev, &target_st->st_dev);
7429         __put_user(host_st->st_ino, &target_st->st_ino);
7430 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7431         __put_user(host_st->st_ino, &target_st->__st_ino);
7432 #endif
7433         __put_user(host_st->st_mode, &target_st->st_mode);
7434         __put_user(host_st->st_nlink, &target_st->st_nlink);
7435         __put_user(host_st->st_uid, &target_st->st_uid);
7436         __put_user(host_st->st_gid, &target_st->st_gid);
7437         __put_user(host_st->st_rdev, &target_st->st_rdev);
7438         __put_user(host_st->st_size, &target_st->st_size);
7439         __put_user(host_st->st_blksize, &target_st->st_blksize);
7440         __put_user(host_st->st_blocks, &target_st->st_blocks);
7441         __put_user(host_st->st_atime, &target_st->target_st_atime);
7442         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7443         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7444 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7445         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7446         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7447         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7448 #endif
7449         unlock_user_struct(target_st, target_addr, 1);
7450     } else
7451 #endif
7452     {
7453 #if defined(TARGET_HAS_STRUCT_STAT64)
7454         struct target_stat64 *target_st;
7455 #else
7456         struct target_stat *target_st;
7457 #endif
7458 
7459         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7460             return -TARGET_EFAULT;
7461         memset(target_st, 0, sizeof(*target_st));
7462         __put_user(host_st->st_dev, &target_st->st_dev);
7463         __put_user(host_st->st_ino, &target_st->st_ino);
7464 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7465         __put_user(host_st->st_ino, &target_st->__st_ino);
7466 #endif
7467         __put_user(host_st->st_mode, &target_st->st_mode);
7468         __put_user(host_st->st_nlink, &target_st->st_nlink);
7469         __put_user(host_st->st_uid, &target_st->st_uid);
7470         __put_user(host_st->st_gid, &target_st->st_gid);
7471         __put_user(host_st->st_rdev, &target_st->st_rdev);
7472         /* XXX: better use of kernel struct */
7473         __put_user(host_st->st_size, &target_st->st_size);
7474         __put_user(host_st->st_blksize, &target_st->st_blksize);
7475         __put_user(host_st->st_blocks, &target_st->st_blocks);
7476         __put_user(host_st->st_atime, &target_st->target_st_atime);
7477         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7478         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7479 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7480         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7481         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7482         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7483 #endif
7484         unlock_user_struct(target_st, target_addr, 1);
7485     }
7486 
7487     return 0;
7488 }
7489 #endif
7490 
7491 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7492 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7493                                             abi_ulong target_addr)
7494 {
7495     struct target_statx *target_stx;
7496 
7497     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7498         return -TARGET_EFAULT;
7499     }
7500     memset(target_stx, 0, sizeof(*target_stx));
7501 
7502     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7503     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7504     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7505     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7506     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7507     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7508     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7509     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7510     __put_user(host_stx->stx_size, &target_stx->stx_size);
7511     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7512     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7513     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7514     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7515     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7516     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7517     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7518     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7519     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7520     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7521     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7522     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7523     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7524     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7525 
7526     unlock_user_struct(target_stx, target_addr, 1);
7527 
7528     return 0;
7529 }
7530 #endif
7531 
7532 static int do_sys_futex(int *uaddr, int op, int val,
7533                          const struct timespec *timeout, int *uaddr2,
7534                          int val3)
7535 {
7536 #if HOST_LONG_BITS == 64
7537 #if defined(__NR_futex)
7538     /* time_t is always 64-bit here, so there is no _time64 variant of the syscall */
7539     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7540 
7541 #endif
7542 #else /* HOST_LONG_BITS == 64 */
7543 #if defined(__NR_futex_time64)
7544     if (sizeof(timeout->tv_sec) == 8) {
7545         /* _time64 function on 32bit arch */
7546         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7547     }
7548 #endif
7549 #if defined(__NR_futex)
7550     /* old function on 32bit arch */
7551     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7552 #endif
7553 #endif /* HOST_LONG_BITS == 64 */
7554     g_assert_not_reached();
7555 }
7556 
7557 static int do_safe_futex(int *uaddr, int op, int val,
7558                          const struct timespec *timeout, int *uaddr2,
7559                          int val3)
7560 {
7561 #if HOST_LONG_BITS == 64
7562 #if defined(__NR_futex)
7563     /* time_t is always 64-bit here, so there is no _time64 variant of the syscall */
7564     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7565 #endif
7566 #else /* HOST_LONG_BITS == 64 */
7567 #if defined(__NR_futex_time64)
7568     if (sizeof(timeout->tv_sec) == 8) {
7569         /* _time64 function on 32bit arch */
7570         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7571                                            val3));
7572     }
7573 #endif
7574 #if defined(__NR_futex)
7575     /* old function on 32bit arch */
7576     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7577 #endif
7578 #endif /* HOST_LONG_BITS == 64 */
7579     return -TARGET_ENOSYS;
7580 }
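/*
 * Added note: both helpers above pick the host futex flavour the same way.
 * On a 64-bit host, plain __NR_futex already takes a 64-bit timespec; on a
 * 32-bit host, __NR_futex_time64 is preferred when the host timespec has a
 * 64-bit tv_sec, with __NR_futex as the fallback.  If neither syscall
 * exists, do_sys_futex() asserts while do_safe_futex() returns
 * -TARGET_ENOSYS.
 */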
7581 
7582 /* ??? Using host futex calls even when target atomic operations
7583    are not really atomic probably breaks things.  However, implementing
7584    futexes locally would make futexes shared between multiple processes
7585    tricky.  They would probably be useless anyway, because guest atomic
7586    operations won't work either.  */
7587 #if defined(TARGET_NR_futex)
7588 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7589                     target_ulong uaddr2, int val3)
7590 {
7591     struct timespec ts, *pts;
7592     int base_op;
7593 
7594     /* ??? We assume FUTEX_* constants are the same on both host
7595        and target.  */
7596 #ifdef FUTEX_CMD_MASK
7597     base_op = op & FUTEX_CMD_MASK;
7598 #else
7599     base_op = op;
7600 #endif
7601     switch (base_op) {
7602     case FUTEX_WAIT:
7603     case FUTEX_WAIT_BITSET:
7604         if (timeout) {
7605             pts = &ts;
7606             target_to_host_timespec(pts, timeout);
7607         } else {
7608             pts = NULL;
7609         }
7610         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7611     case FUTEX_WAKE:
7612         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7613     case FUTEX_FD:
7614         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7615     case FUTEX_REQUEUE:
7616     case FUTEX_CMP_REQUEUE:
7617     case FUTEX_WAKE_OP:
7618         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7619            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7620            But the prototype takes a `struct timespec *'; insert casts
7621            to satisfy the compiler.  We do not need to tswap TIMEOUT
7622            since it's not compared to guest memory.  */
7623         pts = (struct timespec *)(uintptr_t) timeout;
7624         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7625                              (base_op == FUTEX_CMP_REQUEUE
7626                                       ? tswap32(val3)
7627                                       : val3));
7628     default:
7629         return -TARGET_ENOSYS;
7630     }
7631 }
7632 #endif
7633 
7634 #if defined(TARGET_NR_futex_time64)
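/*
 * Same as do_futex() above, except that the guest timeout is a 64-bit
 * timespec (futex_time64) and is converted with target_to_host_timespec64().
 */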
7635 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7636                            target_ulong uaddr2, int val3)
7637 {
7638     struct timespec ts, *pts;
7639     int base_op;
7640 
7641     /* ??? We assume FUTEX_* constants are the same on both host
7642        and target.  */
7643 #ifdef FUTEX_CMD_MASK
7644     base_op = op & FUTEX_CMD_MASK;
7645 #else
7646     base_op = op;
7647 #endif
7648     switch (base_op) {
7649     case FUTEX_WAIT:
7650     case FUTEX_WAIT_BITSET:
7651         if (timeout) {
7652             pts = &ts;
7653             if (target_to_host_timespec64(pts, timeout)) {
7654                 return -TARGET_EFAULT;
7655             }
7656         } else {
7657             pts = NULL;
7658         }
7659         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7660     case FUTEX_WAKE:
7661         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7662     case FUTEX_FD:
7663         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7664     case FUTEX_REQUEUE:
7665     case FUTEX_CMP_REQUEUE:
7666     case FUTEX_WAKE_OP:
7667         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7668            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7669            But the prototype takes a `struct timespec *'; insert casts
7670            to satisfy the compiler.  We do not need to tswap TIMEOUT
7671            since it's not compared to guest memory.  */
7672         pts = (struct timespec *)(uintptr_t) timeout;
7673         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7674                              (base_op == FUTEX_CMP_REQUEUE
7675                                       ? tswap32(val3)
7676                                       : val3));
7677     default:
7678         return -TARGET_ENOSYS;
7679     }
7680 }
7681 #endif
7682 
7683 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
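/*
 * Emulate name_to_handle_at(): read the guest-supplied handle_bytes, call the
 * host syscall on a scratch struct file_handle, then copy the opaque handle
 * back to guest memory with handle_bytes and handle_type byte-swapped.
 */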
7684 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7685                                      abi_long handle, abi_long mount_id,
7686                                      abi_long flags)
7687 {
7688     struct file_handle *target_fh;
7689     struct file_handle *fh;
7690     int mid = 0;
7691     abi_long ret;
7692     char *name;
7693     unsigned int size, total_size;
7694 
7695     if (get_user_s32(size, handle)) {
7696         return -TARGET_EFAULT;
7697     }
7698 
7699     name = lock_user_string(pathname);
7700     if (!name) {
7701         return -TARGET_EFAULT;
7702     }
7703 
7704     total_size = sizeof(struct file_handle) + size;
7705     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7706     if (!target_fh) {
7707         unlock_user(name, pathname, 0);
7708         return -TARGET_EFAULT;
7709     }
7710 
7711     fh = g_malloc0(total_size);
7712     fh->handle_bytes = size;
7713 
7714     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7715     unlock_user(name, pathname, 0);
7716 
7717     /* man name_to_handle_at(2):
7718      * Other than the use of the handle_bytes field, the caller should treat
7719      * the file_handle structure as an opaque data type
7720      */
7721 
7722     memcpy(target_fh, fh, total_size);
7723     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7724     target_fh->handle_type = tswap32(fh->handle_type);
7725     g_free(fh);
7726     unlock_user(target_fh, handle, total_size);
7727 
7728     if (put_user_s32(mid, mount_id)) {
7729         return -TARGET_EFAULT;
7730     }
7731 
7732     return ret;
7733 
7734 }
7735 #endif
7736 
7737 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7738 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7739                                      abi_long flags)
7740 {
7741     struct file_handle *target_fh;
7742     struct file_handle *fh;
7743     unsigned int size, total_size;
7744     abi_long ret;
7745 
7746     if (get_user_s32(size, handle)) {
7747         return -TARGET_EFAULT;
7748     }
7749 
7750     total_size = sizeof(struct file_handle) + size;
7751     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7752     if (!target_fh) {
7753         return -TARGET_EFAULT;
7754     }
7755 
7756     fh = g_memdup(target_fh, total_size);
7757     fh->handle_bytes = size;
7758     fh->handle_type = tswap32(target_fh->handle_type);
7759 
7760     ret = get_errno(open_by_handle_at(mount_fd, fh,
7761                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7762 
7763     g_free(fh);
7764 
7765     unlock_user(target_fh, handle, total_size);
7766 
7767     return ret;
7768 }
7769 #endif
7770 
7771 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7772 
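/*
 * Common helper for signalfd() and signalfd4(): convert the guest signal mask
 * and flags to host format, create the host signalfd, and register the
 * signalfd fd translator so that data read from it is converted back to the
 * guest's signalfd_siginfo layout.
 */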
7773 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7774 {
7775     int host_flags;
7776     target_sigset_t *target_mask;
7777     sigset_t host_mask;
7778     abi_long ret;
7779 
7780     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7781         return -TARGET_EINVAL;
7782     }
7783     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7784         return -TARGET_EFAULT;
7785     }
7786 
7787     target_to_host_sigset(&host_mask, target_mask);
7788 
7789     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7790 
7791     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7792     if (ret >= 0) {
7793         fd_trans_register(ret, &target_signalfd_trans);
7794     }
7795 
7796     unlock_user_struct(target_mask, mask, 0);
7797 
7798     return ret;
7799 }
7800 #endif
7801 
7802 /* Map host to target signal numbers for the wait family of syscalls.
7803    Assume all other status bits are the same.  */
7804 int host_to_target_waitstatus(int status)
7805 {
7806     if (WIFSIGNALED(status)) {
7807         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7808     }
7809     if (WIFSTOPPED(status)) {
7810         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7811                | (status & 0xff);
7812     }
7813     return status;
7814 }
7815 
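/*
 * Synthesize /proc/self/cmdline for the guest by writing out the saved argv[]
 * strings, each including its terminating NUL byte.
 */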
7816 static int open_self_cmdline(void *cpu_env, int fd)
7817 {
7818     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7819     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7820     int i;
7821 
7822     for (i = 0; i < bprm->argc; i++) {
7823         size_t len = strlen(bprm->argv[i]) + 1;
7824 
7825         if (write(fd, bprm->argv[i], len) != len) {
7826             return -1;
7827         }
7828     }
7829 
7830     return 0;
7831 }
7832 
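/*
 * Synthesize /proc/self/maps: walk the host mappings, keep only ranges that
 * correspond to valid guest addresses, and print each entry using guest
 * virtual addresses, labelling the guest stack as "[stack]".
 */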
7833 static int open_self_maps(void *cpu_env, int fd)
7834 {
7835     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7836     TaskState *ts = cpu->opaque;
7837     GSList *map_info = read_self_maps();
7838     GSList *s;
7839     int count;
7840 
7841     for (s = map_info; s; s = g_slist_next(s)) {
7842         MapInfo *e = (MapInfo *) s->data;
7843 
7844         if (h2g_valid(e->start)) {
7845             unsigned long min = e->start;
7846             unsigned long max = e->end;
7847             int flags = page_get_flags(h2g(min));
7848             const char *path;
7849 
7850             max = h2g_valid(max - 1) ?
7851                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7852 
7853             if (page_check_range(h2g(min), max - min, flags) == -1) {
7854                 continue;
7855             }
7856 
7857             if (h2g(min) == ts->info->stack_limit) {
7858                 path = "[stack]";
7859             } else {
7860                 path = e->path;
7861             }
7862 
7863             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7864                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7865                             h2g(min), h2g(max - 1) + 1,
7866                             e->is_read ? 'r' : '-',
7867                             e->is_write ? 'w' : '-',
7868                             e->is_exec ? 'x' : '-',
7869                             e->is_priv ? 'p' : '-',
7870                             (uint64_t) e->offset, e->dev, e->inode);
7871             if (path) {
7872                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7873             } else {
7874                 dprintf(fd, "\n");
7875             }
7876         }
7877     }
7878 
7879     free_self_maps(map_info);
7880 
7881 #ifdef TARGET_VSYSCALL_PAGE
7882     /*
7883      * We only support execution from the vsyscall page.
7884      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7885      */
7886     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7887                     " --xp 00000000 00:00 0",
7888                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7889     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7890 #endif
7891 
7892     return 0;
7893 }
7894 
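/*
 * Synthesize a minimal /proc/self/stat: only the pid, the command name and
 * the start-of-stack field carry real values; every other field is written
 * as 0.
 */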
7895 static int open_self_stat(void *cpu_env, int fd)
7896 {
7897     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7898     TaskState *ts = cpu->opaque;
7899     g_autoptr(GString) buf = g_string_new(NULL);
7900     int i;
7901 
7902     for (i = 0; i < 44; i++) {
7903         if (i == 0) {
7904             /* pid */
7905             g_string_printf(buf, FMT_pid " ", getpid());
7906         } else if (i == 1) {
7907             /* app name */
7908             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7909             bin = bin ? bin + 1 : ts->bprm->argv[0];
7910             g_string_printf(buf, "(%.15s) ", bin);
7911         } else if (i == 27) {
7912             /* stack bottom */
7913             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7914         } else {
7915             /* all remaining fields are faked as zero */
7916             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7917         }
7918 
7919         if (write(fd, buf->str, buf->len) != buf->len) {
7920             return -1;
7921         }
7922     }
7923 
7924     return 0;
7925 }
7926 
7927 static int open_self_auxv(void *cpu_env, int fd)
7928 {
7929     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7930     TaskState *ts = cpu->opaque;
7931     abi_ulong auxv = ts->info->saved_auxv;
7932     abi_ulong len = ts->info->auxv_len;
7933     char *ptr;
7934 
7935     /*
7936      * The auxiliary vector is stored on the target process's stack;
7937      * read the whole auxv vector and copy it to the file.
7938      */
7939     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7940     if (ptr != NULL) {
7941         while (len > 0) {
7942             ssize_t r;
7943             r = write(fd, ptr, len);
7944             if (r <= 0) {
7945                 break;
7946             }
7947             len -= r;
7948             ptr += r;
7949         }
7950         lseek(fd, 0, SEEK_SET);
7951         unlock_user(ptr, auxv, len);
7952     }
7953 
7954     return 0;
7955 }
7956 
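/*
 * Return 1 if filename names the given entry under /proc/self/ or under
 * /proc/<pid>/ for our own pid, 0 otherwise.
 */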
7957 static int is_proc_myself(const char *filename, const char *entry)
7958 {
7959     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7960         filename += strlen("/proc/");
7961         if (!strncmp(filename, "self/", strlen("self/"))) {
7962             filename += strlen("self/");
7963         } else if (*filename >= '1' && *filename <= '9') {
7964             char myself[80];
7965             snprintf(myself, sizeof(myself), "%d/", getpid());
7966             if (!strncmp(filename, myself, strlen(myself))) {
7967                 filename += strlen(myself);
7968             } else {
7969                 return 0;
7970             }
7971         } else {
7972             return 0;
7973         }
7974         if (!strcmp(filename, entry)) {
7975             return 1;
7976         }
7977     }
7978     return 0;
7979 }
7980 
7981 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7982     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7983 static int is_proc(const char *filename, const char *entry)
7984 {
7985     return strcmp(filename, entry) == 0;
7986 }
7987 #endif
7988 
7989 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
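/*
 * /proc/net/route reports addresses as raw hex words in host byte order;
 * when host and guest endianness differ, re-emit each route line with the
 * destination, gateway and netmask fields byte-swapped for the guest.
 */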
7990 static int open_net_route(void *cpu_env, int fd)
7991 {
7992     FILE *fp;
7993     char *line = NULL;
7994     size_t len = 0;
7995     ssize_t read;
7996 
7997     fp = fopen("/proc/net/route", "r");
7998     if (fp == NULL) {
7999         return -1;
8000     }
8001 
8002     /* read header */
8003 
8004     read = getline(&line, &len, fp);
8005     dprintf(fd, "%s", line);
8006 
8007     /* read routes */
8008 
8009     while ((read = getline(&line, &len, fp)) != -1) {
8010         char iface[16];
8011         uint32_t dest, gw, mask;
8012         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8013         int fields;
8014 
8015         fields = sscanf(line,
8016                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8017                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8018                         &mask, &mtu, &window, &irtt);
8019         if (fields != 11) {
8020             continue;
8021         }
8022         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8023                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8024                 metric, tswap32(mask), mtu, window, irtt);
8025     }
8026 
8027     free(line);
8028     fclose(fp);
8029 
8030     return 0;
8031 }
8032 #endif
8033 
8034 #if defined(TARGET_SPARC)
8035 static int open_cpuinfo(void *cpu_env, int fd)
8036 {
8037     dprintf(fd, "type\t\t: sun4u\n");
8038     return 0;
8039 }
8040 #endif
8041 
8042 #if defined(TARGET_HPPA)
8043 static int open_cpuinfo(void *cpu_env, int fd)
8044 {
8045     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8046     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8047     dprintf(fd, "capabilities\t: os32\n");
8048     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8049     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8050     return 0;
8051 }
8052 #endif
8053 
8054 #if defined(TARGET_M68K)
8055 static int open_hardware(void *cpu_env, int fd)
8056 {
8057     dprintf(fd, "Model:\t\tqemu-m68k\n");
8058     return 0;
8059 }
8060 #endif
8061 
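/*
 * openat() with interception of the emulated /proc files: a pathname matched
 * by the fakes[] table below is generated into an unlinked temporary file
 * whose descriptor is returned to the guest; everything else is forwarded to
 * the host via safe_openat().
 */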
8062 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8063 {
8064     struct fake_open {
8065         const char *filename;
8066         int (*fill)(void *cpu_env, int fd);
8067         int (*cmp)(const char *s1, const char *s2);
8068     };
8069     const struct fake_open *fake_open;
8070     static const struct fake_open fakes[] = {
8071         { "maps", open_self_maps, is_proc_myself },
8072         { "stat", open_self_stat, is_proc_myself },
8073         { "auxv", open_self_auxv, is_proc_myself },
8074         { "cmdline", open_self_cmdline, is_proc_myself },
8075 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8076         { "/proc/net/route", open_net_route, is_proc },
8077 #endif
8078 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8079         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8080 #endif
8081 #if defined(TARGET_M68K)
8082         { "/proc/hardware", open_hardware, is_proc },
8083 #endif
8084         { NULL, NULL, NULL }
8085     };
8086 
8087     if (is_proc_myself(pathname, "exe")) {
8088         int execfd = qemu_getauxval(AT_EXECFD);
8089         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8090     }
8091 
8092     for (fake_open = fakes; fake_open->filename; fake_open++) {
8093         if (fake_open->cmp(pathname, fake_open->filename)) {
8094             break;
8095         }
8096     }
8097 
8098     if (fake_open->filename) {
8099         const char *tmpdir;
8100         char filename[PATH_MAX];
8101         int fd, r;
8102 
8103         /* create a temporary file to hold the synthesized /proc contents */
8104         tmpdir = getenv("TMPDIR");
8105         if (!tmpdir)
8106             tmpdir = "/tmp";
8107         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8108         fd = mkstemp(filename);
8109         if (fd < 0) {
8110             return fd;
8111         }
8112         unlink(filename);
8113 
8114         if ((r = fake_open->fill(cpu_env, fd))) {
8115             int e = errno;
8116             close(fd);
8117             errno = e;
8118             return r;
8119         }
8120         lseek(fd, 0, SEEK_SET);
8121 
8122         return fd;
8123     }
8124 
8125     return safe_openat(dirfd, path(pathname), flags, mode);
8126 }
8127 
8128 #define TIMER_MAGIC 0x0caf0000
8129 #define TIMER_MAGIC_MASK 0xffff0000
8130 
8131 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format. */
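/* For example, a guest id of (TIMER_MAGIC | 3) selects slot 3 of
   g_posix_timers. */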
8132 static target_timer_t get_timer_id(abi_long arg)
8133 {
8134     target_timer_t timerid = arg;
8135 
8136     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8137         return -TARGET_EINVAL;
8138     }
8139 
8140     timerid &= 0xffff;
8141 
8142     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8143         return -TARGET_EINVAL;
8144     }
8145 
8146     return timerid;
8147 }
8148 
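/*
 * Convert a guest CPU affinity bitmap (an array of abi_ulong words) into a
 * host "unsigned long" bitmap, bit by bit.  For example, with a 32-bit guest
 * on a 64-bit host, the guest words {0x1, 0x80000000} set host bits 0 and 63.
 */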
8149 static int target_to_host_cpu_mask(unsigned long *host_mask,
8150                                    size_t host_size,
8151                                    abi_ulong target_addr,
8152                                    size_t target_size)
8153 {
8154     unsigned target_bits = sizeof(abi_ulong) * 8;
8155     unsigned host_bits = sizeof(*host_mask) * 8;
8156     abi_ulong *target_mask;
8157     unsigned i, j;
8158 
8159     assert(host_size >= target_size);
8160 
8161     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8162     if (!target_mask) {
8163         return -TARGET_EFAULT;
8164     }
8165     memset(host_mask, 0, host_size);
8166 
8167     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8168         unsigned bit = i * target_bits;
8169         abi_ulong val;
8170 
8171         __get_user(val, &target_mask[i]);
8172         for (j = 0; j < target_bits; j++, bit++) {
8173             if (val & (1UL << j)) {
8174                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8175             }
8176         }
8177     }
8178 
8179     unlock_user(target_mask, target_addr, 0);
8180     return 0;
8181 }
8182 
8183 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8184                                    size_t host_size,
8185                                    abi_ulong target_addr,
8186                                    size_t target_size)
8187 {
8188     unsigned target_bits = sizeof(abi_ulong) * 8;
8189     unsigned host_bits = sizeof(*host_mask) * 8;
8190     abi_ulong *target_mask;
8191     unsigned i, j;
8192 
8193     assert(host_size >= target_size);
8194 
8195     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8196     if (!target_mask) {
8197         return -TARGET_EFAULT;
8198     }
8199 
8200     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8201         unsigned bit = i * target_bits;
8202         abi_ulong val = 0;
8203 
8204         for (j = 0; j < target_bits; j++, bit++) {
8205             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8206                 val |= 1UL << j;
8207             }
8208         }
8209         __put_user(val, &target_mask[i]);
8210     }
8211 
8212     unlock_user(target_mask, target_addr, target_size);
8213     return 0;
8214 }
8215 
8216 /* This is an internal helper for do_syscall so that it is easier
8217  * to have a single return point, so that actions, such as logging
8218  * of syscall results, can be performed.
8219  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8220  */
8221 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8222                             abi_long arg2, abi_long arg3, abi_long arg4,
8223                             abi_long arg5, abi_long arg6, abi_long arg7,
8224                             abi_long arg8)
8225 {
8226     CPUState *cpu = env_cpu(cpu_env);
8227     abi_long ret;
8228 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8229     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8230     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8231     || defined(TARGET_NR_statx)
8232     struct stat st;
8233 #endif
8234 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8235     || defined(TARGET_NR_fstatfs)
8236     struct statfs stfs;
8237 #endif
8238     void *p;
8239 
8240     switch(num) {
8241     case TARGET_NR_exit:
8242         /* In old applications this may be used to implement _exit(2).
8243            However, in threaded applications it is used for thread termination,
8244            and _exit_group is used for application termination.
8245            Do thread termination if we have more than one thread.  */
8246 
8247         if (block_signals()) {
8248             return -TARGET_ERESTARTSYS;
8249         }
8250 
8251         pthread_mutex_lock(&clone_lock);
8252 
8253         if (CPU_NEXT(first_cpu)) {
8254             TaskState *ts = cpu->opaque;
8255 
8256             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8257             object_unref(OBJECT(cpu));
8258             /*
8259              * At this point the CPU should be unrealized and removed
8260              * from cpu lists. We can clean-up the rest of the thread
8261              * data without the lock held.
8262              */
8263 
8264             pthread_mutex_unlock(&clone_lock);
8265 
8266             if (ts->child_tidptr) {
8267                 put_user_u32(0, ts->child_tidptr);
8268                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8269                           NULL, NULL, 0);
8270             }
8271             thread_cpu = NULL;
8272             g_free(ts);
8273             rcu_unregister_thread();
8274             pthread_exit(NULL);
8275         }
8276 
8277         pthread_mutex_unlock(&clone_lock);
8278         preexit_cleanup(cpu_env, arg1);
8279         _exit(arg1);
8280         return 0; /* avoid warning */
8281     case TARGET_NR_read:
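        /*
         * Pass a zero-length read with a NULL buffer straight to the host,
         * presumably so the descriptor is still validated (e.g. EBADF)
         * instead of failing the NULL-buffer lock with -TARGET_EFAULT.
         */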
8282         if (arg2 == 0 && arg3 == 0) {
8283             return get_errno(safe_read(arg1, 0, 0));
8284         } else {
8285             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8286                 return -TARGET_EFAULT;
8287             ret = get_errno(safe_read(arg1, p, arg3));
8288             if (ret >= 0 &&
8289                 fd_trans_host_to_target_data(arg1)) {
8290                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8291             }
8292             unlock_user(p, arg2, ret);
8293         }
8294         return ret;
8295     case TARGET_NR_write:
8296         if (arg2 == 0 && arg3 == 0) {
8297             return get_errno(safe_write(arg1, 0, 0));
8298         }
8299         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8300             return -TARGET_EFAULT;
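        /*
         * If this descriptor has a registered translator, convert the guest
         * data in a scratch copy before writing it to the host.
         */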
8301         if (fd_trans_target_to_host_data(arg1)) {
8302             void *copy = g_malloc(arg3);
8303             memcpy(copy, p, arg3);
8304             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8305             if (ret >= 0) {
8306                 ret = get_errno(safe_write(arg1, copy, ret));
8307             }
8308             g_free(copy);
8309         } else {
8310             ret = get_errno(safe_write(arg1, p, arg3));
8311         }
8312         unlock_user(p, arg2, 0);
8313         return ret;
8314 
8315 #ifdef TARGET_NR_open
8316     case TARGET_NR_open:
8317         if (!(p = lock_user_string(arg1)))
8318             return -TARGET_EFAULT;
8319         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8320                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8321                                   arg3));
8322         fd_trans_unregister(ret);
8323         unlock_user(p, arg1, 0);
8324         return ret;
8325 #endif
8326     case TARGET_NR_openat:
8327         if (!(p = lock_user_string(arg2)))
8328             return -TARGET_EFAULT;
8329         ret = get_errno(do_openat(cpu_env, arg1, p,
8330                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8331                                   arg4));
8332         fd_trans_unregister(ret);
8333         unlock_user(p, arg2, 0);
8334         return ret;
8335 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8336     case TARGET_NR_name_to_handle_at:
8337         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8338         return ret;
8339 #endif
8340 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8341     case TARGET_NR_open_by_handle_at:
8342         ret = do_open_by_handle_at(arg1, arg2, arg3);
8343         fd_trans_unregister(ret);
8344         return ret;
8345 #endif
8346     case TARGET_NR_close:
8347         fd_trans_unregister(arg1);
8348         return get_errno(close(arg1));
8349 
8350     case TARGET_NR_brk:
8351         return do_brk(arg1);
8352 #ifdef TARGET_NR_fork
8353     case TARGET_NR_fork:
8354         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8355 #endif
8356 #ifdef TARGET_NR_waitpid
8357     case TARGET_NR_waitpid:
8358         {
8359             int status;
8360             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8361             if (!is_error(ret) && arg2 && ret
8362                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8363                 return -TARGET_EFAULT;
8364         }
8365         return ret;
8366 #endif
8367 #ifdef TARGET_NR_waitid
8368     case TARGET_NR_waitid:
8369         {
8370             siginfo_t info;
8371             info.si_pid = 0;
8372             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8373             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8374                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8375                     return -TARGET_EFAULT;
8376                 host_to_target_siginfo(p, &info);
8377                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8378             }
8379         }
8380         return ret;
8381 #endif
8382 #ifdef TARGET_NR_creat /* not on alpha */
8383     case TARGET_NR_creat:
8384         if (!(p = lock_user_string(arg1)))
8385             return -TARGET_EFAULT;
8386         ret = get_errno(creat(p, arg2));
8387         fd_trans_unregister(ret);
8388         unlock_user(p, arg1, 0);
8389         return ret;
8390 #endif
8391 #ifdef TARGET_NR_link
8392     case TARGET_NR_link:
8393         {
8394             void * p2;
8395             p = lock_user_string(arg1);
8396             p2 = lock_user_string(arg2);
8397             if (!p || !p2)
8398                 ret = -TARGET_EFAULT;
8399             else
8400                 ret = get_errno(link(p, p2));
8401             unlock_user(p2, arg2, 0);
8402             unlock_user(p, arg1, 0);
8403         }
8404         return ret;
8405 #endif
8406 #if defined(TARGET_NR_linkat)
8407     case TARGET_NR_linkat:
8408         {
8409             void * p2 = NULL;
8410             if (!arg2 || !arg4)
8411                 return -TARGET_EFAULT;
8412             p  = lock_user_string(arg2);
8413             p2 = lock_user_string(arg4);
8414             if (!p || !p2)
8415                 ret = -TARGET_EFAULT;
8416             else
8417                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8418             unlock_user(p, arg2, 0);
8419             unlock_user(p2, arg4, 0);
8420         }
8421         return ret;
8422 #endif
8423 #ifdef TARGET_NR_unlink
8424     case TARGET_NR_unlink:
8425         if (!(p = lock_user_string(arg1)))
8426             return -TARGET_EFAULT;
8427         ret = get_errno(unlink(p));
8428         unlock_user(p, arg1, 0);
8429         return ret;
8430 #endif
8431 #if defined(TARGET_NR_unlinkat)
8432     case TARGET_NR_unlinkat:
8433         if (!(p = lock_user_string(arg2)))
8434             return -TARGET_EFAULT;
8435         ret = get_errno(unlinkat(arg1, p, arg3));
8436         unlock_user(p, arg2, 0);
8437         return ret;
8438 #endif
8439     case TARGET_NR_execve:
8440         {
8441             char **argp, **envp;
8442             int argc, envc;
8443             abi_ulong gp;
8444             abi_ulong guest_argp;
8445             abi_ulong guest_envp;
8446             abi_ulong addr;
8447             char **q;
8448             int total_size = 0;
8449 
8450             argc = 0;
8451             guest_argp = arg2;
8452             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8453                 if (get_user_ual(addr, gp))
8454                     return -TARGET_EFAULT;
8455                 if (!addr)
8456                     break;
8457                 argc++;
8458             }
8459             envc = 0;
8460             guest_envp = arg3;
8461             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8462                 if (get_user_ual(addr, gp))
8463                     return -TARGET_EFAULT;
8464                 if (!addr)
8465                     break;
8466                 envc++;
8467             }
8468 
8469             argp = g_new0(char *, argc + 1);
8470             envp = g_new0(char *, envc + 1);
8471 
8472             for (gp = guest_argp, q = argp; gp;
8473                   gp += sizeof(abi_ulong), q++) {
8474                 if (get_user_ual(addr, gp))
8475                     goto execve_efault;
8476                 if (!addr)
8477                     break;
8478                 if (!(*q = lock_user_string(addr)))
8479                     goto execve_efault;
8480                 total_size += strlen(*q) + 1;
8481             }
8482             *q = NULL;
8483 
8484             for (gp = guest_envp, q = envp; gp;
8485                   gp += sizeof(abi_ulong), q++) {
8486                 if (get_user_ual(addr, gp))
8487                     goto execve_efault;
8488                 if (!addr)
8489                     break;
8490                 if (!(*q = lock_user_string(addr)))
8491                     goto execve_efault;
8492                 total_size += strlen(*q) + 1;
8493             }
8494             *q = NULL;
8495 
8496             if (!(p = lock_user_string(arg1)))
8497                 goto execve_efault;
8498             /* Although execve() is not an interruptible syscall it is
8499              * a special case where we must use the safe_syscall wrapper:
8500              * if we allow a signal to happen before we make the host
8501              * syscall then we will 'lose' it, because at the point of
8502              * execve the process leaves QEMU's control. So we use the
8503              * safe syscall wrapper to ensure that we either take the
8504              * signal as a guest signal, or else it does not happen
8505              * before the execve completes and makes it the other
8506              * program's problem.
8507              */
8508             ret = get_errno(safe_execve(p, argp, envp));
8509             unlock_user(p, arg1, 0);
8510 
8511             goto execve_end;
8512 
8513         execve_efault:
8514             ret = -TARGET_EFAULT;
8515 
8516         execve_end:
8517             for (gp = guest_argp, q = argp; *q;
8518                   gp += sizeof(abi_ulong), q++) {
8519                 if (get_user_ual(addr, gp)
8520                     || !addr)
8521                     break;
8522                 unlock_user(*q, addr, 0);
8523             }
8524             for (gp = guest_envp, q = envp; *q;
8525                   gp += sizeof(abi_ulong), q++) {
8526                 if (get_user_ual(addr, gp)
8527                     || !addr)
8528                     break;
8529                 unlock_user(*q, addr, 0);
8530             }
8531 
8532             g_free(argp);
8533             g_free(envp);
8534         }
8535         return ret;
8536     case TARGET_NR_chdir:
8537         if (!(p = lock_user_string(arg1)))
8538             return -TARGET_EFAULT;
8539         ret = get_errno(chdir(p));
8540         unlock_user(p, arg1, 0);
8541         return ret;
8542 #ifdef TARGET_NR_time
8543     case TARGET_NR_time:
8544         {
8545             time_t host_time;
8546             ret = get_errno(time(&host_time));
8547             if (!is_error(ret)
8548                 && arg1
8549                 && put_user_sal(host_time, arg1))
8550                 return -TARGET_EFAULT;
8551         }
8552         return ret;
8553 #endif
8554 #ifdef TARGET_NR_mknod
8555     case TARGET_NR_mknod:
8556         if (!(p = lock_user_string(arg1)))
8557             return -TARGET_EFAULT;
8558         ret = get_errno(mknod(p, arg2, arg3));
8559         unlock_user(p, arg1, 0);
8560         return ret;
8561 #endif
8562 #if defined(TARGET_NR_mknodat)
8563     case TARGET_NR_mknodat:
8564         if (!(p = lock_user_string(arg2)))
8565             return -TARGET_EFAULT;
8566         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8567         unlock_user(p, arg2, 0);
8568         return ret;
8569 #endif
8570 #ifdef TARGET_NR_chmod
8571     case TARGET_NR_chmod:
8572         if (!(p = lock_user_string(arg1)))
8573             return -TARGET_EFAULT;
8574         ret = get_errno(chmod(p, arg2));
8575         unlock_user(p, arg1, 0);
8576         return ret;
8577 #endif
8578 #ifdef TARGET_NR_lseek
8579     case TARGET_NR_lseek:
8580         return get_errno(lseek(arg1, arg2, arg3));
8581 #endif
8582 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8583     /* Alpha specific */
8584     case TARGET_NR_getxpid:
8585         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8586         return get_errno(getpid());
8587 #endif
8588 #ifdef TARGET_NR_getpid
8589     case TARGET_NR_getpid:
8590         return get_errno(getpid());
8591 #endif
8592     case TARGET_NR_mount:
8593         {
8594             /* need to look at the data field */
8595             void *p2, *p3;
8596 
8597             if (arg1) {
8598                 p = lock_user_string(arg1);
8599                 if (!p) {
8600                     return -TARGET_EFAULT;
8601                 }
8602             } else {
8603                 p = NULL;
8604             }
8605 
8606             p2 = lock_user_string(arg2);
8607             if (!p2) {
8608                 if (arg1) {
8609                     unlock_user(p, arg1, 0);
8610                 }
8611                 return -TARGET_EFAULT;
8612             }
8613 
8614             if (arg3) {
8615                 p3 = lock_user_string(arg3);
8616                 if (!p3) {
8617                     if (arg1) {
8618                         unlock_user(p, arg1, 0);
8619                     }
8620                     unlock_user(p2, arg2, 0);
8621                     return -TARGET_EFAULT;
8622                 }
8623             } else {
8624                 p3 = NULL;
8625             }
8626 
8627             /* FIXME - arg5 should be locked, but it isn't clear how to
8628              * do that since it's not guaranteed to be a NULL-terminated
8629              * string.
8630              */
8631             if (!arg5) {
8632                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8633             } else {
8634                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8635             }
8636             ret = get_errno(ret);
8637 
8638             if (arg1) {
8639                 unlock_user(p, arg1, 0);
8640             }
8641             unlock_user(p2, arg2, 0);
8642             if (arg3) {
8643                 unlock_user(p3, arg3, 0);
8644             }
8645         }
8646         return ret;
8647 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8648 #if defined(TARGET_NR_umount)
8649     case TARGET_NR_umount:
8650 #endif
8651 #if defined(TARGET_NR_oldumount)
8652     case TARGET_NR_oldumount:
8653 #endif
8654         if (!(p = lock_user_string(arg1)))
8655             return -TARGET_EFAULT;
8656         ret = get_errno(umount(p));
8657         unlock_user(p, arg1, 0);
8658         return ret;
8659 #endif
8660 #ifdef TARGET_NR_stime /* not on alpha */
8661     case TARGET_NR_stime:
8662         {
8663             struct timespec ts;
8664             ts.tv_nsec = 0;
8665             if (get_user_sal(ts.tv_sec, arg1)) {
8666                 return -TARGET_EFAULT;
8667             }
8668             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8669         }
8670 #endif
8671 #ifdef TARGET_NR_alarm /* not on alpha */
8672     case TARGET_NR_alarm:
8673         return alarm(arg1);
8674 #endif
8675 #ifdef TARGET_NR_pause /* not on alpha */
8676     case TARGET_NR_pause:
8677         if (!block_signals()) {
8678             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8679         }
8680         return -TARGET_EINTR;
8681 #endif
8682 #ifdef TARGET_NR_utime
8683     case TARGET_NR_utime:
8684         {
8685             struct utimbuf tbuf, *host_tbuf;
8686             struct target_utimbuf *target_tbuf;
8687             if (arg2) {
8688                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8689                     return -TARGET_EFAULT;
8690                 tbuf.actime = tswapal(target_tbuf->actime);
8691                 tbuf.modtime = tswapal(target_tbuf->modtime);
8692                 unlock_user_struct(target_tbuf, arg2, 0);
8693                 host_tbuf = &tbuf;
8694             } else {
8695                 host_tbuf = NULL;
8696             }
8697             if (!(p = lock_user_string(arg1)))
8698                 return -TARGET_EFAULT;
8699             ret = get_errno(utime(p, host_tbuf));
8700             unlock_user(p, arg1, 0);
8701         }
8702         return ret;
8703 #endif
8704 #ifdef TARGET_NR_utimes
8705     case TARGET_NR_utimes:
8706         {
8707             struct timeval *tvp, tv[2];
8708             if (arg2) {
8709                 if (copy_from_user_timeval(&tv[0], arg2)
8710                     || copy_from_user_timeval(&tv[1],
8711                                               arg2 + sizeof(struct target_timeval)))
8712                     return -TARGET_EFAULT;
8713                 tvp = tv;
8714             } else {
8715                 tvp = NULL;
8716             }
8717             if (!(p = lock_user_string(arg1)))
8718                 return -TARGET_EFAULT;
8719             ret = get_errno(utimes(p, tvp));
8720             unlock_user(p, arg1, 0);
8721         }
8722         return ret;
8723 #endif
8724 #if defined(TARGET_NR_futimesat)
8725     case TARGET_NR_futimesat:
8726         {
8727             struct timeval *tvp, tv[2];
8728             if (arg3) {
8729                 if (copy_from_user_timeval(&tv[0], arg3)
8730                     || copy_from_user_timeval(&tv[1],
8731                                               arg3 + sizeof(struct target_timeval)))
8732                     return -TARGET_EFAULT;
8733                 tvp = tv;
8734             } else {
8735                 tvp = NULL;
8736             }
8737             if (!(p = lock_user_string(arg2))) {
8738                 return -TARGET_EFAULT;
8739             }
8740             ret = get_errno(futimesat(arg1, path(p), tvp));
8741             unlock_user(p, arg2, 0);
8742         }
8743         return ret;
8744 #endif
8745 #ifdef TARGET_NR_access
8746     case TARGET_NR_access:
8747         if (!(p = lock_user_string(arg1))) {
8748             return -TARGET_EFAULT;
8749         }
8750         ret = get_errno(access(path(p), arg2));
8751         unlock_user(p, arg1, 0);
8752         return ret;
8753 #endif
8754 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8755     case TARGET_NR_faccessat:
8756         if (!(p = lock_user_string(arg2))) {
8757             return -TARGET_EFAULT;
8758         }
8759         ret = get_errno(faccessat(arg1, p, arg3, 0));
8760         unlock_user(p, arg2, 0);
8761         return ret;
8762 #endif
8763 #ifdef TARGET_NR_nice /* not on alpha */
8764     case TARGET_NR_nice:
8765         return get_errno(nice(arg1));
8766 #endif
8767     case TARGET_NR_sync:
8768         sync();
8769         return 0;
8770 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8771     case TARGET_NR_syncfs:
8772         return get_errno(syncfs(arg1));
8773 #endif
8774     case TARGET_NR_kill:
8775         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8776 #ifdef TARGET_NR_rename
8777     case TARGET_NR_rename:
8778         {
8779             void *p2;
8780             p = lock_user_string(arg1);
8781             p2 = lock_user_string(arg2);
8782             if (!p || !p2)
8783                 ret = -TARGET_EFAULT;
8784             else
8785                 ret = get_errno(rename(p, p2));
8786             unlock_user(p2, arg2, 0);
8787             unlock_user(p, arg1, 0);
8788         }
8789         return ret;
8790 #endif
8791 #if defined(TARGET_NR_renameat)
8792     case TARGET_NR_renameat:
8793         {
8794             void *p2;
8795             p  = lock_user_string(arg2);
8796             p2 = lock_user_string(arg4);
8797             if (!p || !p2)
8798                 ret = -TARGET_EFAULT;
8799             else
8800                 ret = get_errno(renameat(arg1, p, arg3, p2));
8801             unlock_user(p2, arg4, 0);
8802             unlock_user(p, arg2, 0);
8803         }
8804         return ret;
8805 #endif
8806 #if defined(TARGET_NR_renameat2)
8807     case TARGET_NR_renameat2:
8808         {
8809             void *p2;
8810             p  = lock_user_string(arg2);
8811             p2 = lock_user_string(arg4);
8812             if (!p || !p2) {
8813                 ret = -TARGET_EFAULT;
8814             } else {
8815                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8816             }
8817             unlock_user(p2, arg4, 0);
8818             unlock_user(p, arg2, 0);
8819         }
8820         return ret;
8821 #endif
8822 #ifdef TARGET_NR_mkdir
8823     case TARGET_NR_mkdir:
8824         if (!(p = lock_user_string(arg1)))
8825             return -TARGET_EFAULT;
8826         ret = get_errno(mkdir(p, arg2));
8827         unlock_user(p, arg1, 0);
8828         return ret;
8829 #endif
8830 #if defined(TARGET_NR_mkdirat)
8831     case TARGET_NR_mkdirat:
8832         if (!(p = lock_user_string(arg2)))
8833             return -TARGET_EFAULT;
8834         ret = get_errno(mkdirat(arg1, p, arg3));
8835         unlock_user(p, arg2, 0);
8836         return ret;
8837 #endif
8838 #ifdef TARGET_NR_rmdir
8839     case TARGET_NR_rmdir:
8840         if (!(p = lock_user_string(arg1)))
8841             return -TARGET_EFAULT;
8842         ret = get_errno(rmdir(p));
8843         unlock_user(p, arg1, 0);
8844         return ret;
8845 #endif
8846     case TARGET_NR_dup:
8847         ret = get_errno(dup(arg1));
8848         if (ret >= 0) {
8849             fd_trans_dup(arg1, ret);
8850         }
8851         return ret;
8852 #ifdef TARGET_NR_pipe
8853     case TARGET_NR_pipe:
8854         return do_pipe(cpu_env, arg1, 0, 0);
8855 #endif
8856 #ifdef TARGET_NR_pipe2
8857     case TARGET_NR_pipe2:
8858         return do_pipe(cpu_env, arg1,
8859                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8860 #endif
8861     case TARGET_NR_times:
8862         {
8863             struct target_tms *tmsp;
8864             struct tms tms;
8865             ret = get_errno(times(&tms));
8866             if (arg1) {
8867                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8868                 if (!tmsp)
8869                     return -TARGET_EFAULT;
8870                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8871                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8872                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8873                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8874             }
8875             if (!is_error(ret))
8876                 ret = host_to_target_clock_t(ret);
8877         }
8878         return ret;
8879     case TARGET_NR_acct:
8880         if (arg1 == 0) {
8881             ret = get_errno(acct(NULL));
8882         } else {
8883             if (!(p = lock_user_string(arg1))) {
8884                 return -TARGET_EFAULT;
8885             }
8886             ret = get_errno(acct(path(p)));
8887             unlock_user(p, arg1, 0);
8888         }
8889         return ret;
8890 #ifdef TARGET_NR_umount2
8891     case TARGET_NR_umount2:
8892         if (!(p = lock_user_string(arg1)))
8893             return -TARGET_EFAULT;
8894         ret = get_errno(umount2(p, arg2));
8895         unlock_user(p, arg1, 0);
8896         return ret;
8897 #endif
8898     case TARGET_NR_ioctl:
8899         return do_ioctl(arg1, arg2, arg3);
8900 #ifdef TARGET_NR_fcntl
8901     case TARGET_NR_fcntl:
8902         return do_fcntl(arg1, arg2, arg3);
8903 #endif
8904     case TARGET_NR_setpgid:
8905         return get_errno(setpgid(arg1, arg2));
8906     case TARGET_NR_umask:
8907         return get_errno(umask(arg1));
8908     case TARGET_NR_chroot:
8909         if (!(p = lock_user_string(arg1)))
8910             return -TARGET_EFAULT;
8911         ret = get_errno(chroot(p));
8912         unlock_user(p, arg1, 0);
8913         return ret;
8914 #ifdef TARGET_NR_dup2
8915     case TARGET_NR_dup2:
8916         ret = get_errno(dup2(arg1, arg2));
8917         if (ret >= 0) {
8918             fd_trans_dup(arg1, arg2);
8919         }
8920         return ret;
8921 #endif
8922 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8923     case TARGET_NR_dup3:
8924     {
8925         int host_flags;
8926 
8927         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8928             return -TARGET_EINVAL;
8929         }
8930         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8931         ret = get_errno(dup3(arg1, arg2, host_flags));
8932         if (ret >= 0) {
8933             fd_trans_dup(arg1, arg2);
8934         }
8935         return ret;
8936     }
8937 #endif
8938 #ifdef TARGET_NR_getppid /* not on alpha */
8939     case TARGET_NR_getppid:
8940         return get_errno(getppid());
8941 #endif
8942 #ifdef TARGET_NR_getpgrp
8943     case TARGET_NR_getpgrp:
8944         return get_errno(getpgrp());
8945 #endif
8946     case TARGET_NR_setsid:
8947         return get_errno(setsid());
8948 #ifdef TARGET_NR_sigaction
8949     case TARGET_NR_sigaction:
8950         {
8951 #if defined(TARGET_ALPHA)
8952             struct target_sigaction act, oact, *pact = 0;
8953             struct target_old_sigaction *old_act;
8954             if (arg2) {
8955                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8956                     return -TARGET_EFAULT;
8957                 act._sa_handler = old_act->_sa_handler;
8958                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8959                 act.sa_flags = old_act->sa_flags;
8960                 act.sa_restorer = 0;
8961                 unlock_user_struct(old_act, arg2, 0);
8962                 pact = &act;
8963             }
8964             ret = get_errno(do_sigaction(arg1, pact, &oact));
8965             if (!is_error(ret) && arg3) {
8966                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8967                     return -TARGET_EFAULT;
8968                 old_act->_sa_handler = oact._sa_handler;
8969                 old_act->sa_mask = oact.sa_mask.sig[0];
8970                 old_act->sa_flags = oact.sa_flags;
8971                 unlock_user_struct(old_act, arg3, 1);
8972             }
8973 #elif defined(TARGET_MIPS)
8974             struct target_sigaction act, oact, *pact, *old_act;
8975 
8976             if (arg2) {
8977                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8978                     return -TARGET_EFAULT;
8979                 act._sa_handler = old_act->_sa_handler;
8980                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8981                 act.sa_flags = old_act->sa_flags;
8982                 unlock_user_struct(old_act, arg2, 0);
8983                 pact = &act;
8984             } else {
8985                 pact = NULL;
8986             }
8987 
8988             ret = get_errno(do_sigaction(arg1, pact, &oact));
8989 
8990             if (!is_error(ret) && arg3) {
8991                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8992                     return -TARGET_EFAULT;
8993                 old_act->_sa_handler = oact._sa_handler;
8994                 old_act->sa_flags = oact.sa_flags;
8995                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8996                 old_act->sa_mask.sig[1] = 0;
8997                 old_act->sa_mask.sig[2] = 0;
8998                 old_act->sa_mask.sig[3] = 0;
8999                 unlock_user_struct(old_act, arg3, 1);
9000             }
9001 #else
9002             struct target_old_sigaction *old_act;
9003             struct target_sigaction act, oact, *pact;
9004             if (arg2) {
9005                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9006                     return -TARGET_EFAULT;
9007                 act._sa_handler = old_act->_sa_handler;
9008                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9009                 act.sa_flags = old_act->sa_flags;
9010                 act.sa_restorer = old_act->sa_restorer;
9011 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9012                 act.ka_restorer = 0;
9013 #endif
9014                 unlock_user_struct(old_act, arg2, 0);
9015                 pact = &act;
9016             } else {
9017                 pact = NULL;
9018             }
9019             ret = get_errno(do_sigaction(arg1, pact, &oact));
9020             if (!is_error(ret) && arg3) {
9021                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9022                     return -TARGET_EFAULT;
9023                 old_act->_sa_handler = oact._sa_handler;
9024                 old_act->sa_mask = oact.sa_mask.sig[0];
9025                 old_act->sa_flags = oact.sa_flags;
9026                 old_act->sa_restorer = oact.sa_restorer;
9027                 unlock_user_struct(old_act, arg3, 1);
9028             }
9029 #endif
9030         }
9031         return ret;
9032 #endif
9033     case TARGET_NR_rt_sigaction:
9034         {
9035 #if defined(TARGET_ALPHA)
9036             /* For Alpha and SPARC this is a 5 argument syscall, with
9037              * a 'restorer' parameter which must be copied into the
9038              * sa_restorer field of the sigaction struct.
9039              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9040              * and arg5 is the sigsetsize.
9041              * Alpha also has a separate rt_sigaction struct that it uses
9042              * here; SPARC uses the usual sigaction struct.
9043              */
9044             struct target_rt_sigaction *rt_act;
9045             struct target_sigaction act, oact, *pact = 0;
9046 
9047             if (arg4 != sizeof(target_sigset_t)) {
9048                 return -TARGET_EINVAL;
9049             }
9050             if (arg2) {
9051                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9052                     return -TARGET_EFAULT;
9053                 act._sa_handler = rt_act->_sa_handler;
9054                 act.sa_mask = rt_act->sa_mask;
9055                 act.sa_flags = rt_act->sa_flags;
9056                 act.sa_restorer = arg5;
9057                 unlock_user_struct(rt_act, arg2, 0);
9058                 pact = &act;
9059             }
9060             ret = get_errno(do_sigaction(arg1, pact, &oact));
9061             if (!is_error(ret) && arg3) {
9062                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9063                     return -TARGET_EFAULT;
9064                 rt_act->_sa_handler = oact._sa_handler;
9065                 rt_act->sa_mask = oact.sa_mask;
9066                 rt_act->sa_flags = oact.sa_flags;
9067                 unlock_user_struct(rt_act, arg3, 1);
9068             }
9069 #else
9070 #ifdef TARGET_SPARC
9071             target_ulong restorer = arg4;
9072             target_ulong sigsetsize = arg5;
9073 #else
9074             target_ulong sigsetsize = arg4;
9075 #endif
9076             struct target_sigaction *act;
9077             struct target_sigaction *oact;
9078 
9079             if (sigsetsize != sizeof(target_sigset_t)) {
9080                 return -TARGET_EINVAL;
9081             }
9082             if (arg2) {
9083                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9084                     return -TARGET_EFAULT;
9085                 }
9086 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9087                 act->ka_restorer = restorer;
9088 #endif
9089             } else {
9090                 act = NULL;
9091             }
9092             if (arg3) {
9093                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9094                     ret = -TARGET_EFAULT;
9095                     goto rt_sigaction_fail;
9096                 }
9097             } else
9098                 oact = NULL;
9099             ret = get_errno(do_sigaction(arg1, act, oact));
9100 	rt_sigaction_fail:
9101             if (act)
9102                 unlock_user_struct(act, arg2, 0);
9103             if (oact)
9104                 unlock_user_struct(oact, arg3, 1);
9105 #endif
9106         }
9107         return ret;
9108 #ifdef TARGET_NR_sgetmask /* not on alpha */
9109     case TARGET_NR_sgetmask:
9110         {
9111             sigset_t cur_set;
9112             abi_ulong target_set;
9113             ret = do_sigprocmask(0, NULL, &cur_set);
9114             if (!ret) {
9115                 host_to_target_old_sigset(&target_set, &cur_set);
9116                 ret = target_set;
9117             }
9118         }
9119         return ret;
9120 #endif
9121 #ifdef TARGET_NR_ssetmask /* not on alpha */
9122     case TARGET_NR_ssetmask:
9123         {
9124             sigset_t set, oset;
9125             abi_ulong target_set = arg1;
9126             target_to_host_old_sigset(&set, &target_set);
9127             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9128             if (!ret) {
9129                 host_to_target_old_sigset(&target_set, &oset);
9130                 ret = target_set;
9131             }
9132         }
9133         return ret;
9134 #endif
9135 #ifdef TARGET_NR_sigprocmask
9136     case TARGET_NR_sigprocmask:
9137         {
9138 #if defined(TARGET_ALPHA)
9139             sigset_t set, oldset;
9140             abi_ulong mask;
9141             int how;
9142 
9143             switch (arg1) {
9144             case TARGET_SIG_BLOCK:
9145                 how = SIG_BLOCK;
9146                 break;
9147             case TARGET_SIG_UNBLOCK:
9148                 how = SIG_UNBLOCK;
9149                 break;
9150             case TARGET_SIG_SETMASK:
9151                 how = SIG_SETMASK;
9152                 break;
9153             default:
9154                 return -TARGET_EINVAL;
9155             }
9156             mask = arg2;
9157             target_to_host_old_sigset(&set, &mask);
9158 
9159             ret = do_sigprocmask(how, &set, &oldset);
9160             if (!is_error(ret)) {
9161                 host_to_target_old_sigset(&mask, &oldset);
9162                 ret = mask;
9163                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9164             }
9165 #else
9166             sigset_t set, oldset, *set_ptr;
9167             int how;
9168 
9169             if (arg2) {
9170                 switch (arg1) {
9171                 case TARGET_SIG_BLOCK:
9172                     how = SIG_BLOCK;
9173                     break;
9174                 case TARGET_SIG_UNBLOCK:
9175                     how = SIG_UNBLOCK;
9176                     break;
9177                 case TARGET_SIG_SETMASK:
9178                     how = SIG_SETMASK;
9179                     break;
9180                 default:
9181                     return -TARGET_EINVAL;
9182                 }
9183                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9184                     return -TARGET_EFAULT;
9185                 target_to_host_old_sigset(&set, p);
9186                 unlock_user(p, arg2, 0);
9187                 set_ptr = &set;
9188             } else {
9189                 how = 0;
9190                 set_ptr = NULL;
9191             }
9192             ret = do_sigprocmask(how, set_ptr, &oldset);
9193             if (!is_error(ret) && arg3) {
9194                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9195                     return -TARGET_EFAULT;
9196                 host_to_target_old_sigset(p, &oldset);
9197                 unlock_user(p, arg3, sizeof(target_sigset_t));
9198             }
9199 #endif
9200         }
9201         return ret;
9202 #endif
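    /*
     * The rt_ variant carries an explicit sigsetsize (arg4), which must
     * match the size of the target sigset_t exactly.
     */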
9203     case TARGET_NR_rt_sigprocmask:
9204         {
9205             int how = arg1;
9206             sigset_t set, oldset, *set_ptr;
9207 
9208             if (arg4 != sizeof(target_sigset_t)) {
9209                 return -TARGET_EINVAL;
9210             }
9211 
9212             if (arg2) {
9213                 switch(how) {
9214                 case TARGET_SIG_BLOCK:
9215                     how = SIG_BLOCK;
9216                     break;
9217                 case TARGET_SIG_UNBLOCK:
9218                     how = SIG_UNBLOCK;
9219                     break;
9220                 case TARGET_SIG_SETMASK:
9221                     how = SIG_SETMASK;
9222                     break;
9223                 default:
9224                     return -TARGET_EINVAL;
9225                 }
9226                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9227                     return -TARGET_EFAULT;
9228                 target_to_host_sigset(&set, p);
9229                 unlock_user(p, arg2, 0);
9230                 set_ptr = &set;
9231             } else {
9232                 how = 0;
9233                 set_ptr = NULL;
9234             }
9235             ret = do_sigprocmask(how, set_ptr, &oldset);
9236             if (!is_error(ret) && arg3) {
9237                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9238                     return -TARGET_EFAULT;
9239                 host_to_target_sigset(p, &oldset);
9240                 unlock_user(p, arg3, sizeof(target_sigset_t));
9241             }
9242         }
9243         return ret;
9244 #ifdef TARGET_NR_sigpending
9245     case TARGET_NR_sigpending:
9246         {
9247             sigset_t set;
9248             ret = get_errno(sigpending(&set));
9249             if (!is_error(ret)) {
9250                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9251                     return -TARGET_EFAULT;
9252                 host_to_target_old_sigset(p, &set);
9253                 unlock_user(p, arg1, sizeof(target_sigset_t));
9254             }
9255         }
9256         return ret;
9257 #endif
9258     case TARGET_NR_rt_sigpending:
9259         {
9260             sigset_t set;
9261 
9262             /* Yes, this check is >, not != like most. We follow the kernel's
9263              * logic here, which does it this way because it implements
9264              * NR_sigpending through the same code path, and in that case
9265              * the old_sigset_t is smaller in size.
9266              */
9267             if (arg2 > sizeof(target_sigset_t)) {
9268                 return -TARGET_EINVAL;
9269             }
9270 
9271             ret = get_errno(sigpending(&set));
9272             if (!is_error(ret)) {
9273                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9274                     return -TARGET_EFAULT;
9275                 host_to_target_sigset(p, &set);
9276                 unlock_user(p, arg1, sizeof(target_sigset_t));
9277             }
9278         }
9279         return ret;
9280 #ifdef TARGET_NR_sigsuspend
9281     case TARGET_NR_sigsuspend:
9282         {
9283             TaskState *ts = cpu->opaque;
9284 #if defined(TARGET_ALPHA)
9285             abi_ulong mask = arg1;
9286             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9287 #else
9288             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9289                 return -TARGET_EFAULT;
9290             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9291             unlock_user(p, arg1, 0);
9292 #endif
9293             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9294                                                SIGSET_T_SIZE));
9295             if (ret != -TARGET_ERESTARTSYS) {
9296                 ts->in_sigsuspend = 1;
9297             }
9298         }
9299         return ret;
9300 #endif
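    /*
     * As above, the requested mask is stashed in the TaskState and
     * in_sigsuspend flags that a temporary suspend mask is in effect.
     */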
9301     case TARGET_NR_rt_sigsuspend:
9302         {
9303             TaskState *ts = cpu->opaque;
9304 
9305             if (arg2 != sizeof(target_sigset_t)) {
9306                 return -TARGET_EINVAL;
9307             }
9308             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9309                 return -TARGET_EFAULT;
9310             target_to_host_sigset(&ts->sigsuspend_mask, p);
9311             unlock_user(p, arg1, 0);
9312             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9313                                                SIGSET_T_SIZE));
9314             if (ret != -TARGET_ERESTARTSYS) {
9315                 ts->in_sigsuspend = 1;
9316             }
9317         }
9318         return ret;
9319 #ifdef TARGET_NR_rt_sigtimedwait
9320     case TARGET_NR_rt_sigtimedwait:
9321         {
9322             sigset_t set;
9323             struct timespec uts, *puts;
9324             siginfo_t uinfo;
9325 
9326             if (arg4 != sizeof(target_sigset_t)) {
9327                 return -TARGET_EINVAL;
9328             }
9329 
9330             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9331                 return -TARGET_EFAULT;
9332             target_to_host_sigset(&set, p);
9333             unlock_user(p, arg1, 0);
9334             if (arg3) {
9335                 puts = &uts;
9336                 if (target_to_host_timespec(puts, arg3)) {
9337                     return -TARGET_EFAULT;
9338                 }
9339             } else {
9340                 puts = NULL;
9341             }
9342             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9343                                                  SIGSET_T_SIZE));
9344             if (!is_error(ret)) {
9345                 if (arg2) {
9346                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9347                                   0);
9348                     if (!p) {
9349                         return -TARGET_EFAULT;
9350                     }
9351                     host_to_target_siginfo(p, &uinfo);
9352                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9353                 }
9354                 ret = host_to_target_signal(ret);
9355             }
9356         }
9357         return ret;
9358 #endif
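    /*
     * Identical to rt_sigtimedwait except that the timeout is converted
     * with the 64-bit timespec helper.
     */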
9359 #ifdef TARGET_NR_rt_sigtimedwait_time64
9360     case TARGET_NR_rt_sigtimedwait_time64:
9361         {
9362             sigset_t set;
9363             struct timespec uts, *puts;
9364             siginfo_t uinfo;
9365 
9366             if (arg4 != sizeof(target_sigset_t)) {
9367                 return -TARGET_EINVAL;
9368             }
9369 
9370             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9371             if (!p) {
9372                 return -TARGET_EFAULT;
9373             }
9374             target_to_host_sigset(&set, p);
9375             unlock_user(p, arg1, 0);
9376             if (arg3) {
9377                 puts = &uts;
9378                 if (target_to_host_timespec64(puts, arg3)) {
9379                     return -TARGET_EFAULT;
9380                 }
9381             } else {
9382                 puts = NULL;
9383             }
9384             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9385                                                  SIGSET_T_SIZE));
9386             if (!is_error(ret)) {
9387                 if (arg2) {
9388                     p = lock_user(VERIFY_WRITE, arg2,
9389                                   sizeof(target_siginfo_t), 0);
9390                     if (!p) {
9391                         return -TARGET_EFAULT;
9392                     }
9393                     host_to_target_siginfo(p, &uinfo);
9394                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9395                 }
9396                 ret = host_to_target_signal(ret);
9397             }
9398         }
9399         return ret;
9400 #endif
9401     case TARGET_NR_rt_sigqueueinfo:
9402         {
9403             siginfo_t uinfo;
9404 
9405             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9406             if (!p) {
9407                 return -TARGET_EFAULT;
9408             }
9409             target_to_host_siginfo(&uinfo, p);
9410             unlock_user(p, arg3, 0);
9411             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9412         }
9413         return ret;
9414     case TARGET_NR_rt_tgsigqueueinfo:
9415         {
9416             siginfo_t uinfo;
9417 
9418             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9419             if (!p) {
9420                 return -TARGET_EFAULT;
9421             }
9422             target_to_host_siginfo(&uinfo, p);
9423             unlock_user(p, arg4, 0);
9424             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9425         }
9426         return ret;
9427 #ifdef TARGET_NR_sigreturn
9428     case TARGET_NR_sigreturn:
9429         if (block_signals()) {
9430             return -TARGET_ERESTARTSYS;
9431         }
9432         return do_sigreturn(cpu_env);
9433 #endif
9434     case TARGET_NR_rt_sigreturn:
9435         if (block_signals()) {
9436             return -TARGET_ERESTARTSYS;
9437         }
9438         return do_rt_sigreturn(cpu_env);
9439     case TARGET_NR_sethostname:
9440         if (!(p = lock_user_string(arg1)))
9441             return -TARGET_EFAULT;
9442         ret = get_errno(sethostname(p, arg2));
9443         unlock_user(p, arg1, 0);
9444         return ret;
9445 #ifdef TARGET_NR_setrlimit
9446     case TARGET_NR_setrlimit:
9447         {
9448             int resource = target_to_host_resource(arg1);
9449             struct target_rlimit *target_rlim;
9450             struct rlimit rlim;
9451             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9452                 return -TARGET_EFAULT;
9453             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9454             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9455             unlock_user_struct(target_rlim, arg2, 0);
9456             /*
9457              * If we just passed through resource limit settings for memory then
9458              * they would also apply to QEMU's own allocations, and QEMU will
9459              * crash or hang or die if its allocations fail. Ideally we would
9460              * track the guest allocations in QEMU and apply the limits ourselves.
9461              * For now, just tell the guest the call succeeded but don't actually
9462              * limit anything.
9463              */
9464             if (resource != RLIMIT_AS &&
9465                 resource != RLIMIT_DATA &&
9466                 resource != RLIMIT_STACK) {
9467                 return get_errno(setrlimit(resource, &rlim));
9468             } else {
9469                 return 0;
9470             }
9471         }
9472 #endif
9473 #ifdef TARGET_NR_getrlimit
9474     case TARGET_NR_getrlimit:
9475         {
9476             int resource = target_to_host_resource(arg1);
9477             struct target_rlimit *target_rlim;
9478             struct rlimit rlim;
9479 
9480             ret = get_errno(getrlimit(resource, &rlim));
9481             if (!is_error(ret)) {
9482                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9483                     return -TARGET_EFAULT;
9484                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9485                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9486                 unlock_user_struct(target_rlim, arg2, 1);
9487             }
9488         }
9489         return ret;
9490 #endif
9491     case TARGET_NR_getrusage:
9492         {
9493             struct rusage rusage;
9494             ret = get_errno(getrusage(arg1, &rusage));
9495             if (!is_error(ret)) {
9496                 ret = host_to_target_rusage(arg2, &rusage);
9497             }
9498         }
9499         return ret;
9500 #if defined(TARGET_NR_gettimeofday)
9501     case TARGET_NR_gettimeofday:
9502         {
9503             struct timeval tv;
9504             struct timezone tz;
9505 
9506             ret = get_errno(gettimeofday(&tv, &tz));
9507             if (!is_error(ret)) {
9508                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9509                     return -TARGET_EFAULT;
9510                 }
9511                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9512                     return -TARGET_EFAULT;
9513                 }
9514             }
9515         }
9516         return ret;
9517 #endif
9518 #if defined(TARGET_NR_settimeofday)
9519     case TARGET_NR_settimeofday:
9520         {
9521             struct timeval tv, *ptv = NULL;
9522             struct timezone tz, *ptz = NULL;
9523 
9524             if (arg1) {
9525                 if (copy_from_user_timeval(&tv, arg1)) {
9526                     return -TARGET_EFAULT;
9527                 }
9528                 ptv = &tv;
9529             }
9530 
9531             if (arg2) {
9532                 if (copy_from_user_timezone(&tz, arg2)) {
9533                     return -TARGET_EFAULT;
9534                 }
9535                 ptz = &tz;
9536             }
9537 
9538             return get_errno(settimeofday(ptv, ptz));
9539         }
9540 #endif
9541 #if defined(TARGET_NR_select)
9542     case TARGET_NR_select:
9543 #if defined(TARGET_WANT_NI_OLD_SELECT)
9544         /* Some architectures used to have old_select here
9545          * but now return ENOSYS for it.
9546          */
9547         ret = -TARGET_ENOSYS;
9548 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9549         ret = do_old_select(arg1);
9550 #else
9551         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9552 #endif
9553         return ret;
9554 #endif
9555 #ifdef TARGET_NR_pselect6
9556     case TARGET_NR_pselect6:
9557         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9558 #endif
9559 #ifdef TARGET_NR_pselect6_time64
9560     case TARGET_NR_pselect6_time64:
9561         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9562 #endif
9563 #ifdef TARGET_NR_symlink
9564     case TARGET_NR_symlink:
9565         {
9566             void *p2;
9567             p = lock_user_string(arg1);
9568             p2 = lock_user_string(arg2);
9569             if (!p || !p2)
9570                 ret = -TARGET_EFAULT;
9571             else
9572                 ret = get_errno(symlink(p, p2));
9573             unlock_user(p2, arg2, 0);
9574             unlock_user(p, arg1, 0);
9575         }
9576         return ret;
9577 #endif
9578 #if defined(TARGET_NR_symlinkat)
9579     case TARGET_NR_symlinkat:
9580         {
9581             void *p2;
9582             p  = lock_user_string(arg1);
9583             p2 = lock_user_string(arg3);
9584             if (!p || !p2)
9585                 ret = -TARGET_EFAULT;
9586             else
9587                 ret = get_errno(symlinkat(p, arg2, p2));
9588             unlock_user(p2, arg3, 0);
9589             unlock_user(p, arg1, 0);
9590         }
9591         return ret;
9592 #endif
9593 #ifdef TARGET_NR_readlink
9594     case TARGET_NR_readlink:
9595         {
9596             void *p2;
9597             p = lock_user_string(arg1);
9598             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9599             if (!p || !p2) {
9600                 ret = -TARGET_EFAULT;
9601             } else if (!arg3) {
9602                 /* Short circuit this for the magic exe check. */
9603                 ret = -TARGET_EINVAL;
9604             } else if (is_proc_myself((const char *)p, "exe")) {
9605                 char real[PATH_MAX], *temp;
9606                 temp = realpath(exec_path, real);
9607                 /* Return value is # of bytes that we wrote to the buffer. */
9608                 if (temp == NULL) {
9609                     ret = get_errno(-1);
9610                 } else {
9611                     /* Don't worry about sign mismatch as earlier mapping
9612                      * logic would have thrown a bad address error. */
9613                     ret = MIN(strlen(real), arg3);
9614                     /* We cannot NUL terminate the string. */
9615                     memcpy(p2, real, ret);
9616                 }
9617             } else {
9618                 ret = get_errno(readlink(path(p), p2, arg3));
9619             }
9620             unlock_user(p2, arg2, ret);
9621             unlock_user(p, arg1, 0);
9622         }
9623         return ret;
9624 #endif
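    /*
     * readlinkat gets the same /proc/self/exe special case as readlink
     * above, reporting the real path of the emulated executable.
     */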
9625 #if defined(TARGET_NR_readlinkat)
9626     case TARGET_NR_readlinkat:
9627         {
9628             void *p2;
9629             p  = lock_user_string(arg2);
9630             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9631             if (!p || !p2) {
9632                 ret = -TARGET_EFAULT;
9633             } else if (is_proc_myself((const char *)p, "exe")) {
9634                 char real[PATH_MAX], *temp;
9635                 temp = realpath(exec_path, real);
9636                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9637                 snprintf((char *)p2, arg4, "%s", real);
9638             } else {
9639                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9640             }
9641             unlock_user(p2, arg3, ret);
9642             unlock_user(p, arg2, 0);
9643         }
9644         return ret;
9645 #endif
9646 #ifdef TARGET_NR_swapon
9647     case TARGET_NR_swapon:
9648         if (!(p = lock_user_string(arg1)))
9649             return -TARGET_EFAULT;
9650         ret = get_errno(swapon(p, arg2));
9651         unlock_user(p, arg1, 0);
9652         return ret;
9653 #endif
9654     case TARGET_NR_reboot:
9655         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9656            /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2; it must be ignored otherwise */
9657            p = lock_user_string(arg4);
9658            if (!p) {
9659                return -TARGET_EFAULT;
9660            }
9661            ret = get_errno(reboot(arg1, arg2, arg3, p));
9662            unlock_user(p, arg4, 0);
9663         } else {
9664            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9665         }
9666         return ret;
9667 #ifdef TARGET_NR_mmap
9668     case TARGET_NR_mmap:
9669 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9670     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9671     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9672     || defined(TARGET_S390X)
9673         {
9674             abi_ulong *v;
9675             abi_ulong v1, v2, v3, v4, v5, v6;
9676             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9677                 return -TARGET_EFAULT;
9678             v1 = tswapal(v[0]);
9679             v2 = tswapal(v[1]);
9680             v3 = tswapal(v[2]);
9681             v4 = tswapal(v[3]);
9682             v5 = tswapal(v[4]);
9683             v6 = tswapal(v[5]);
9684             unlock_user(v, arg1, 0);
9685             ret = get_errno(target_mmap(v1, v2, v3,
9686                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9687                                         v5, v6));
9688         }
9689 #else
9690         ret = get_errno(target_mmap(arg1, arg2, arg3,
9691                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9692                                     arg5,
9693                                     arg6));
9694 #endif
9695         return ret;
9696 #endif
9697 #ifdef TARGET_NR_mmap2
9698     case TARGET_NR_mmap2:
9699 #ifndef MMAP_SHIFT
9700 #define MMAP_SHIFT 12
9701 #endif
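        /*
         * mmap2 passes the file offset in units of 4096-byte pages;
         * shift it up to a byte offset before calling target_mmap.
         */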
9702         ret = target_mmap(arg1, arg2, arg3,
9703                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9704                           arg5, arg6 << MMAP_SHIFT);
9705         return get_errno(ret);
9706 #endif
9707     case TARGET_NR_munmap:
9708         return get_errno(target_munmap(arg1, arg2));
9709     case TARGET_NR_mprotect:
9710         {
9711             TaskState *ts = cpu->opaque;
9712             /* Special hack to detect libc making the stack executable.  */
9713             if ((arg3 & PROT_GROWSDOWN)
9714                 && arg1 >= ts->info->stack_limit
9715                 && arg1 <= ts->info->start_stack) {
9716                 arg3 &= ~PROT_GROWSDOWN;
9717                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9718                 arg1 = ts->info->stack_limit;
9719             }
9720         }
9721         return get_errno(target_mprotect(arg1, arg2, arg3));
9722 #ifdef TARGET_NR_mremap
9723     case TARGET_NR_mremap:
9724         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9725 #endif
9726         /* ??? msync/mlock/munlock are broken for softmmu.  */
9727 #ifdef TARGET_NR_msync
9728     case TARGET_NR_msync:
9729         return get_errno(msync(g2h(arg1), arg2, arg3));
9730 #endif
9731 #ifdef TARGET_NR_mlock
9732     case TARGET_NR_mlock:
9733         return get_errno(mlock(g2h(arg1), arg2));
9734 #endif
9735 #ifdef TARGET_NR_munlock
9736     case TARGET_NR_munlock:
9737         return get_errno(munlock(g2h(arg1), arg2));
9738 #endif
9739 #ifdef TARGET_NR_mlockall
9740     case TARGET_NR_mlockall:
9741         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9742 #endif
9743 #ifdef TARGET_NR_munlockall
9744     case TARGET_NR_munlockall:
9745         return get_errno(munlockall());
9746 #endif
9747 #ifdef TARGET_NR_truncate
9748     case TARGET_NR_truncate:
9749         if (!(p = lock_user_string(arg1)))
9750             return -TARGET_EFAULT;
9751         ret = get_errno(truncate(p, arg2));
9752         unlock_user(p, arg1, 0);
9753         return ret;
9754 #endif
9755 #ifdef TARGET_NR_ftruncate
9756     case TARGET_NR_ftruncate:
9757         return get_errno(ftruncate(arg1, arg2));
9758 #endif
9759     case TARGET_NR_fchmod:
9760         return get_errno(fchmod(arg1, arg2));
9761 #if defined(TARGET_NR_fchmodat)
9762     case TARGET_NR_fchmodat:
9763         if (!(p = lock_user_string(arg2)))
9764             return -TARGET_EFAULT;
9765         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9766         unlock_user(p, arg2, 0);
9767         return ret;
9768 #endif
9769     case TARGET_NR_getpriority:
9770         /* Note that negative values are valid for getpriority, so we must
9771            differentiate based on errno settings.  */
9772         errno = 0;
9773         ret = getpriority(arg1, arg2);
9774         if (ret == -1 && errno != 0) {
9775             return -host_to_target_errno(errno);
9776         }
9777 #ifdef TARGET_ALPHA
9778         /* Return value is the unbiased priority.  Signal no error.  */
9779         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9780 #else
9781         /* Return value is a biased priority to avoid negative numbers.  */
9782         ret = 20 - ret;
9783 #endif
9784         return ret;
9785     case TARGET_NR_setpriority:
9786         return get_errno(setpriority(arg1, arg2, arg3));
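    /*
     * statfs/fstatfs share the host-to-target conversion via the
     * convert_statfs label; the 64-bit variants use convert_statfs64.
     */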
9787 #ifdef TARGET_NR_statfs
9788     case TARGET_NR_statfs:
9789         if (!(p = lock_user_string(arg1))) {
9790             return -TARGET_EFAULT;
9791         }
9792         ret = get_errno(statfs(path(p), &stfs));
9793         unlock_user(p, arg1, 0);
9794     convert_statfs:
9795         if (!is_error(ret)) {
9796             struct target_statfs *target_stfs;
9797 
9798             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9799                 return -TARGET_EFAULT;
9800             __put_user(stfs.f_type, &target_stfs->f_type);
9801             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9802             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9803             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9804             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9805             __put_user(stfs.f_files, &target_stfs->f_files);
9806             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9807             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9808             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9809             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9810             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9811 #ifdef _STATFS_F_FLAGS
9812             __put_user(stfs.f_flags, &target_stfs->f_flags);
9813 #else
9814             __put_user(0, &target_stfs->f_flags);
9815 #endif
9816             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9817             unlock_user_struct(target_stfs, arg2, 1);
9818         }
9819         return ret;
9820 #endif
9821 #ifdef TARGET_NR_fstatfs
9822     case TARGET_NR_fstatfs:
9823         ret = get_errno(fstatfs(arg1, &stfs));
9824         goto convert_statfs;
9825 #endif
9826 #ifdef TARGET_NR_statfs64
9827     case TARGET_NR_statfs64:
9828         if (!(p = lock_user_string(arg1))) {
9829             return -TARGET_EFAULT;
9830         }
9831         ret = get_errno(statfs(path(p), &stfs));
9832         unlock_user(p, arg1, 0);
9833     convert_statfs64:
9834         if (!is_error(ret)) {
9835             struct target_statfs64 *target_stfs;
9836 
9837             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9838                 return -TARGET_EFAULT;
9839             __put_user(stfs.f_type, &target_stfs->f_type);
9840             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9841             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9842             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9843             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9844             __put_user(stfs.f_files, &target_stfs->f_files);
9845             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9846             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9847             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9848             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9849             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9850 #ifdef _STATFS_F_FLAGS
9851             __put_user(stfs.f_flags, &target_stfs->f_flags);
9852 #else
9853             __put_user(0, &target_stfs->f_flags);
9854 #endif
9855             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9856             unlock_user_struct(target_stfs, arg3, 1);
9857         }
9858         return ret;
9859     case TARGET_NR_fstatfs64:
9860         ret = get_errno(fstatfs(arg1, &stfs));
9861         goto convert_statfs64;
9862 #endif
9863 #ifdef TARGET_NR_socketcall
9864     case TARGET_NR_socketcall:
9865         return do_socketcall(arg1, arg2);
9866 #endif
9867 #ifdef TARGET_NR_accept
9868     case TARGET_NR_accept:
9869         return do_accept4(arg1, arg2, arg3, 0);
9870 #endif
9871 #ifdef TARGET_NR_accept4
9872     case TARGET_NR_accept4:
9873         return do_accept4(arg1, arg2, arg3, arg4);
9874 #endif
9875 #ifdef TARGET_NR_bind
9876     case TARGET_NR_bind:
9877         return do_bind(arg1, arg2, arg3);
9878 #endif
9879 #ifdef TARGET_NR_connect
9880     case TARGET_NR_connect:
9881         return do_connect(arg1, arg2, arg3);
9882 #endif
9883 #ifdef TARGET_NR_getpeername
9884     case TARGET_NR_getpeername:
9885         return do_getpeername(arg1, arg2, arg3);
9886 #endif
9887 #ifdef TARGET_NR_getsockname
9888     case TARGET_NR_getsockname:
9889         return do_getsockname(arg1, arg2, arg3);
9890 #endif
9891 #ifdef TARGET_NR_getsockopt
9892     case TARGET_NR_getsockopt:
9893         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9894 #endif
9895 #ifdef TARGET_NR_listen
9896     case TARGET_NR_listen:
9897         return get_errno(listen(arg1, arg2));
9898 #endif
9899 #ifdef TARGET_NR_recv
9900     case TARGET_NR_recv:
9901         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9902 #endif
9903 #ifdef TARGET_NR_recvfrom
9904     case TARGET_NR_recvfrom:
9905         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9906 #endif
9907 #ifdef TARGET_NR_recvmsg
9908     case TARGET_NR_recvmsg:
9909         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9910 #endif
9911 #ifdef TARGET_NR_send
9912     case TARGET_NR_send:
9913         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9914 #endif
9915 #ifdef TARGET_NR_sendmsg
9916     case TARGET_NR_sendmsg:
9917         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9918 #endif
9919 #ifdef TARGET_NR_sendmmsg
9920     case TARGET_NR_sendmmsg:
9921         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9922 #endif
9923 #ifdef TARGET_NR_recvmmsg
9924     case TARGET_NR_recvmmsg:
9925         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9926 #endif
9927 #ifdef TARGET_NR_sendto
9928     case TARGET_NR_sendto:
9929         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9930 #endif
9931 #ifdef TARGET_NR_shutdown
9932     case TARGET_NR_shutdown:
9933         return get_errno(shutdown(arg1, arg2));
9934 #endif
9935 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9936     case TARGET_NR_getrandom:
9937         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9938         if (!p) {
9939             return -TARGET_EFAULT;
9940         }
9941         ret = get_errno(getrandom(p, arg2, arg3));
9942         unlock_user(p, arg1, ret);
9943         return ret;
9944 #endif
9945 #ifdef TARGET_NR_socket
9946     case TARGET_NR_socket:
9947         return do_socket(arg1, arg2, arg3);
9948 #endif
9949 #ifdef TARGET_NR_socketpair
9950     case TARGET_NR_socketpair:
9951         return do_socketpair(arg1, arg2, arg3, arg4);
9952 #endif
9953 #ifdef TARGET_NR_setsockopt
9954     case TARGET_NR_setsockopt:
9955         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9956 #endif
9957 #if defined(TARGET_NR_syslog)
9958     case TARGET_NR_syslog:
9959         {
9960             int len = arg3;
9961 
9962             switch (arg1) {
9963             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9964             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9965             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9966             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9967             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9968             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9969             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9970             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9971                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9972             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9973             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9974             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9975                 {
9976                     if (len < 0) {
9977                         return -TARGET_EINVAL;
9978                     }
9979                     if (len == 0) {
9980                         return 0;
9981                     }
9982                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9983                     if (!p) {
9984                         return -TARGET_EFAULT;
9985                     }
9986                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9987                     unlock_user(p, arg2, arg3);
9988                 }
9989                 return ret;
9990             default:
9991                 return -TARGET_EINVAL;
9992             }
9993         }
9994         break;
9995 #endif
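    /*
     * The target struct itimerval is two target_timevals back to back,
     * hence the sizeof(struct target_timeval) offset when copying each
     * half below.
     */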
9996     case TARGET_NR_setitimer:
9997         {
9998             struct itimerval value, ovalue, *pvalue;
9999 
10000             if (arg2) {
10001                 pvalue = &value;
10002                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10003                     || copy_from_user_timeval(&pvalue->it_value,
10004                                               arg2 + sizeof(struct target_timeval)))
10005                     return -TARGET_EFAULT;
10006             } else {
10007                 pvalue = NULL;
10008             }
10009             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10010             if (!is_error(ret) && arg3) {
10011                 if (copy_to_user_timeval(arg3,
10012                                          &ovalue.it_interval)
10013                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10014                                             &ovalue.it_value))
10015                     return -TARGET_EFAULT;
10016             }
10017         }
10018         return ret;
10019     case TARGET_NR_getitimer:
10020         {
10021             struct itimerval value;
10022 
10023             ret = get_errno(getitimer(arg1, &value));
10024             if (!is_error(ret) && arg2) {
10025                 if (copy_to_user_timeval(arg2,
10026                                          &value.it_interval)
10027                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10028                                             &value.it_value))
10029                     return -TARGET_EFAULT;
10030             }
10031         }
10032         return ret;
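    /*
     * stat, lstat and fstat all converge on the do_stat label, which
     * copies the host struct stat into the target layout field by field.
     */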
10033 #ifdef TARGET_NR_stat
10034     case TARGET_NR_stat:
10035         if (!(p = lock_user_string(arg1))) {
10036             return -TARGET_EFAULT;
10037         }
10038         ret = get_errno(stat(path(p), &st));
10039         unlock_user(p, arg1, 0);
10040         goto do_stat;
10041 #endif
10042 #ifdef TARGET_NR_lstat
10043     case TARGET_NR_lstat:
10044         if (!(p = lock_user_string(arg1))) {
10045             return -TARGET_EFAULT;
10046         }
10047         ret = get_errno(lstat(path(p), &st));
10048         unlock_user(p, arg1, 0);
10049         goto do_stat;
10050 #endif
10051 #ifdef TARGET_NR_fstat
10052     case TARGET_NR_fstat:
10053         {
10054             ret = get_errno(fstat(arg1, &st));
10055 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10056         do_stat:
10057 #endif
10058             if (!is_error(ret)) {
10059                 struct target_stat *target_st;
10060 
10061                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10062                     return -TARGET_EFAULT;
10063                 memset(target_st, 0, sizeof(*target_st));
10064                 __put_user(st.st_dev, &target_st->st_dev);
10065                 __put_user(st.st_ino, &target_st->st_ino);
10066                 __put_user(st.st_mode, &target_st->st_mode);
10067                 __put_user(st.st_uid, &target_st->st_uid);
10068                 __put_user(st.st_gid, &target_st->st_gid);
10069                 __put_user(st.st_nlink, &target_st->st_nlink);
10070                 __put_user(st.st_rdev, &target_st->st_rdev);
10071                 __put_user(st.st_size, &target_st->st_size);
10072                 __put_user(st.st_blksize, &target_st->st_blksize);
10073                 __put_user(st.st_blocks, &target_st->st_blocks);
10074                 __put_user(st.st_atime, &target_st->target_st_atime);
10075                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10076                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10077 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10078     defined(TARGET_STAT_HAVE_NSEC)
10079                 __put_user(st.st_atim.tv_nsec,
10080                            &target_st->target_st_atime_nsec);
10081                 __put_user(st.st_mtim.tv_nsec,
10082                            &target_st->target_st_mtime_nsec);
10083                 __put_user(st.st_ctim.tv_nsec,
10084                            &target_st->target_st_ctime_nsec);
10085 #endif
10086                 unlock_user_struct(target_st, arg2, 1);
10087             }
10088         }
10089         return ret;
10090 #endif
10091     case TARGET_NR_vhangup:
10092         return get_errno(vhangup());
10093 #ifdef TARGET_NR_syscall
10094     case TARGET_NR_syscall:
10095         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10096                           arg6, arg7, arg8, 0);
10097 #endif
10098 #if defined(TARGET_NR_wait4)
10099     case TARGET_NR_wait4:
10100         {
10101             int status;
10102             abi_long status_ptr = arg2;
10103             struct rusage rusage, *rusage_ptr;
10104             abi_ulong target_rusage = arg4;
10105             abi_long rusage_err;
10106             if (target_rusage)
10107                 rusage_ptr = &rusage;
10108             else
10109                 rusage_ptr = NULL;
10110             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10111             if (!is_error(ret)) {
10112                 if (status_ptr && ret) {
10113                     status = host_to_target_waitstatus(status);
10114                     if (put_user_s32(status, status_ptr))
10115                         return -TARGET_EFAULT;
10116                 }
10117                 if (target_rusage) {
10118                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10119                     if (rusage_err) {
10120                         ret = rusage_err;
10121                     }
10122                 }
10123             }
10124         }
10125         return ret;
10126 #endif
10127 #ifdef TARGET_NR_swapoff
10128     case TARGET_NR_swapoff:
10129         if (!(p = lock_user_string(arg1)))
10130             return -TARGET_EFAULT;
10131         ret = get_errno(swapoff(p));
10132         unlock_user(p, arg1, 0);
10133         return ret;
10134 #endif
10135     case TARGET_NR_sysinfo:
10136         {
10137             struct target_sysinfo *target_value;
10138             struct sysinfo value;
10139             ret = get_errno(sysinfo(&value));
10140             if (!is_error(ret) && arg1)
10141             {
10142                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10143                     return -TARGET_EFAULT;
10144                 __put_user(value.uptime, &target_value->uptime);
10145                 __put_user(value.loads[0], &target_value->loads[0]);
10146                 __put_user(value.loads[1], &target_value->loads[1]);
10147                 __put_user(value.loads[2], &target_value->loads[2]);
10148                 __put_user(value.totalram, &target_value->totalram);
10149                 __put_user(value.freeram, &target_value->freeram);
10150                 __put_user(value.sharedram, &target_value->sharedram);
10151                 __put_user(value.bufferram, &target_value->bufferram);
10152                 __put_user(value.totalswap, &target_value->totalswap);
10153                 __put_user(value.freeswap, &target_value->freeswap);
10154                 __put_user(value.procs, &target_value->procs);
10155                 __put_user(value.totalhigh, &target_value->totalhigh);
10156                 __put_user(value.freehigh, &target_value->freehigh);
10157                 __put_user(value.mem_unit, &target_value->mem_unit);
10158                 unlock_user_struct(target_value, arg1, 1);
10159             }
10160         }
10161         return ret;
10162 #ifdef TARGET_NR_ipc
10163     case TARGET_NR_ipc:
10164         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10165 #endif
10166 #ifdef TARGET_NR_semget
10167     case TARGET_NR_semget:
10168         return get_errno(semget(arg1, arg2, arg3));
10169 #endif
10170 #ifdef TARGET_NR_semop
10171     case TARGET_NR_semop:
10172         return do_semtimedop(arg1, arg2, arg3, 0, false);
10173 #endif
10174 #ifdef TARGET_NR_semtimedop
10175     case TARGET_NR_semtimedop:
10176         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10177 #endif
10178 #ifdef TARGET_NR_semtimedop_time64
10179     case TARGET_NR_semtimedop_time64:
10180         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10181 #endif
10182 #ifdef TARGET_NR_semctl
10183     case TARGET_NR_semctl:
10184         return do_semctl(arg1, arg2, arg3, arg4);
10185 #endif
10186 #ifdef TARGET_NR_msgctl
10187     case TARGET_NR_msgctl:
10188         return do_msgctl(arg1, arg2, arg3);
10189 #endif
10190 #ifdef TARGET_NR_msgget
10191     case TARGET_NR_msgget:
10192         return get_errno(msgget(arg1, arg2));
10193 #endif
10194 #ifdef TARGET_NR_msgrcv
10195     case TARGET_NR_msgrcv:
10196         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10197 #endif
10198 #ifdef TARGET_NR_msgsnd
10199     case TARGET_NR_msgsnd:
10200         return do_msgsnd(arg1, arg2, arg3, arg4);
10201 #endif
10202 #ifdef TARGET_NR_shmget
10203     case TARGET_NR_shmget:
10204         return get_errno(shmget(arg1, arg2, arg3));
10205 #endif
10206 #ifdef TARGET_NR_shmctl
10207     case TARGET_NR_shmctl:
10208         return do_shmctl(arg1, arg2, arg3);
10209 #endif
10210 #ifdef TARGET_NR_shmat
10211     case TARGET_NR_shmat:
10212         return do_shmat(cpu_env, arg1, arg2, arg3);
10213 #endif
10214 #ifdef TARGET_NR_shmdt
10215     case TARGET_NR_shmdt:
10216         return do_shmdt(arg1);
10217 #endif
10218     case TARGET_NR_fsync:
10219         return get_errno(fsync(arg1));
10220     case TARGET_NR_clone:
10221         /* Linux manages to have three different orderings for its
10222          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10223          * match the kernel's CONFIG_CLONE_* settings.
10224          * Microblaze is further special in that it uses a sixth
10225          * implicit argument to clone for the TLS pointer.
10226          */
10227 #if defined(TARGET_MICROBLAZE)
10228         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10229 #elif defined(TARGET_CLONE_BACKWARDS)
10230         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10231 #elif defined(TARGET_CLONE_BACKWARDS2)
10232         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10233 #else
10234         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10235 #endif
10236         return ret;
10237 #ifdef __NR_exit_group
10238         /* new thread calls */
10239     case TARGET_NR_exit_group:
10240         preexit_cleanup(cpu_env, arg1);
10241         return get_errno(exit_group(arg1));
10242 #endif
10243     case TARGET_NR_setdomainname:
10244         if (!(p = lock_user_string(arg1)))
10245             return -TARGET_EFAULT;
10246         ret = get_errno(setdomainname(p, arg2));
10247         unlock_user(p, arg1, 0);
10248         return ret;
10249     case TARGET_NR_uname:
10250         /* no need to transcode because we use the linux syscall */
10251         {
10252             struct new_utsname * buf;
10253 
10254             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10255                 return -TARGET_EFAULT;
10256             ret = get_errno(sys_uname(buf));
10257             if (!is_error(ret)) {
10258                 /* Overwrite the native machine name with whatever is being
10259                    emulated. */
10260                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10261                           sizeof(buf->machine));
10262                 /* Allow the user to override the reported release.  */
10263                 if (qemu_uname_release && *qemu_uname_release) {
10264                     g_strlcpy(buf->release, qemu_uname_release,
10265                               sizeof(buf->release));
10266                 }
10267             }
10268             unlock_user_struct(buf, arg1, 1);
10269         }
10270         return ret;
10271 #ifdef TARGET_I386
10272     case TARGET_NR_modify_ldt:
10273         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10274 #if !defined(TARGET_X86_64)
10275     case TARGET_NR_vm86:
10276         return do_vm86(cpu_env, arg1, arg2);
10277 #endif
10278 #endif
10279 #if defined(TARGET_NR_adjtimex)
10280     case TARGET_NR_adjtimex:
10281         {
10282             struct timex host_buf;
10283 
10284             if (target_to_host_timex(&host_buf, arg1) != 0) {
10285                 return -TARGET_EFAULT;
10286             }
10287             ret = get_errno(adjtimex(&host_buf));
10288             if (!is_error(ret)) {
10289                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10290                     return -TARGET_EFAULT;
10291                 }
10292             }
10293         }
10294         return ret;
10295 #endif
10296 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10297     case TARGET_NR_clock_adjtime:
10298         {
10299             struct timex htx, *phtx = &htx;
10300 
10301             if (target_to_host_timex(phtx, arg2) != 0) {
10302                 return -TARGET_EFAULT;
10303             }
10304             ret = get_errno(clock_adjtime(arg1, phtx));
10305             if (!is_error(ret) && phtx) {
10306                 if (host_to_target_timex(arg2, phtx) != 0) {
10307                     return -TARGET_EFAULT;
10308                 }
10309             }
10310         }
10311         return ret;
10312 #endif
10313 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10314     case TARGET_NR_clock_adjtime64:
10315         {
10316             struct timex htx;
10317 
10318             if (target_to_host_timex64(&htx, arg2) != 0) {
10319                 return -TARGET_EFAULT;
10320             }
10321             ret = get_errno(clock_adjtime(arg1, &htx));
10322             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10323                     return -TARGET_EFAULT;
10324             }
10325         }
10326         return ret;
10327 #endif
10328     case TARGET_NR_getpgid:
10329         return get_errno(getpgid(arg1));
10330     case TARGET_NR_fchdir:
10331         return get_errno(fchdir(arg1));
10332     case TARGET_NR_personality:
10333         return get_errno(personality(arg1));
10334 #ifdef TARGET_NR__llseek /* Not on alpha */
10335     case TARGET_NR__llseek:
10336         {
10337             int64_t res;
10338 #if !defined(__NR_llseek)
10339             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10340             if (res == -1) {
10341                 ret = get_errno(res);
10342             } else {
10343                 ret = 0;
10344             }
10345 #else
10346             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10347 #endif
10348             if ((ret == 0) && put_user_s64(res, arg4)) {
10349                 return -TARGET_EFAULT;
10350             }
10351         }
10352         return ret;
10353 #endif
10354 #ifdef TARGET_NR_getdents
10355     case TARGET_NR_getdents:
10356 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10357 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
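        /*
         * 32-bit target on a 64-bit host: the host linux_dirent layout
         * differs from the target one, so read into a scratch buffer and
         * repack each record into target_dirent format.
         */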
10358         {
10359             struct target_dirent *target_dirp;
10360             struct linux_dirent *dirp;
10361             abi_long count = arg3;
10362 
10363             dirp = g_try_malloc(count);
10364             if (!dirp) {
10365                 return -TARGET_ENOMEM;
10366             }
10367 
10368             ret = get_errno(sys_getdents(arg1, dirp, count));
10369             if (!is_error(ret)) {
10370                 struct linux_dirent *de;
10371                 struct target_dirent *tde;
10372                 int len = ret;
10373                 int reclen, treclen;
10374                 int count1, tnamelen;
10375 
10376                 count1 = 0;
10377                 de = dirp;
10378                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10379                     return -TARGET_EFAULT;
10380                 tde = target_dirp;
10381                 while (len > 0) {
10382                     reclen = de->d_reclen;
10383                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10384                     assert(tnamelen >= 0);
10385                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10386                     assert(count1 + treclen <= count);
10387                     tde->d_reclen = tswap16(treclen);
10388                     tde->d_ino = tswapal(de->d_ino);
10389                     tde->d_off = tswapal(de->d_off);
10390                     memcpy(tde->d_name, de->d_name, tnamelen);
10391                     de = (struct linux_dirent *)((char *)de + reclen);
10392                     len -= reclen;
10393                     tde = (struct target_dirent *)((char *)tde + treclen);
10394                     count1 += treclen;
10395                 }
10396                 ret = count1;
10397                 unlock_user(target_dirp, arg2, ret);
10398             }
10399             g_free(dirp);
10400         }
10401 #else
10402         {
10403             struct linux_dirent *dirp;
10404             abi_long count = arg3;
10405 
10406             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10407                 return -TARGET_EFAULT;
10408             ret = get_errno(sys_getdents(arg1, dirp, count));
10409             if (!is_error(ret)) {
10410                 struct linux_dirent *de;
10411                 int len = ret;
10412                 int reclen;
10413                 de = dirp;
10414                 while (len > 0) {
10415                     reclen = de->d_reclen;
10416                     if (reclen > len)
10417                         break;
10418                     de->d_reclen = tswap16(reclen);
10419                     tswapls(&de->d_ino);
10420                     tswapls(&de->d_off);
10421                     de = (struct linux_dirent *)((char *)de + reclen);
10422                     len -= reclen;
10423                 }
10424             }
10425             unlock_user(dirp, arg2, ret);
10426         }
10427 #endif
10428 #else
10429         /* Implement getdents in terms of getdents64 */
10430         {
10431             struct linux_dirent64 *dirp;
10432             abi_long count = arg3;
10433 
10434             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10435             if (!dirp) {
10436                 return -TARGET_EFAULT;
10437             }
10438             ret = get_errno(sys_getdents64(arg1, dirp, count));
10439             if (!is_error(ret)) {
10440                 /* Convert the dirent64 structs to target dirent.  We do this
10441                  * in-place, since we can guarantee that a target_dirent is no
10442                  * larger than a dirent64; however this means we have to be
10443                  * careful to read everything before writing in the new format.
10444                  */
10445                 struct linux_dirent64 *de;
10446                 struct target_dirent *tde;
10447                 int len = ret;
10448                 int tlen = 0;
10449 
10450                 de = dirp;
10451                 tde = (struct target_dirent *)dirp;
10452                 while (len > 0) {
10453                     int namelen, treclen;
10454                     int reclen = de->d_reclen;
10455                     uint64_t ino = de->d_ino;
10456                     int64_t off = de->d_off;
10457                     uint8_t type = de->d_type;
10458 
10459                     namelen = strlen(de->d_name);
10460                     treclen = offsetof(struct target_dirent, d_name)
10461                         + namelen + 2;
10462                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10463 
10464                     memmove(tde->d_name, de->d_name, namelen + 1);
10465                     tde->d_ino = tswapal(ino);
10466                     tde->d_off = tswapal(off);
10467                     tde->d_reclen = tswap16(treclen);
10468                     /* The target_dirent type is in what was formerly a padding
10469                      * byte at the end of the structure:
10470                      */
10471                     *(((char *)tde) + treclen - 1) = type;
10472 
10473                     de = (struct linux_dirent64 *)((char *)de + reclen);
10474                     tde = (struct target_dirent *)((char *)tde + treclen);
10475                     len -= reclen;
10476                     tlen += treclen;
10477                 }
10478                 ret = tlen;
10479             }
10480             unlock_user(dirp, arg2, ret);
10481         }
10482 #endif
10483         return ret;
10484 #endif /* TARGET_NR_getdents */
10485 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10486     case TARGET_NR_getdents64:
10487         {
10488             struct linux_dirent64 *dirp;
10489             abi_long count = arg3;
10490             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10491                 return -TARGET_EFAULT;
10492             ret = get_errno(sys_getdents64(arg1, dirp, count));
10493             if (!is_error(ret)) {
10494                 struct linux_dirent64 *de;
10495                 int len = ret;
10496                 int reclen;
10497                 de = dirp;
10498                 while (len > 0) {
10499                     reclen = de->d_reclen;
10500                     if (reclen > len)
10501                         break;
10502                     de->d_reclen = tswap16(reclen);
10503                     tswap64s((uint64_t *)&de->d_ino);
10504                     tswap64s((uint64_t *)&de->d_off);
10505                     de = (struct linux_dirent64 *)((char *)de + reclen);
10506                     len -= reclen;
10507                 }
10508             }
10509             unlock_user(dirp, arg2, ret);
10510         }
10511         return ret;
10512 #endif /* TARGET_NR_getdents64 */
10513 #if defined(TARGET_NR__newselect)
10514     case TARGET_NR__newselect:
10515         return do_select(arg1, arg2, arg3, arg4, arg5);
10516 #endif
10517 #ifdef TARGET_NR_poll
10518     case TARGET_NR_poll:
10519         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10520 #endif
10521 #ifdef TARGET_NR_ppoll
10522     case TARGET_NR_ppoll:
10523         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10524 #endif
10525 #ifdef TARGET_NR_ppoll_time64
10526     case TARGET_NR_ppoll_time64:
10527         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10528 #endif
10529     case TARGET_NR_flock:
10530         /* NOTE: the flock constant seems to be the same for every
10531            Linux platform */
10532         return get_errno(safe_flock(arg1, arg2));
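    /*
     * For readv/writev, lock_iovec() converts the target iovec array into
     * host iovecs and locks each buffer; unlock_iovec() copies read data
     * back to the guest and releases the locks.
     */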
10533     case TARGET_NR_readv:
10534         {
10535             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10536             if (vec != NULL) {
10537                 ret = get_errno(safe_readv(arg1, vec, arg3));
10538                 unlock_iovec(vec, arg2, arg3, 1);
10539             } else {
10540                 ret = -host_to_target_errno(errno);
10541             }
10542         }
10543         return ret;
10544     case TARGET_NR_writev:
10545         {
10546             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10547             if (vec != NULL) {
10548                 ret = get_errno(safe_writev(arg1, vec, arg3));
10549                 unlock_iovec(vec, arg2, arg3, 0);
10550             } else {
10551                 ret = -host_to_target_errno(errno);
10552             }
10553         }
10554         return ret;
10555 #if defined(TARGET_NR_preadv)
10556     case TARGET_NR_preadv:
10557         {
10558             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10559             if (vec != NULL) {
10560                 unsigned long low, high;
10561 
10562                 target_to_host_low_high(arg4, arg5, &low, &high);
10563                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10564                 unlock_iovec(vec, arg2, arg3, 1);
10565             } else {
10566                 ret = -host_to_target_errno(errno);
10567            }
10568         }
10569         return ret;
10570 #endif
10571 #if defined(TARGET_NR_pwritev)
10572     case TARGET_NR_pwritev:
10573         {
10574             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10575             if (vec != NULL) {
10576                 unsigned long low, high;
10577 
10578                 target_to_host_low_high(arg4, arg5, &low, &high);
10579                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10580                 unlock_iovec(vec, arg2, arg3, 0);
10581             } else {
10582                 ret = -host_to_target_errno(errno);
10583            }
10584         }
10585         return ret;
10586 #endif
10587     case TARGET_NR_getsid:
10588         return get_errno(getsid(arg1));
10589 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10590     case TARGET_NR_fdatasync:
10591         return get_errno(fdatasync(arg1));
10592 #endif
10593     case TARGET_NR_sched_getaffinity:
10594         {
10595             unsigned int mask_size;
10596             unsigned long *mask;
10597 
10598             /*
10599              * sched_getaffinity needs multiples of ulong, so need to take
10600              * care of mismatches between target ulong and host ulong sizes.
10601              */
10602             if (arg2 & (sizeof(abi_ulong) - 1)) {
10603                 return -TARGET_EINVAL;
10604             }
10605             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10606 
10607             mask = alloca(mask_size);
10608             memset(mask, 0, mask_size);
10609             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10610 
10611             if (!is_error(ret)) {
10612                 if (ret > arg2) {
10613                     /* More data returned than the caller's buffer will fit.
10614                      * This only happens if sizeof(abi_long) < sizeof(long)
10615                      * and the caller passed us a buffer holding an odd number
10616                      * of abi_longs. If the host kernel is actually using the
10617                      * extra 4 bytes then fail EINVAL; otherwise we can just
10618                      * ignore them and only copy the interesting part.
10619                      */
10620                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10621                     if (numcpus > arg2 * 8) {
10622                         return -TARGET_EINVAL;
10623                     }
10624                     ret = arg2;
10625                 }
10626 
10627                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10628                     return -TARGET_EFAULT;
10629                 }
10630             }
10631         }
10632         return ret;
10633     case TARGET_NR_sched_setaffinity:
10634         {
10635             unsigned int mask_size;
10636             unsigned long *mask;
10637 
10638             /*
10639              * sched_setaffinity needs multiples of ulong, so need to take
10640              * care of mismatches between target ulong and host ulong sizes.
10641              */
10642             if (arg2 & (sizeof(abi_ulong) - 1)) {
10643                 return -TARGET_EINVAL;
10644             }
10645             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10646             mask = alloca(mask_size);
10647 
10648             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10649             if (ret) {
10650                 return ret;
10651             }
10652 
10653             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10654         }
10655     case TARGET_NR_getcpu:
10656         {
10657             unsigned cpu, node;
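                  /* The third (tcache) argument is ignored by modern kernels, so pass NULL. */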
10658             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10659                                        arg2 ? &node : NULL,
10660                                        NULL));
10661             if (is_error(ret)) {
10662                 return ret;
10663             }
10664             if (arg1 && put_user_u32(cpu, arg1)) {
10665                 return -TARGET_EFAULT;
10666             }
10667             if (arg2 && put_user_u32(node, arg2)) {
10668                 return -TARGET_EFAULT;
10669             }
10670         }
10671         return ret;
10672     case TARGET_NR_sched_setparam:
10673         {
10674             struct sched_param *target_schp;
10675             struct sched_param schp;
10676 
10677             if (arg2 == 0) {
10678                 return -TARGET_EINVAL;
10679             }
10680             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10681                 return -TARGET_EFAULT;
10682             schp.sched_priority = tswap32(target_schp->sched_priority);
10683             unlock_user_struct(target_schp, arg2, 0);
10684             return get_errno(sched_setparam(arg1, &schp));
10685         }
10686     case TARGET_NR_sched_getparam:
10687         {
10688             struct sched_param *target_schp;
10689             struct sched_param schp;
10690 
10691             if (arg2 == 0) {
10692                 return -TARGET_EINVAL;
10693             }
10694             ret = get_errno(sched_getparam(arg1, &schp));
10695             if (!is_error(ret)) {
10696                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10697                     return -TARGET_EFAULT;
10698                 target_schp->sched_priority = tswap32(schp.sched_priority);
10699                 unlock_user_struct(target_schp, arg2, 1);
10700             }
10701         }
10702         return ret;
10703     case TARGET_NR_sched_setscheduler:
10704         {
10705             struct sched_param *target_schp;
10706             struct sched_param schp;
10707             if (arg3 == 0) {
10708                 return -TARGET_EINVAL;
10709             }
10710             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10711                 return -TARGET_EFAULT;
10712             schp.sched_priority = tswap32(target_schp->sched_priority);
10713             unlock_user_struct(target_schp, arg3, 0);
10714             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10715         }
10716     case TARGET_NR_sched_getscheduler:
10717         return get_errno(sched_getscheduler(arg1));
10718     case TARGET_NR_sched_yield:
10719         return get_errno(sched_yield());
10720     case TARGET_NR_sched_get_priority_max:
10721         return get_errno(sched_get_priority_max(arg1));
10722     case TARGET_NR_sched_get_priority_min:
10723         return get_errno(sched_get_priority_min(arg1));
10724 #ifdef TARGET_NR_sched_rr_get_interval
10725     case TARGET_NR_sched_rr_get_interval:
10726         {
10727             struct timespec ts;
10728             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10729             if (!is_error(ret)) {
10730                 ret = host_to_target_timespec(arg2, &ts);
10731             }
10732         }
10733         return ret;
10734 #endif
10735 #ifdef TARGET_NR_sched_rr_get_interval_time64
10736     case TARGET_NR_sched_rr_get_interval_time64:
10737         {
10738             struct timespec ts;
10739             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10740             if (!is_error(ret)) {
10741                 ret = host_to_target_timespec64(arg2, &ts);
10742             }
10743         }
10744         return ret;
10745 #endif
10746 #if defined(TARGET_NR_nanosleep)
10747     case TARGET_NR_nanosleep:
10748         {
10749             struct timespec req, rem;
10750             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10751             ret = get_errno(safe_nanosleep(&req, &rem));
10752             if (is_error(ret) && arg2) {
10753                 host_to_target_timespec(arg2, &rem);
10754             }
10755         }
10756         return ret;
10757 #endif
10758     case TARGET_NR_prctl:
10759         switch (arg1) {
10760         case PR_GET_PDEATHSIG:
10761         {
10762             int deathsig;
10763             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10764             if (!is_error(ret) && arg2
10765                 && put_user_s32(deathsig, arg2)) {
10766                 return -TARGET_EFAULT;
10767             }
10768             return ret;
10769         }
10770 #ifdef PR_GET_NAME
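              /* The comm name transferred here is at most 16 bytes, including the NUL. */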
10771         case PR_GET_NAME:
10772         {
10773             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10774             if (!name) {
10775                 return -TARGET_EFAULT;
10776             }
10777             ret = get_errno(prctl(arg1, (unsigned long)name,
10778                                   arg3, arg4, arg5));
10779             unlock_user(name, arg2, 16);
10780             return ret;
10781         }
10782         case PR_SET_NAME:
10783         {
10784             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10785             if (!name) {
10786                 return -TARGET_EFAULT;
10787             }
10788             ret = get_errno(prctl(arg1, (unsigned long)name,
10789                                   arg3, arg4, arg5));
10790             unlock_user(name, arg2, 0);
10791             return ret;
10792         }
10793 #endif
10794 #ifdef TARGET_MIPS
10795         case TARGET_PR_GET_FP_MODE:
10796         {
10797             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10798             ret = 0;
10799             if (env->CP0_Status & (1 << CP0St_FR)) {
10800                 ret |= TARGET_PR_FP_MODE_FR;
10801             }
10802             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10803                 ret |= TARGET_PR_FP_MODE_FRE;
10804             }
10805             return ret;
10806         }
10807         case TARGET_PR_SET_FP_MODE:
10808         {
10809             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10810             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10811             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10812             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10813             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10814 
10815             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10816                                             TARGET_PR_FP_MODE_FRE;
10817 
10818             /* If nothing to change, return right away, successfully.  */
10819             if (old_fr == new_fr && old_fre == new_fre) {
10820                 return 0;
10821             }
10822             /* Check the value is valid */
10823             if (arg2 & ~known_bits) {
10824                 return -TARGET_EOPNOTSUPP;
10825             }
10826             /* Setting FRE without FR is not supported.  */
10827             if (new_fre && !new_fr) {
10828                 return -TARGET_EOPNOTSUPP;
10829             }
10830             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10831                 /* FR1 is not supported */
10832                 return -TARGET_EOPNOTSUPP;
10833             }
10834             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10835                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10836                 /* cannot set FR=0 */
10837                 return -TARGET_EOPNOTSUPP;
10838             }
10839             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10840                 /* Cannot set FRE=1 */
10841                 return -TARGET_EOPNOTSUPP;
10842             }
10843 
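                  /*
                   * The FR bit changes how the 32 single-precision registers
                   * map onto the 64-bit FPRs, so shuffle the affected words
                   * so that register contents survive the mode switch.
                   */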
10844             int i;
10845             fpr_t *fpr = env->active_fpu.fpr;
10846             for (i = 0; i < 32 ; i += 2) {
10847                 if (!old_fr && new_fr) {
10848                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10849                 } else if (old_fr && !new_fr) {
10850                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10851                 }
10852             }
10853 
10854             if (new_fr) {
10855                 env->CP0_Status |= (1 << CP0St_FR);
10856                 env->hflags |= MIPS_HFLAG_F64;
10857             } else {
10858                 env->CP0_Status &= ~(1 << CP0St_FR);
10859                 env->hflags &= ~MIPS_HFLAG_F64;
10860             }
10861             if (new_fre) {
10862                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10863                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10864                     env->hflags |= MIPS_HFLAG_FRE;
10865                 }
10866             } else {
10867                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10868                 env->hflags &= ~MIPS_HFLAG_FRE;
10869             }
10870 
10871             return 0;
10872         }
10873 #endif /* MIPS */
10874 #ifdef TARGET_AARCH64
10875         case TARGET_PR_SVE_SET_VL:
10876             /*
10877              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10878              * PR_SVE_VL_INHERIT.  Note the kernel definition
10879              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10880              * even though the current architectural maximum is VQ=16.
10881              */
10882             ret = -TARGET_EINVAL;
10883             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10884                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10885                 CPUARMState *env = cpu_env;
10886                 ARMCPU *cpu = env_archcpu(env);
10887                 uint32_t vq, old_vq;
10888 
10889                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10890                 vq = MAX(arg2 / 16, 1);
10891                 vq = MIN(vq, cpu->sve_max_vq);
10892 
10893                 if (vq < old_vq) {
10894                     aarch64_sve_narrow_vq(env, vq);
10895                 }
10896                 env->vfp.zcr_el[1] = vq - 1;
10897                 arm_rebuild_hflags(env);
10898                 ret = vq * 16;
10899             }
10900             return ret;
10901         case TARGET_PR_SVE_GET_VL:
10902             ret = -TARGET_EINVAL;
10903             {
10904                 ARMCPU *cpu = env_archcpu(cpu_env);
10905                 if (cpu_isar_feature(aa64_sve, cpu)) {
10906                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10907                 }
10908             }
10909             return ret;
10910         case TARGET_PR_PAC_RESET_KEYS:
10911             {
10912                 CPUARMState *env = cpu_env;
10913                 ARMCPU *cpu = env_archcpu(env);
10914 
10915                 if (arg3 || arg4 || arg5) {
10916                     return -TARGET_EINVAL;
10917                 }
10918                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10919                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10920                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10921                                TARGET_PR_PAC_APGAKEY);
10922                     int ret = 0;
10923                     Error *err = NULL;
10924 
10925                     if (arg2 == 0) {
10926                         arg2 = all;
10927                     } else if (arg2 & ~all) {
10928                         return -TARGET_EINVAL;
10929                     }
10930                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10931                         ret |= qemu_guest_getrandom(&env->keys.apia,
10932                                                     sizeof(ARMPACKey), &err);
10933                     }
10934                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10935                         ret |= qemu_guest_getrandom(&env->keys.apib,
10936                                                     sizeof(ARMPACKey), &err);
10937                     }
10938                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10939                         ret |= qemu_guest_getrandom(&env->keys.apda,
10940                                                     sizeof(ARMPACKey), &err);
10941                     }
10942                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10943                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10944                                                     sizeof(ARMPACKey), &err);
10945                     }
10946                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10947                         ret |= qemu_guest_getrandom(&env->keys.apga,
10948                                                     sizeof(ARMPACKey), &err);
10949                     }
10950                     if (ret != 0) {
10951                         /*
10952                          * Some unknown failure in the crypto.  The best
10953                          * we can do is log it and fail the syscall.
10954                          * The real syscall cannot fail this way.
10955                          */
10956                         qemu_log_mask(LOG_UNIMP,
10957                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10958                                       error_get_pretty(err));
10959                         error_free(err);
10960                         return -TARGET_EIO;
10961                     }
10962                     return 0;
10963                 }
10964             }
10965             return -TARGET_EINVAL;
10966 #endif /* AARCH64 */
10967         case PR_GET_SECCOMP:
10968         case PR_SET_SECCOMP:
10969             /* Disable seccomp to prevent the target disabling syscalls we
10970              * need. */
10971             return -TARGET_EINVAL;
10972         default:
10973             /* Most prctl options have no pointer arguments */
10974             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10975         }
10976         break;
10977 #ifdef TARGET_NR_arch_prctl
10978     case TARGET_NR_arch_prctl:
10979         return do_arch_prctl(cpu_env, arg1, arg2);
10980 #endif
10981 #ifdef TARGET_NR_pread64
10982     case TARGET_NR_pread64:
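              /*
               * Some 32-bit ABIs pass 64-bit values in aligned register pairs,
               * which inserts a padding slot; the offset halves then arrive one
               * argument later, so shift them down into (arg4, arg5).
               */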
10983         if (regpairs_aligned(cpu_env, num)) {
10984             arg4 = arg5;
10985             arg5 = arg6;
10986         }
10987         if (arg2 == 0 && arg3 == 0) {
10988             /* Special-case NULL buffer and zero length, which should succeed */
10989             p = 0;
10990         } else {
10991             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10992             if (!p) {
10993                 return -TARGET_EFAULT;
10994             }
10995         }
10996         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10997         unlock_user(p, arg2, ret);
10998         return ret;
10999     case TARGET_NR_pwrite64:
11000         if (regpairs_aligned(cpu_env, num)) {
11001             arg4 = arg5;
11002             arg5 = arg6;
11003         }
11004         if (arg2 == 0 && arg3 == 0) {
11005             /* Special-case NULL buffer and zero length, which should succeed */
11006             p = 0;
11007         } else {
11008             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11009             if (!p) {
11010                 return -TARGET_EFAULT;
11011             }
11012         }
11013         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11014         unlock_user(p, arg2, 0);
11015         return ret;
11016 #endif
11017     case TARGET_NR_getcwd:
11018         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11019             return -TARGET_EFAULT;
11020         ret = get_errno(sys_getcwd1(p, arg2));
11021         unlock_user(p, arg1, ret);
11022         return ret;
11023     case TARGET_NR_capget:
11024     case TARGET_NR_capset:
11025     {
11026         struct target_user_cap_header *target_header;
11027         struct target_user_cap_data *target_data = NULL;
11028         struct __user_cap_header_struct header;
11029         struct __user_cap_data_struct data[2];
11030         struct __user_cap_data_struct *dataptr = NULL;
11031         int i, target_datalen;
11032         int data_items = 1;
11033 
11034         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11035             return -TARGET_EFAULT;
11036         }
11037         header.version = tswap32(target_header->version);
11038         header.pid = tswap32(target_header->pid);
11039 
11040         if (header.version != _LINUX_CAPABILITY_VERSION) {
11041             /* Version 2 and up takes pointer to two user_data structs */
11042             data_items = 2;
11043         }
11044 
11045         target_datalen = sizeof(*target_data) * data_items;
11046 
11047         if (arg2) {
11048             if (num == TARGET_NR_capget) {
11049                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11050             } else {
11051                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11052             }
11053             if (!target_data) {
11054                 unlock_user_struct(target_header, arg1, 0);
11055                 return -TARGET_EFAULT;
11056             }
11057 
11058             if (num == TARGET_NR_capset) {
11059                 for (i = 0; i < data_items; i++) {
11060                     data[i].effective = tswap32(target_data[i].effective);
11061                     data[i].permitted = tswap32(target_data[i].permitted);
11062                     data[i].inheritable = tswap32(target_data[i].inheritable);
11063                 }
11064             }
11065 
11066             dataptr = data;
11067         }
11068 
11069         if (num == TARGET_NR_capget) {
11070             ret = get_errno(capget(&header, dataptr));
11071         } else {
11072             ret = get_errno(capset(&header, dataptr));
11073         }
11074 
11075         /* The kernel always updates version for both capget and capset */
11076         target_header->version = tswap32(header.version);
11077         unlock_user_struct(target_header, arg1, 1);
11078 
11079         if (arg2) {
11080             if (num == TARGET_NR_capget) {
11081                 for (i = 0; i < data_items; i++) {
11082                     target_data[i].effective = tswap32(data[i].effective);
11083                     target_data[i].permitted = tswap32(data[i].permitted);
11084                     target_data[i].inheritable = tswap32(data[i].inheritable);
11085                 }
11086                 unlock_user(target_data, arg2, target_datalen);
11087             } else {
11088                 unlock_user(target_data, arg2, 0);
11089             }
11090         }
11091         return ret;
11092     }
11093     case TARGET_NR_sigaltstack:
11094         return do_sigaltstack(arg1, arg2,
11095                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11096 
11097 #ifdef CONFIG_SENDFILE
11098 #ifdef TARGET_NR_sendfile
11099     case TARGET_NR_sendfile:
11100     {
11101         off_t *offp = NULL;
11102         off_t off;
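              /*
               * sendfile() reads and advances the offset through *offp, so copy
               * the guest value in first and the updated value back on success.
               */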
11103         if (arg3) {
11104             ret = get_user_sal(off, arg3);
11105             if (is_error(ret)) {
11106                 return ret;
11107             }
11108             offp = &off;
11109         }
11110         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11111         if (!is_error(ret) && arg3) {
11112             abi_long ret2 = put_user_sal(off, arg3);
11113             if (is_error(ret2)) {
11114                 ret = ret2;
11115             }
11116         }
11117         return ret;
11118     }
11119 #endif
11120 #ifdef TARGET_NR_sendfile64
11121     case TARGET_NR_sendfile64:
11122     {
11123         off_t *offp = NULL;
11124         off_t off;
11125         if (arg3) {
11126             ret = get_user_s64(off, arg3);
11127             if (is_error(ret)) {
11128                 return ret;
11129             }
11130             offp = &off;
11131         }
11132         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11133         if (!is_error(ret) && arg3) {
11134             abi_long ret2 = put_user_s64(off, arg3);
11135             if (is_error(ret2)) {
11136                 ret = ret2;
11137             }
11138         }
11139         return ret;
11140     }
11141 #endif
11142 #endif
11143 #ifdef TARGET_NR_vfork
11144     case TARGET_NR_vfork:
11145         return get_errno(do_fork(cpu_env,
11146                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11147                          0, 0, 0, 0));
11148 #endif
11149 #ifdef TARGET_NR_ugetrlimit
11150     case TARGET_NR_ugetrlimit:
11151     {
11152         struct rlimit rlim;
11153         int resource = target_to_host_resource(arg1);
11154         ret = get_errno(getrlimit(resource, &rlim));
11155         if (!is_error(ret)) {
11156             struct target_rlimit *target_rlim;
11157             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11158                 return -TARGET_EFAULT;
11159             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11160             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11161             unlock_user_struct(target_rlim, arg2, 1);
11162         }
11163         return ret;
11164     }
11165 #endif
11166 #ifdef TARGET_NR_truncate64
11167     case TARGET_NR_truncate64:
11168         if (!(p = lock_user_string(arg1)))
11169             return -TARGET_EFAULT;
11170         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11171         unlock_user(p, arg1, 0);
11172         return ret;
11173 #endif
11174 #ifdef TARGET_NR_ftruncate64
11175     case TARGET_NR_ftruncate64:
11176         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11177 #endif
11178 #ifdef TARGET_NR_stat64
11179     case TARGET_NR_stat64:
11180         if (!(p = lock_user_string(arg1))) {
11181             return -TARGET_EFAULT;
11182         }
11183         ret = get_errno(stat(path(p), &st));
11184         unlock_user(p, arg1, 0);
11185         if (!is_error(ret))
11186             ret = host_to_target_stat64(cpu_env, arg2, &st);
11187         return ret;
11188 #endif
11189 #ifdef TARGET_NR_lstat64
11190     case TARGET_NR_lstat64:
11191         if (!(p = lock_user_string(arg1))) {
11192             return -TARGET_EFAULT;
11193         }
11194         ret = get_errno(lstat(path(p), &st));
11195         unlock_user(p, arg1, 0);
11196         if (!is_error(ret))
11197             ret = host_to_target_stat64(cpu_env, arg2, &st);
11198         return ret;
11199 #endif
11200 #ifdef TARGET_NR_fstat64
11201     case TARGET_NR_fstat64:
11202         ret = get_errno(fstat(arg1, &st));
11203         if (!is_error(ret))
11204             ret = host_to_target_stat64(cpu_env, arg2, &st);
11205         return ret;
11206 #endif
11207 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11208 #ifdef TARGET_NR_fstatat64
11209     case TARGET_NR_fstatat64:
11210 #endif
11211 #ifdef TARGET_NR_newfstatat
11212     case TARGET_NR_newfstatat:
11213 #endif
11214         if (!(p = lock_user_string(arg2))) {
11215             return -TARGET_EFAULT;
11216         }
11217         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11218         unlock_user(p, arg2, 0);
11219         if (!is_error(ret))
11220             ret = host_to_target_stat64(cpu_env, arg3, &st);
11221         return ret;
11222 #endif
11223 #if defined(TARGET_NR_statx)
11224     case TARGET_NR_statx:
11225         {
11226             struct target_statx *target_stx;
11227             int dirfd = arg1;
11228             int flags = arg3;
11229 
11230             p = lock_user_string(arg2);
11231             if (p == NULL) {
11232                 return -TARGET_EFAULT;
11233             }
11234 #if defined(__NR_statx)
11235             {
11236                 /*
11237                  * It is assumed that struct statx is architecture independent.
11238                  */
11239                 struct target_statx host_stx;
11240                 int mask = arg4;
11241 
11242                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11243                 if (!is_error(ret)) {
11244                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11245                         unlock_user(p, arg2, 0);
11246                         return -TARGET_EFAULT;
11247                     }
11248                 }
11249 
11250                 if (ret != -TARGET_ENOSYS) {
11251                     unlock_user(p, arg2, 0);
11252                     return ret;
11253                 }
11254             }
11255 #endif
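                  /*
                   * Host statx() is unavailable or returned ENOSYS: emulate it
                   * with fstatat() and fill in the fields struct stat provides.
                   */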
11256             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11257             unlock_user(p, arg2, 0);
11258 
11259             if (!is_error(ret)) {
11260                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11261                     return -TARGET_EFAULT;
11262                 }
11263                 memset(target_stx, 0, sizeof(*target_stx));
11264                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11265                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11266                 __put_user(st.st_ino, &target_stx->stx_ino);
11267                 __put_user(st.st_mode, &target_stx->stx_mode);
11268                 __put_user(st.st_uid, &target_stx->stx_uid);
11269                 __put_user(st.st_gid, &target_stx->stx_gid);
11270                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11271                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11272                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11273                 __put_user(st.st_size, &target_stx->stx_size);
11274                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11275                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11276                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11277                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11278                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11279                 unlock_user_struct(target_stx, arg5, 1);
11280             }
11281         }
11282         return ret;
11283 #endif
11284 #ifdef TARGET_NR_lchown
11285     case TARGET_NR_lchown:
11286         if (!(p = lock_user_string(arg1)))
11287             return -TARGET_EFAULT;
11288         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11289         unlock_user(p, arg1, 0);
11290         return ret;
11291 #endif
11292 #ifdef TARGET_NR_getuid
11293     case TARGET_NR_getuid:
11294         return get_errno(high2lowuid(getuid()));
11295 #endif
11296 #ifdef TARGET_NR_getgid
11297     case TARGET_NR_getgid:
11298         return get_errno(high2lowgid(getgid()));
11299 #endif
11300 #ifdef TARGET_NR_geteuid
11301     case TARGET_NR_geteuid:
11302         return get_errno(high2lowuid(geteuid()));
11303 #endif
11304 #ifdef TARGET_NR_getegid
11305     case TARGET_NR_getegid:
11306         return get_errno(high2lowgid(getegid()));
11307 #endif
11308     case TARGET_NR_setreuid:
11309         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11310     case TARGET_NR_setregid:
11311         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11312     case TARGET_NR_getgroups:
11313         {
11314             int gidsetsize = arg1;
11315             target_id *target_grouplist;
11316             gid_t *grouplist;
11317             int i;
11318 
11319             grouplist = alloca(gidsetsize * sizeof(gid_t));
11320             ret = get_errno(getgroups(gidsetsize, grouplist));
11321             if (gidsetsize == 0)
11322                 return ret;
11323             if (!is_error(ret)) {
11324                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11325                 if (!target_grouplist)
11326                     return -TARGET_EFAULT;
11327                 for (i = 0; i < ret; i++)
11328                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11329                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11330             }
11331         }
11332         return ret;
11333     case TARGET_NR_setgroups:
11334         {
11335             int gidsetsize = arg1;
11336             target_id *target_grouplist;
11337             gid_t *grouplist = NULL;
11338             int i;
11339             if (gidsetsize) {
11340                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11341                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11342                 if (!target_grouplist) {
11343                     return -TARGET_EFAULT;
11344                 }
11345                 for (i = 0; i < gidsetsize; i++) {
11346                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11347                 }
11348                 unlock_user(target_grouplist, arg2, 0);
11349             }
11350             return get_errno(setgroups(gidsetsize, grouplist));
11351         }
11352     case TARGET_NR_fchown:
11353         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11354 #if defined(TARGET_NR_fchownat)
11355     case TARGET_NR_fchownat:
11356         if (!(p = lock_user_string(arg2)))
11357             return -TARGET_EFAULT;
11358         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11359                                  low2highgid(arg4), arg5));
11360         unlock_user(p, arg2, 0);
11361         return ret;
11362 #endif
11363 #ifdef TARGET_NR_setresuid
11364     case TARGET_NR_setresuid:
11365         return get_errno(sys_setresuid(low2highuid(arg1),
11366                                        low2highuid(arg2),
11367                                        low2highuid(arg3)));
11368 #endif
11369 #ifdef TARGET_NR_getresuid
11370     case TARGET_NR_getresuid:
11371         {
11372             uid_t ruid, euid, suid;
11373             ret = get_errno(getresuid(&ruid, &euid, &suid));
11374             if (!is_error(ret)) {
11375                 if (put_user_id(high2lowuid(ruid), arg1)
11376                     || put_user_id(high2lowuid(euid), arg2)
11377                     || put_user_id(high2lowuid(suid), arg3))
11378                     return -TARGET_EFAULT;
11379             }
11380         }
11381         return ret;
11382 #endif
11383 #ifdef TARGET_NR_getresgid
11384     case TARGET_NR_setresgid:
11385         return get_errno(sys_setresgid(low2highgid(arg1),
11386                                        low2highgid(arg2),
11387                                        low2highgid(arg3)));
11388 #endif
11389 #ifdef TARGET_NR_getresgid
11390     case TARGET_NR_getresgid:
11391         {
11392             gid_t rgid, egid, sgid;
11393             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11394             if (!is_error(ret)) {
11395                 if (put_user_id(high2lowgid(rgid), arg1)
11396                     || put_user_id(high2lowgid(egid), arg2)
11397                     || put_user_id(high2lowgid(sgid), arg3))
11398                     return -TARGET_EFAULT;
11399             }
11400         }
11401         return ret;
11402 #endif
11403 #ifdef TARGET_NR_chown
11404     case TARGET_NR_chown:
11405         if (!(p = lock_user_string(arg1)))
11406             return -TARGET_EFAULT;
11407         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11408         unlock_user(p, arg1, 0);
11409         return ret;
11410 #endif
11411     case TARGET_NR_setuid:
11412         return get_errno(sys_setuid(low2highuid(arg1)));
11413     case TARGET_NR_setgid:
11414         return get_errno(sys_setgid(low2highgid(arg1)));
11415     case TARGET_NR_setfsuid:
11416         return get_errno(setfsuid(arg1));
11417     case TARGET_NR_setfsgid:
11418         return get_errno(setfsgid(arg1));
11419 
11420 #ifdef TARGET_NR_lchown32
11421     case TARGET_NR_lchown32:
11422         if (!(p = lock_user_string(arg1)))
11423             return -TARGET_EFAULT;
11424         ret = get_errno(lchown(p, arg2, arg3));
11425         unlock_user(p, arg1, 0);
11426         return ret;
11427 #endif
11428 #ifdef TARGET_NR_getuid32
11429     case TARGET_NR_getuid32:
11430         return get_errno(getuid());
11431 #endif
11432 
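          /*
           * Alpha's getx?id syscalls return a pair of IDs: the real one as
           * the normal return value and the effective one in register a4.
           */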
11433 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11434     /* Alpha specific */
11435     case TARGET_NR_getxuid:
11436         {
11437             uid_t euid;
11438             euid = geteuid();
11439             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11440         }
11441         return get_errno(getuid());
11442 #endif
11443 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11444     /* Alpha specific */
11445     case TARGET_NR_getxgid:
11446         {
11447             gid_t egid;
11448             egid = getegid();
11449             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11450         }
11451         return get_errno(getgid());
11452 #endif
11453 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11454     /* Alpha specific */
11455     case TARGET_NR_osf_getsysinfo:
11456         ret = -TARGET_EOPNOTSUPP;
11457         switch (arg1) {
11458           case TARGET_GSI_IEEE_FP_CONTROL:
11459             {
11460                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11461                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11462 
11463                 swcr &= ~SWCR_STATUS_MASK;
11464                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11465 
11466                 if (put_user_u64(swcr, arg2))
11467                     return -TARGET_EFAULT;
11468                 ret = 0;
11469             }
11470             break;
11471 
11472           /* case GSI_IEEE_STATE_AT_SIGNAL:
11473              -- Not implemented in linux kernel.
11474              case GSI_UACPROC:
11475              -- Retrieves current unaligned access state; not much used.
11476              case GSI_PROC_TYPE:
11477              -- Retrieves implver information; surely not used.
11478              case GSI_GET_HWRPB:
11479              -- Grabs a copy of the HWRPB; surely not used.
11480           */
11481         }
11482         return ret;
11483 #endif
11484 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11485     /* Alpha specific */
11486     case TARGET_NR_osf_setsysinfo:
11487         ret = -TARGET_EOPNOTSUPP;
11488         switch (arg1) {
11489           case TARGET_SSI_IEEE_FP_CONTROL:
11490             {
11491                 uint64_t swcr, fpcr;
11492 
11493                 if (get_user_u64(swcr, arg2)) {
11494                     return -TARGET_EFAULT;
11495                 }
11496 
11497                 /*
11498                  * The kernel calls swcr_update_status to update the
11499                  * status bits from the fpcr at every point that it
11500                  * could be queried.  Therefore, we store the status
11501                  * bits only in FPCR.
11502                  */
11503                 ((CPUAlphaState *)cpu_env)->swcr
11504                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11505 
11506                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11507                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11508                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11509                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11510                 ret = 0;
11511             }
11512             break;
11513 
11514           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11515             {
11516                 uint64_t exc, fpcr, fex;
11517 
11518                 if (get_user_u64(exc, arg2)) {
11519                     return -TARGET_EFAULT;
11520                 }
11521                 exc &= SWCR_STATUS_MASK;
11522                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11523 
11524                 /* Old exceptions are not signaled.  */
11525                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11526                 fex = exc & ~fex;
11527                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11528                 fex &= ((CPUArchState *)cpu_env)->swcr;
11529 
11530                 /* Update the hardware fpcr.  */
11531                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11532                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11533 
11534                 if (fex) {
11535                     int si_code = TARGET_FPE_FLTUNK;
11536                     target_siginfo_t info;
11537 
11538                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11539                         si_code = TARGET_FPE_FLTUND;
11540                     }
11541                     if (fex & SWCR_TRAP_ENABLE_INE) {
11542                         si_code = TARGET_FPE_FLTRES;
11543                     }
11544                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11545                         si_code = TARGET_FPE_FLTUND;
11546                     }
11547                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11548                         si_code = TARGET_FPE_FLTOVF;
11549                     }
11550                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11551                         si_code = TARGET_FPE_FLTDIV;
11552                     }
11553                     if (fex & SWCR_TRAP_ENABLE_INV) {
11554                         si_code = TARGET_FPE_FLTINV;
11555                     }
11556 
11557                     info.si_signo = SIGFPE;
11558                     info.si_errno = 0;
11559                     info.si_code = si_code;
11560                     info._sifields._sigfault._addr
11561                         = ((CPUArchState *)cpu_env)->pc;
11562                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11563                                  QEMU_SI_FAULT, &info);
11564                 }
11565                 ret = 0;
11566             }
11567             break;
11568 
11569           /* case SSI_NVPAIRS:
11570              -- Used with SSIN_UACPROC to enable unaligned accesses.
11571              case SSI_IEEE_STATE_AT_SIGNAL:
11572              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11573              -- Not implemented in linux kernel
11574           */
11575         }
11576         return ret;
11577 #endif
11578 #ifdef TARGET_NR_osf_sigprocmask
11579     /* Alpha specific.  */
11580     case TARGET_NR_osf_sigprocmask:
11581         {
11582             abi_ulong mask;
11583             int how;
11584             sigset_t set, oldset;
11585 
11586             switch (arg1) {
11587             case TARGET_SIG_BLOCK:
11588                 how = SIG_BLOCK;
11589                 break;
11590             case TARGET_SIG_UNBLOCK:
11591                 how = SIG_UNBLOCK;
11592                 break;
11593             case TARGET_SIG_SETMASK:
11594                 how = SIG_SETMASK;
11595                 break;
11596             default:
11597                 return -TARGET_EINVAL;
11598             }
11599             mask = arg2;
11600             target_to_host_old_sigset(&set, &mask);
11601             ret = do_sigprocmask(how, &set, &oldset);
11602             if (!ret) {
11603                 host_to_target_old_sigset(&mask, &oldset);
11604                 ret = mask;
11605             }
11606         }
11607         return ret;
11608 #endif
11609 
11610 #ifdef TARGET_NR_getgid32
11611     case TARGET_NR_getgid32:
11612         return get_errno(getgid());
11613 #endif
11614 #ifdef TARGET_NR_geteuid32
11615     case TARGET_NR_geteuid32:
11616         return get_errno(geteuid());
11617 #endif
11618 #ifdef TARGET_NR_getegid32
11619     case TARGET_NR_getegid32:
11620         return get_errno(getegid());
11621 #endif
11622 #ifdef TARGET_NR_setreuid32
11623     case TARGET_NR_setreuid32:
11624         return get_errno(setreuid(arg1, arg2));
11625 #endif
11626 #ifdef TARGET_NR_setregid32
11627     case TARGET_NR_setregid32:
11628         return get_errno(setregid(arg1, arg2));
11629 #endif
11630 #ifdef TARGET_NR_getgroups32
11631     case TARGET_NR_getgroups32:
11632         {
11633             int gidsetsize = arg1;
11634             uint32_t *target_grouplist;
11635             gid_t *grouplist;
11636             int i;
11637 
11638             grouplist = alloca(gidsetsize * sizeof(gid_t));
11639             ret = get_errno(getgroups(gidsetsize, grouplist));
11640             if (gidsetsize == 0)
11641                 return ret;
11642             if (!is_error(ret)) {
11643                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11644                 if (!target_grouplist) {
11645                     return -TARGET_EFAULT;
11646                 }
11647                 for (i = 0; i < ret; i++)
11648                     target_grouplist[i] = tswap32(grouplist[i]);
11649                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11650             }
11651         }
11652         return ret;
11653 #endif
11654 #ifdef TARGET_NR_setgroups32
11655     case TARGET_NR_setgroups32:
11656         {
11657             int gidsetsize = arg1;
11658             uint32_t *target_grouplist;
11659             gid_t *grouplist;
11660             int i;
11661 
11662             grouplist = alloca(gidsetsize * sizeof(gid_t));
11663             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11664             if (!target_grouplist) {
11665                 return -TARGET_EFAULT;
11666             }
11667             for (i = 0; i < gidsetsize; i++)
11668                 grouplist[i] = tswap32(target_grouplist[i]);
11669             unlock_user(target_grouplist, arg2, 0);
11670             return get_errno(setgroups(gidsetsize, grouplist));
11671         }
11672 #endif
11673 #ifdef TARGET_NR_fchown32
11674     case TARGET_NR_fchown32:
11675         return get_errno(fchown(arg1, arg2, arg3));
11676 #endif
11677 #ifdef TARGET_NR_setresuid32
11678     case TARGET_NR_setresuid32:
11679         return get_errno(sys_setresuid(arg1, arg2, arg3));
11680 #endif
11681 #ifdef TARGET_NR_getresuid32
11682     case TARGET_NR_getresuid32:
11683         {
11684             uid_t ruid, euid, suid;
11685             ret = get_errno(getresuid(&ruid, &euid, &suid));
11686             if (!is_error(ret)) {
11687                 if (put_user_u32(ruid, arg1)
11688                     || put_user_u32(euid, arg2)
11689                     || put_user_u32(suid, arg3))
11690                     return -TARGET_EFAULT;
11691             }
11692         }
11693         return ret;
11694 #endif
11695 #ifdef TARGET_NR_setresgid32
11696     case TARGET_NR_setresgid32:
11697         return get_errno(sys_setresgid(arg1, arg2, arg3));
11698 #endif
11699 #ifdef TARGET_NR_getresgid32
11700     case TARGET_NR_getresgid32:
11701         {
11702             gid_t rgid, egid, sgid;
11703             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11704             if (!is_error(ret)) {
11705                 if (put_user_u32(rgid, arg1)
11706                     || put_user_u32(egid, arg2)
11707                     || put_user_u32(sgid, arg3))
11708                     return -TARGET_EFAULT;
11709             }
11710         }
11711         return ret;
11712 #endif
11713 #ifdef TARGET_NR_chown32
11714     case TARGET_NR_chown32:
11715         if (!(p = lock_user_string(arg1)))
11716             return -TARGET_EFAULT;
11717         ret = get_errno(chown(p, arg2, arg3));
11718         unlock_user(p, arg1, 0);
11719         return ret;
11720 #endif
11721 #ifdef TARGET_NR_setuid32
11722     case TARGET_NR_setuid32:
11723         return get_errno(sys_setuid(arg1));
11724 #endif
11725 #ifdef TARGET_NR_setgid32
11726     case TARGET_NR_setgid32:
11727         return get_errno(sys_setgid(arg1));
11728 #endif
11729 #ifdef TARGET_NR_setfsuid32
11730     case TARGET_NR_setfsuid32:
11731         return get_errno(setfsuid(arg1));
11732 #endif
11733 #ifdef TARGET_NR_setfsgid32
11734     case TARGET_NR_setfsgid32:
11735         return get_errno(setfsgid(arg1));
11736 #endif
11737 #ifdef TARGET_NR_mincore
11738     case TARGET_NR_mincore:
11739         {
11740             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11741             if (!a) {
11742                 return -TARGET_ENOMEM;
11743             }
11744             p = lock_user_string(arg3);
11745             if (!p) {
11746                 ret = -TARGET_EFAULT;
11747             } else {
11748                 ret = get_errno(mincore(a, arg2, p));
11749                 unlock_user(p, arg3, ret);
11750             }
11751             unlock_user(a, arg1, 0);
11752         }
11753         return ret;
11754 #endif
11755 #ifdef TARGET_NR_arm_fadvise64_64
11756     case TARGET_NR_arm_fadvise64_64:
11757         /* arm_fadvise64_64 looks like fadvise64_64 but
11758          * with different argument order: fd, advice, offset, len
11759          * rather than the usual fd, offset, len, advice.
11760          * Note that offset and len are both 64-bit so appear as
11761          * pairs of 32-bit registers.
11762          */
11763         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11764                             target_offset64(arg5, arg6), arg2);
11765         return -host_to_target_errno(ret);
11766 #endif
11767 
11768 #if TARGET_ABI_BITS == 32
11769 
11770 #ifdef TARGET_NR_fadvise64_64
11771     case TARGET_NR_fadvise64_64:
11772 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11773         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11774         ret = arg2;
11775         arg2 = arg3;
11776         arg3 = arg4;
11777         arg4 = arg5;
11778         arg5 = arg6;
11779         arg6 = ret;
11780 #else
11781         /* 6 args: fd, offset (high, low), len (high, low), advice */
11782         if (regpairs_aligned(cpu_env, num)) {
11783             /* offset is in (3,4), len in (5,6) and advice in 7 */
11784             arg2 = arg3;
11785             arg3 = arg4;
11786             arg4 = arg5;
11787             arg5 = arg6;
11788             arg6 = arg7;
11789         }
11790 #endif
11791         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11792                             target_offset64(arg4, arg5), arg6);
11793         return -host_to_target_errno(ret);
11794 #endif
11795 
11796 #ifdef TARGET_NR_fadvise64
11797     case TARGET_NR_fadvise64:
11798         /* 5 args: fd, offset (high, low), len, advice */
11799         if (regpairs_aligned(cpu_env, num)) {
11800             /* offset is in (3,4), len in 5 and advice in 6 */
11801             arg2 = arg3;
11802             arg3 = arg4;
11803             arg4 = arg5;
11804             arg5 = arg6;
11805         }
11806         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11807         return -host_to_target_errno(ret);
11808 #endif
11809 
11810 #else /* not a 32-bit ABI */
11811 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11812 #ifdef TARGET_NR_fadvise64_64
11813     case TARGET_NR_fadvise64_64:
11814 #endif
11815 #ifdef TARGET_NR_fadvise64
11816     case TARGET_NR_fadvise64:
11817 #endif
11818 #ifdef TARGET_S390X
11819         switch (arg4) {
11820         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11821         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11822         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11823         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11824         default: break;
11825         }
11826 #endif
11827         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11828 #endif
11829 #endif /* end of 64-bit ABI fadvise handling */
11830 
11831 #ifdef TARGET_NR_madvise
11832     case TARGET_NR_madvise:
11833         /* A straight passthrough may not be safe because qemu sometimes
11834            turns private file-backed mappings into anonymous mappings.
11835            This will break MADV_DONTNEED.
11836            This is a hint, so ignoring and returning success is ok.  */
11837         return 0;
11838 #endif
11839 #ifdef TARGET_NR_fcntl64
11840     case TARGET_NR_fcntl64:
11841     {
11842         int cmd;
11843         struct flock64 fl;
11844         from_flock64_fn *copyfrom = copy_from_user_flock64;
11845         to_flock64_fn *copyto = copy_to_user_flock64;
11846 
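              /*
               * The old ARM OABI lays out struct flock64 with different padding
               * than EABI, so it needs its own copy-in/copy-out helpers.
               */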
11847 #ifdef TARGET_ARM
11848         if (!((CPUARMState *)cpu_env)->eabi) {
11849             copyfrom = copy_from_user_oabi_flock64;
11850             copyto = copy_to_user_oabi_flock64;
11851         }
11852 #endif
11853 
11854         cmd = target_to_host_fcntl_cmd(arg2);
11855         if (cmd == -TARGET_EINVAL) {
11856             return cmd;
11857         }
11858 
11859         switch (arg2) {
11860         case TARGET_F_GETLK64:
11861             ret = copyfrom(&fl, arg3);
11862             if (ret) {
11863                 break;
11864             }
11865             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11866             if (ret == 0) {
11867                 ret = copyto(arg3, &fl);
11868             }
11869             break;
11870 
11871         case TARGET_F_SETLK64:
11872         case TARGET_F_SETLKW64:
11873             ret = copyfrom(&fl, arg3);
11874             if (ret) {
11875                 break;
11876             }
11877             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11878             break;
11879         default:
11880             ret = do_fcntl(arg1, arg2, arg3);
11881             break;
11882         }
11883         return ret;
11884     }
11885 #endif
11886 #ifdef TARGET_NR_cacheflush
11887     case TARGET_NR_cacheflush:
11888         /* self-modifying code is handled automatically, so nothing needed */
11889         return 0;
11890 #endif
11891 #ifdef TARGET_NR_getpagesize
11892     case TARGET_NR_getpagesize:
11893         return TARGET_PAGE_SIZE;
11894 #endif
11895     case TARGET_NR_gettid:
11896         return get_errno(sys_gettid());
11897 #ifdef TARGET_NR_readahead
11898     case TARGET_NR_readahead:
11899 #if TARGET_ABI_BITS == 32
11900         if (regpairs_aligned(cpu_env, num)) {
11901             arg2 = arg3;
11902             arg3 = arg4;
11903             arg4 = arg5;
11904         }
11905         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11906 #else
11907         ret = get_errno(readahead(arg1, arg2, arg3));
11908 #endif
11909         return ret;
11910 #endif
11911 #ifdef CONFIG_ATTR
11912 #ifdef TARGET_NR_setxattr
11913     case TARGET_NR_listxattr:
11914     case TARGET_NR_llistxattr:
11915     {
11916         void *p, *b = 0;
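              /* The guest may pass a NULL buffer to query the required size; only lock a buffer when one was supplied. */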
11917         if (arg2) {
11918             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11919             if (!b) {
11920                 return -TARGET_EFAULT;
11921             }
11922         }
11923         p = lock_user_string(arg1);
11924         if (p) {
11925             if (num == TARGET_NR_listxattr) {
11926                 ret = get_errno(listxattr(p, b, arg3));
11927             } else {
11928                 ret = get_errno(llistxattr(p, b, arg3));
11929             }
11930         } else {
11931             ret = -TARGET_EFAULT;
11932         }
11933         unlock_user(p, arg1, 0);
11934         unlock_user(b, arg2, arg3);
11935         return ret;
11936     }
11937     case TARGET_NR_flistxattr:
11938     {
11939         void *b = 0;
11940         if (arg2) {
11941             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11942             if (!b) {
11943                 return -TARGET_EFAULT;
11944             }
11945         }
11946         ret = get_errno(flistxattr(arg1, b, arg3));
11947         unlock_user(b, arg2, arg3);
11948         return ret;
11949     }
11950     case TARGET_NR_setxattr:
11951     case TARGET_NR_lsetxattr:
11952         {
11953             void *p, *n, *v = 0;
11954             if (arg3) {
11955                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11956                 if (!v) {
11957                     return -TARGET_EFAULT;
11958                 }
11959             }
11960             p = lock_user_string(arg1);
11961             n = lock_user_string(arg2);
11962             if (p && n) {
11963                 if (num == TARGET_NR_setxattr) {
11964                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11965                 } else {
11966                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11967                 }
11968             } else {
11969                 ret = -TARGET_EFAULT;
11970             }
11971             unlock_user(p, arg1, 0);
11972             unlock_user(n, arg2, 0);
11973             unlock_user(v, arg3, 0);
11974         }
11975         return ret;
11976     case TARGET_NR_fsetxattr:
11977         {
11978             void *n, *v = 0;
11979             if (arg3) {
11980                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11981                 if (!v) {
11982                     return -TARGET_EFAULT;
11983                 }
11984             }
11985             n = lock_user_string(arg2);
11986             if (n) {
11987                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11988             } else {
11989                 ret = -TARGET_EFAULT;
11990             }
11991             unlock_user(n, arg2, 0);
11992             unlock_user(v, arg3, 0);
11993         }
11994         return ret;
11995     case TARGET_NR_getxattr:
11996     case TARGET_NR_lgetxattr:
11997         {
11998             void *p, *n, *v = 0;
11999             if (arg3) {
12000                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12001                 if (!v) {
12002                     return -TARGET_EFAULT;
12003                 }
12004             }
12005             p = lock_user_string(arg1);
12006             n = lock_user_string(arg2);
12007             if (p && n) {
12008                 if (num == TARGET_NR_getxattr) {
12009                     ret = get_errno(getxattr(p, n, v, arg4));
12010                 } else {
12011                     ret = get_errno(lgetxattr(p, n, v, arg4));
12012                 }
12013             } else {
12014                 ret = -TARGET_EFAULT;
12015             }
12016             unlock_user(p, arg1, 0);
12017             unlock_user(n, arg2, 0);
12018             unlock_user(v, arg3, arg4);
12019         }
12020         return ret;
12021     case TARGET_NR_fgetxattr:
12022         {
12023             void *n, *v = 0;
12024             if (arg3) {
12025                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12026                 if (!v) {
12027                     return -TARGET_EFAULT;
12028                 }
12029             }
12030             n = lock_user_string(arg2);
12031             if (n) {
12032                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12033             } else {
12034                 ret = -TARGET_EFAULT;
12035             }
12036             unlock_user(n, arg2, 0);
12037             unlock_user(v, arg3, arg4);
12038         }
12039         return ret;
12040     case TARGET_NR_removexattr:
12041     case TARGET_NR_lremovexattr:
12042         {
12043             void *p, *n;
12044             p = lock_user_string(arg1);
12045             n = lock_user_string(arg2);
12046             if (p && n) {
12047                 if (num == TARGET_NR_removexattr) {
12048                     ret = get_errno(removexattr(p, n));
12049                 } else {
12050                     ret = get_errno(lremovexattr(p, n));
12051                 }
12052             } else {
12053                 ret = -TARGET_EFAULT;
12054             }
12055             unlock_user(p, arg1, 0);
12056             unlock_user(n, arg2, 0);
12057         }
12058         return ret;
12059     case TARGET_NR_fremovexattr:
12060         {
12061             void *n;
12062             n = lock_user_string(arg2);
12063             if (n) {
12064                 ret = get_errno(fremovexattr(arg1, n));
12065             } else {
12066                 ret = -TARGET_EFAULT;
12067             }
12068             unlock_user(n, arg2, 0);
12069         }
12070         return ret;
12071 #endif
12072 #endif /* CONFIG_ATTR */
12073 #ifdef TARGET_NR_set_thread_area
12074     case TARGET_NR_set_thread_area:
12075 #if defined(TARGET_MIPS)
12076       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12077       return 0;
12078 #elif defined(TARGET_CRIS)
12079       if (arg1 & 0xff) {
12080           ret = -TARGET_EINVAL;
12081       } else {
12082           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12083           ret = 0;
12084       }
12085       return ret;
12086 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12087       return do_set_thread_area(cpu_env, arg1);
12088 #elif defined(TARGET_M68K)
12089       {
12090           TaskState *ts = cpu->opaque;
12091           ts->tp_value = arg1;
12092           return 0;
12093       }
12094 #else
12095       return -TARGET_ENOSYS;
12096 #endif
12097 #endif
12098 #ifdef TARGET_NR_get_thread_area
12099     case TARGET_NR_get_thread_area:
12100 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12101         return do_get_thread_area(cpu_env, arg1);
12102 #elif defined(TARGET_M68K)
12103         {
12104             TaskState *ts = cpu->opaque;
12105             return ts->tp_value;
12106         }
12107 #else
12108         return -TARGET_ENOSYS;
12109 #endif
12110 #endif
12111 #ifdef TARGET_NR_getdomainname
12112     case TARGET_NR_getdomainname:
12113         return -TARGET_ENOSYS;
12114 #endif
12115 
12116 #ifdef TARGET_NR_clock_settime
12117     case TARGET_NR_clock_settime:
12118     {
12119         struct timespec ts;
12120 
12121         ret = target_to_host_timespec(&ts, arg2);
12122         if (!is_error(ret)) {
12123             ret = get_errno(clock_settime(arg1, &ts));
12124         }
12125         return ret;
12126     }
12127 #endif
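/*
 * The *_time64 cases below differ from the plain variants only in the
 * guest-side layout: they use target__kernel_timespec (64-bit tv_sec even
 * on 32-bit ABIs), so only the guest<->host conversion helpers change.
 */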
12128 #ifdef TARGET_NR_clock_settime64
12129     case TARGET_NR_clock_settime64:
12130     {
12131         struct timespec ts;
12132 
12133         ret = target_to_host_timespec64(&ts, arg2);
12134         if (!is_error(ret)) {
12135             ret = get_errno(clock_settime(arg1, &ts));
12136         }
12137         return ret;
12138     }
12139 #endif
12140 #ifdef TARGET_NR_clock_gettime
12141     case TARGET_NR_clock_gettime:
12142     {
12143         struct timespec ts;
12144         ret = get_errno(clock_gettime(arg1, &ts));
12145         if (!is_error(ret)) {
12146             ret = host_to_target_timespec(arg2, &ts);
12147         }
12148         return ret;
12149     }
12150 #endif
12151 #ifdef TARGET_NR_clock_gettime64
12152     case TARGET_NR_clock_gettime64:
12153     {
12154         struct timespec ts;
12155         ret = get_errno(clock_gettime(arg1, &ts));
12156         if (!is_error(ret)) {
12157             ret = host_to_target_timespec64(arg2, &ts);
12158         }
12159         return ret;
12160     }
12161 #endif
12162 #ifdef TARGET_NR_clock_getres
12163     case TARGET_NR_clock_getres:
12164     {
12165         struct timespec ts;
12166         ret = get_errno(clock_getres(arg1, &ts));
12167         if (!is_error(ret) && arg2 && host_to_target_timespec(arg2, &ts)) {
12168             return -TARGET_EFAULT;
12169         }
12170         return ret;
12171     }
12172 #endif
12173 #ifdef TARGET_NR_clock_getres_time64
12174     case TARGET_NR_clock_getres_time64:
12175     {
12176         struct timespec ts;
12177         ret = get_errno(clock_getres(arg1, &ts));
12178         if (!is_error(ret) && arg2 && host_to_target_timespec64(arg2, &ts)) {
12179             return -TARGET_EFAULT;
12180         }
12181         return ret;
12182     }
12183 #endif
12184 #ifdef TARGET_NR_clock_nanosleep
12185     case TARGET_NR_clock_nanosleep:
12186     {
12187         struct timespec ts;
12188         if (target_to_host_timespec(&ts, arg3)) {
12189             return -TARGET_EFAULT;
12190         }
12191         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12192                                              &ts, arg4 ? &ts : NULL));
12193         /*
12194          * If the call is interrupted by a signal handler, it fails with
12195          * -TARGET_EINTR. In that case, if arg4 is non-NULL and arg2 is not
12196          * TIMER_ABSTIME, the remaining unslept time is copied back to arg4.
12197          */
12198         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12199             host_to_target_timespec(arg4, &ts)) {
12200               return -TARGET_EFAULT;
12201         }
12202 
12203         return ret;
12204     }
12205 #endif
12206 #ifdef TARGET_NR_clock_nanosleep_time64
12207     case TARGET_NR_clock_nanosleep_time64:
12208     {
12209         struct timespec ts;
12210 
12211         if (target_to_host_timespec64(&ts, arg3)) {
12212             return -TARGET_EFAULT;
12213         }
12214 
12215         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12216                                              &ts, arg4 ? &ts : NULL));
12217 
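        /*
         * Same EINTR handling as TARGET_NR_clock_nanosleep above: report
         * the remaining time through arg4 unless TIMER_ABSTIME was used.
         */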
12218         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12219             host_to_target_timespec64(arg4, &ts)) {
12220             return -TARGET_EFAULT;
12221         }
12222         return ret;
12223     }
12224 #endif
12225 
12226 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12227     case TARGET_NR_set_tid_address:
12228         return get_errno(set_tid_address((int *)g2h(arg1)));
12229 #endif
12230 
12231     case TARGET_NR_tkill:
12232         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12233 
12234     case TARGET_NR_tgkill:
12235         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12236                          target_to_host_signal(arg3)));
12237 
12238 #ifdef TARGET_NR_set_robust_list
12239     case TARGET_NR_set_robust_list:
12240     case TARGET_NR_get_robust_list:
12241         /* The ABI for supporting robust futexes has userspace pass
12242          * the kernel a pointer to a linked list which is updated by
12243          * userspace after the syscall; the list is walked by the kernel
12244          * when the thread exits. Since the linked list in QEMU guest
12245          * memory isn't a valid linked list for the host and we have
12246          * no way to reliably intercept the thread-death event, we can't
12247          * support these. Silently return ENOSYS so that guest userspace
12248          * falls back to a non-robust futex implementation (which should
12249          * be OK except in the corner case of the guest crashing while
12250          * holding a mutex that is shared with another process via
12251          * shared memory).
12252          */
12253         return -TARGET_ENOSYS;
12254 #endif
12255 
12256 #if defined(TARGET_NR_utimensat)
12257     case TARGET_NR_utimensat:
12258         {
12259             struct timespec *tsp, ts[2];
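            /*
             * arg3, when non-NULL, points to two consecutive guest
             * timespecs (access and modification time); convert both
             * before handing them to the host syscall.
             */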
12260             if (!arg3) {
12261                 tsp = NULL;
12262             } else {
12263                 if (target_to_host_timespec(ts, arg3)) {
12264                     return -TARGET_EFAULT;
12265                 }
12266                 if (target_to_host_timespec(ts + 1, arg3 +
12267                                             sizeof(struct target_timespec))) {
12268                     return -TARGET_EFAULT;
12269                 }
12270                 tsp = ts;
12271             }
12272             if (!arg2) {
12273                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12274             } else {
12275                 if (!(p = lock_user_string(arg2))) {
12276                     return -TARGET_EFAULT;
12277                 }
12278                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12279                 unlock_user(p, arg2, 0);
12280             }
12281         }
12282         return ret;
12283 #endif
12284 #ifdef TARGET_NR_utimensat_time64
12285     case TARGET_NR_utimensat_time64:
12286         {
12287             struct timespec *tsp, ts[2];
12288             if (!arg3) {
12289                 tsp = NULL;
12290             } else {
12291                 if (target_to_host_timespec64(ts, arg3)) {
12292                     return -TARGET_EFAULT;
12293                 }
12294                 if (target_to_host_timespec64(ts + 1, arg3 +
12295                                      sizeof(struct target__kernel_timespec))) {
12296                     return -TARGET_EFAULT;
12297                 }
12298                 tsp = ts;
12299             }
12300             if (!arg2) {
12301                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12302             } else {
12303                 p = lock_user_string(arg2);
12304                 if (!p) {
12305                     return -TARGET_EFAULT;
12306                 }
12307                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12308                 unlock_user(p, arg2, 0);
12309             }
12310         }
12311         return ret;
12312 #endif
12313 #ifdef TARGET_NR_futex
12314     case TARGET_NR_futex:
12315         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12316 #endif
12317 #ifdef TARGET_NR_futex_time64
12318     case TARGET_NR_futex_time64:
12319         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12320 #endif
12321 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12322     case TARGET_NR_inotify_init:
12323         ret = get_errno(sys_inotify_init());
12324         if (ret >= 0) {
12325             fd_trans_register(ret, &target_inotify_trans);
12326         }
12327         return ret;
12328 #endif
12329 #ifdef CONFIG_INOTIFY1
12330 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12331     case TARGET_NR_inotify_init1:
12332         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12333                                           fcntl_flags_tbl)));
12334         if (ret >= 0) {
12335             fd_trans_register(ret, &target_inotify_trans);
12336         }
12337         return ret;
12338 #endif
12339 #endif
12340 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12341     case TARGET_NR_inotify_add_watch:
12342         p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
12343         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12344         unlock_user(p, arg2, 0);
12345         return ret;
12346 #endif
12347 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12348     case TARGET_NR_inotify_rm_watch:
12349         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12350 #endif
12351 
12352 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12353     case TARGET_NR_mq_open:
12354         {
12355             struct mq_attr posix_mq_attr;
12356             struct mq_attr *pposix_mq_attr;
12357             int host_flags;
12358 
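            /*
             * Translate the guest's O_* open flags via fcntl_flags_tbl and,
             * if supplied, convert the guest mq_attr before opening the
             * host message queue.
             */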
12359             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12360             pposix_mq_attr = NULL;
12361             if (arg4) {
12362                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12363                     return -TARGET_EFAULT;
12364                 }
12365                 pposix_mq_attr = &posix_mq_attr;
12366             }
12367             p = lock_user_string(arg1 - 1);
12368             if (!p) {
12369                 return -TARGET_EFAULT;
12370             }
12371             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12372             unlock_user(p, arg1, 0);
12373         }
12374         return ret;
12375 
12376     case TARGET_NR_mq_unlink:
12377         p = lock_user_string(arg1 - 1);
12378         if (!p) {
12379             return -TARGET_EFAULT;
12380         }
12381         ret = get_errno(mq_unlink(p));
12382         unlock_user(p, arg1, 0);
12383         return ret;
12384 
12385 #ifdef TARGET_NR_mq_timedsend
12386     case TARGET_NR_mq_timedsend:
12387         {
12388             struct timespec ts;
12389 
12390             p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
12391             if (arg5 != 0) {
12392                 if (target_to_host_timespec(&ts, arg5)) {
12393                     return -TARGET_EFAULT;
12394                 }
12395                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12396                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12397                     return -TARGET_EFAULT;
12398                 }
12399             } else {
12400                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12401             }
12402             unlock_user(p, arg2, arg3);
12403         }
12404         return ret;
12405 #endif
12406 #ifdef TARGET_NR_mq_timedsend_time64
12407     case TARGET_NR_mq_timedsend_time64:
12408         {
12409             struct timespec ts;
12410 
12411             p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
12412             if (arg5 != 0) {
12413                 if (target_to_host_timespec64(&ts, arg5)) {
12414                     return -TARGET_EFAULT;
12415                 }
12416                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12417                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12418                     return -TARGET_EFAULT;
12419                 }
12420             } else {
12421                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12422             }
12423             unlock_user(p, arg2, arg3);
12424         }
12425         return ret;
12426 #endif
12427 
12428 #ifdef TARGET_NR_mq_timedreceive
12429     case TARGET_NR_mq_timedreceive:
12430         {
12431             struct timespec ts;
12432             unsigned int prio;
12433 
12434             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
12435             if (arg5 != 0) {
12436                 if (target_to_host_timespec(&ts, arg5)) {
12437                     return -TARGET_EFAULT;
12438                 }
12439                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12440                                                      &prio, &ts));
12441                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12442                     return -TARGET_EFAULT;
12443                 }
12444             } else {
12445                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12446                                                      &prio, NULL));
12447             }
12448             unlock_user(p, arg2, arg3);
12449             if (arg4 != 0) {
12450                 put_user_u32(prio, arg4);
            }
12451         }
12452         return ret;
12453 #endif
12454 #ifdef TARGET_NR_mq_timedreceive_time64
12455     case TARGET_NR_mq_timedreceive_time64:
12456         {
12457             struct timespec ts;
12458             unsigned int prio;
12459 
12460             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
12461             if (arg5 != 0) {
12462                 if (target_to_host_timespec64(&ts, arg5)) {
12463                     return -TARGET_EFAULT;
12464                 }
12465                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12466                                                      &prio, &ts));
12467                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12468                     return -TARGET_EFAULT;
12469                 }
12470             } else {
12471                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12472                                                      &prio, NULL));
12473             }
12474             unlock_user(p, arg2, arg3);
12475             if (arg4 != 0) {
12476                 put_user_u32(prio, arg4);
12477             }
12478         }
12479         return ret;
12480 #endif
12481 
12482     /* Not implemented for now... */
12483 /*     case TARGET_NR_mq_notify: */
12484 /*         break; */
12485 
12486     case TARGET_NR_mq_getsetattr:
12487         {
12488             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12489             ret = 0;
12490             if (arg2 != 0) {
12491                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2)) {
                    return -TARGET_EFAULT;
                }
12492                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12493                                            &posix_mq_attr_out));
12494             } else if (arg3 != 0) {
12495                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12496             }
12497             if (ret == 0 && arg3 != 0) {
12498                 if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out)) {
                    return -TARGET_EFAULT;
                }
12499             }
12500         }
12501         return ret;
12502 #endif
12503 
12504 #ifdef CONFIG_SPLICE
12505 #ifdef TARGET_NR_tee
12506     case TARGET_NR_tee:
12507         {
12508             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12509         }
12510         return ret;
12511 #endif
12512 #ifdef TARGET_NR_splice
12513     case TARGET_NR_splice:
12514         {
12515             loff_t loff_in, loff_out;
12516             loff_t *ploff_in = NULL, *ploff_out = NULL;
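            /*
             * The optional in/out offsets live in guest memory: read them
             * before the host splice() call and write the updated values
             * back afterwards.
             */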
12517             if (arg2) {
12518                 if (get_user_u64(loff_in, arg2)) {
12519                     return -TARGET_EFAULT;
12520                 }
12521                 ploff_in = &loff_in;
12522             }
12523             if (arg4) {
12524                 if (get_user_u64(loff_out, arg4)) {
12525                     return -TARGET_EFAULT;
12526                 }
12527                 ploff_out = &loff_out;
12528             }
12529             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12530             if (arg2) {
12531                 if (put_user_u64(loff_in, arg2)) {
12532                     return -TARGET_EFAULT;
12533                 }
12534             }
12535             if (arg4) {
12536                 if (put_user_u64(loff_out, arg4)) {
12537                     return -TARGET_EFAULT;
12538                 }
12539             }
12540         }
12541         return ret;
12542 #endif
12543 #ifdef TARGET_NR_vmsplice
12544     case TARGET_NR_vmsplice:
12545         {
12546             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12547             if (vec != NULL) {
12548                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12549                 unlock_iovec(vec, arg2, arg3, 0);
12550             } else {
12551                 ret = -host_to_target_errno(errno);
12552             }
12553         }
12554         return ret;
12555 #endif
12556 #endif /* CONFIG_SPLICE */
12557 #ifdef CONFIG_EVENTFD
12558 #if defined(TARGET_NR_eventfd)
12559     case TARGET_NR_eventfd:
12560         ret = get_errno(eventfd(arg1, 0));
12561         if (ret >= 0) {
12562             fd_trans_register(ret, &target_eventfd_trans);
12563         }
12564         return ret;
12565 #endif
12566 #if defined(TARGET_NR_eventfd2)
12567     case TARGET_NR_eventfd2:
12568     {
12569         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12570         if (arg2 & TARGET_O_NONBLOCK) {
12571             host_flags |= O_NONBLOCK;
12572         }
12573         if (arg2 & TARGET_O_CLOEXEC) {
12574             host_flags |= O_CLOEXEC;
12575         }
12576         ret = get_errno(eventfd(arg1, host_flags));
12577         if (ret >= 0) {
12578             fd_trans_register(ret, &target_eventfd_trans);
12579         }
12580         return ret;
12581     }
12582 #endif
12583 #endif /* CONFIG_EVENTFD  */
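/*
 * On 32-bit ABIs, 64-bit file offsets are passed in register pairs;
 * target_offset64() reassembles each pair before the host call.  This
 * applies to fallocate and the sync_file_range variants below.
 */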
12584 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12585     case TARGET_NR_fallocate:
12586 #if TARGET_ABI_BITS == 32
12587         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12588                                   target_offset64(arg5, arg6)));
12589 #else
12590         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12591 #endif
12592         return ret;
12593 #endif
12594 #if defined(CONFIG_SYNC_FILE_RANGE)
12595 #if defined(TARGET_NR_sync_file_range)
12596     case TARGET_NR_sync_file_range:
12597 #if TARGET_ABI_BITS == 32
12598 #if defined(TARGET_MIPS)
12599         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12600                                         target_offset64(arg5, arg6), arg7));
12601 #else
12602         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12603                                         target_offset64(arg4, arg5), arg6));
12604 #endif /* !TARGET_MIPS */
12605 #else
12606         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12607 #endif
12608         return ret;
12609 #endif
12610 #if defined(TARGET_NR_sync_file_range2) || \
12611     defined(TARGET_NR_arm_sync_file_range)
12612 #if defined(TARGET_NR_sync_file_range2)
12613     case TARGET_NR_sync_file_range2:
12614 #endif
12615 #if defined(TARGET_NR_arm_sync_file_range)
12616     case TARGET_NR_arm_sync_file_range:
12617 #endif
12618         /* This is like sync_file_range but the arguments are reordered */
12619 #if TARGET_ABI_BITS == 32
12620         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12621                                         target_offset64(arg5, arg6), arg2));
12622 #else
12623         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12624 #endif
12625         return ret;
12626 #endif
12627 #endif
12628 #if defined(TARGET_NR_signalfd4)
12629     case TARGET_NR_signalfd4:
12630         return do_signalfd4(arg1, arg2, arg4);
12631 #endif
12632 #if defined(TARGET_NR_signalfd)
12633     case TARGET_NR_signalfd:
12634         return do_signalfd4(arg1, arg2, 0);
12635 #endif
12636 #if defined(CONFIG_EPOLL)
12637 #if defined(TARGET_NR_epoll_create)
12638     case TARGET_NR_epoll_create:
12639         return get_errno(epoll_create(arg1));
12640 #endif
12641 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12642     case TARGET_NR_epoll_create1:
12643         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12644 #endif
12645 #if defined(TARGET_NR_epoll_ctl)
12646     case TARGET_NR_epoll_ctl:
12647     {
12648         struct epoll_event ep;
12649         struct epoll_event *epp = 0;
12650         if (arg4) {
12651             if (arg2 != EPOLL_CTL_DEL) {
12652                 struct target_epoll_event *target_ep;
12653                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12654                     return -TARGET_EFAULT;
12655                 }
12656                 ep.events = tswap32(target_ep->events);
12657                 /*
12658                  * The epoll_data_t union is just opaque data to the kernel,
12659                  * so we transfer all 64 bits across and need not worry what
12660                  * actual data type it is.
12661                  */
12662                 ep.data.u64 = tswap64(target_ep->data.u64);
12663                 unlock_user_struct(target_ep, arg4, 0);
12664             }
12665             /*
12666              * Before kernel 2.6.9, EPOLL_CTL_DEL required a non-NULL
12667              * event pointer even though its contents are ignored, so
12668              * pass one through whenever the guest supplied one.
12669              */
12670             epp = &ep;
12671         }
12672         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12673     }
12674 #endif
12675 
12676 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12677 #if defined(TARGET_NR_epoll_wait)
12678     case TARGET_NR_epoll_wait:
12679 #endif
12680 #if defined(TARGET_NR_epoll_pwait)
12681     case TARGET_NR_epoll_pwait:
12682 #endif
12683     {
12684         struct target_epoll_event *target_ep;
12685         struct epoll_event *ep;
12686         int epfd = arg1;
12687         int maxevents = arg3;
12688         int timeout = arg4;
12689 
12690         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12691             return -TARGET_EINVAL;
12692         }
12693 
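        /*
         * Bounce the event array through host memory: events come back in
         * host epoll_event format and are converted to the guest's
         * target_epoll_event layout (with byte swapping) before unlock.
         */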
12694         target_ep = lock_user(VERIFY_WRITE, arg2,
12695                               maxevents * sizeof(struct target_epoll_event), 1);
12696         if (!target_ep) {
12697             return -TARGET_EFAULT;
12698         }
12699 
12700         ep = g_try_new(struct epoll_event, maxevents);
12701         if (!ep) {
12702             unlock_user(target_ep, arg2, 0);
12703             return -TARGET_ENOMEM;
12704         }
12705 
12706         switch (num) {
12707 #if defined(TARGET_NR_epoll_pwait)
12708         case TARGET_NR_epoll_pwait:
12709         {
12710             target_sigset_t *target_set;
12711             sigset_t _set, *set = &_set;
12712 
12713             if (arg5) {
12714                 if (arg6 != sizeof(target_sigset_t)) {
12715                     ret = -TARGET_EINVAL;
12716                     break;
12717                 }
12718 
12719                 target_set = lock_user(VERIFY_READ, arg5,
12720                                        sizeof(target_sigset_t), 1);
12721                 if (!target_set) {
12722                     ret = -TARGET_EFAULT;
12723                     break;
12724                 }
12725                 target_to_host_sigset(set, target_set);
12726                 unlock_user(target_set, arg5, 0);
12727             } else {
12728                 set = NULL;
12729             }
12730 
12731             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12732                                              set, SIGSET_T_SIZE));
12733             break;
12734         }
12735 #endif
12736 #if defined(TARGET_NR_epoll_wait)
12737         case TARGET_NR_epoll_wait:
12738             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12739                                              NULL, 0));
12740             break;
12741 #endif
12742         default:
12743             ret = -TARGET_ENOSYS;
12744         }
12745         if (!is_error(ret)) {
12746             int i;
12747             for (i = 0; i < ret; i++) {
12748                 target_ep[i].events = tswap32(ep[i].events);
12749                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12750             }
12751             unlock_user(target_ep, arg2,
12752                         ret * sizeof(struct target_epoll_event));
12753         } else {
12754             unlock_user(target_ep, arg2, 0);
12755         }
12756         g_free(ep);
12757         return ret;
12758     }
12759 #endif
12760 #endif
12761 #ifdef TARGET_NR_prlimit64
12762     case TARGET_NR_prlimit64:
12763     {
12764         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12765         struct target_rlimit64 *target_rnew, *target_rold;
12766         struct host_rlimit64 rnew, rold, *rnewp = 0;
12767         int resource = target_to_host_resource(arg2);
12768 
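        /*
         * New RLIMIT_AS/DATA/STACK limits are not forwarded to the host,
         * since they would also constrain the QEMU process itself; for
         * those resources only the old-limit query is passed through.
         */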
12769         if (arg3 && (resource != RLIMIT_AS &&
12770                      resource != RLIMIT_DATA &&
12771                      resource != RLIMIT_STACK)) {
12772             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12773                 return -TARGET_EFAULT;
12774             }
12775             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12776             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12777             unlock_user_struct(target_rnew, arg3, 0);
12778             rnewp = &rnew;
12779         }
12780 
12781         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12782         if (!is_error(ret) && arg4) {
12783             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12784                 return -TARGET_EFAULT;
12785             }
12786             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12787             target_rold->rlim_max = tswap64(rold.rlim_max);
12788             unlock_user_struct(target_rold, arg4, 1);
12789         }
12790         return ret;
12791     }
12792 #endif
12793 #ifdef TARGET_NR_gethostname
12794     case TARGET_NR_gethostname:
12795     {
12796         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12797         if (name) {
12798             ret = get_errno(gethostname(name, arg2));
12799             unlock_user(name, arg1, arg2);
12800         } else {
12801             ret = -TARGET_EFAULT;
12802         }
12803         return ret;
12804     }
12805 #endif
12806 #ifdef TARGET_NR_atomic_cmpxchg_32
12807     case TARGET_NR_atomic_cmpxchg_32:
12808     {
12809         /* should use start_exclusive from main.c */
12810         abi_ulong mem_value;
12811         if (get_user_u32(mem_value, arg6)) {
12812             target_siginfo_t info;
12813             info.si_signo = SIGSEGV;
12814             info.si_errno = 0;
12815             info.si_code = TARGET_SEGV_MAPERR;
12816             info._sifields._sigfault._addr = arg6;
12817             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12818                          QEMU_SI_FAULT, &info);
12819             ret = 0xdeadbeef;
12820             return ret;
12821         }
12822         if (mem_value == arg2) {
12823             put_user_u32(arg1, arg6);
        }
12824         return mem_value;
12825     }
12826 #endif
12827 #ifdef TARGET_NR_atomic_barrier
12828     case TARGET_NR_atomic_barrier:
12829         /* Like the kernel implementation and the qemu arm barrier,
12830            this is a no-op. */
12831         return 0;
12832 #endif
12833 
12834 #ifdef TARGET_NR_timer_create
12835     case TARGET_NR_timer_create:
12836     {
12837         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12838 
12839         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12840 
12841         int clkid = arg1;
12842         int timer_index = next_free_host_timer();
12843 
12844         if (timer_index < 0) {
12845             ret = -TARGET_EAGAIN;
12846         } else {
12847             timer_t *phtimer = g_posix_timers + timer_index;
12848 
12849             if (arg2) {
12850                 phost_sevp = &host_sevp;
12851                 ret = target_to_host_sigevent(phost_sevp, arg2);
12852                 if (ret != 0) {
12853                     return ret;
12854                 }
12855             }
12856 
12857             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12858             if (ret) {
12859                 phtimer = NULL;
12860             } else {
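                /*
                 * The timer id handed back to the guest encodes the index
                 * into g_posix_timers together with TIMER_MAGIC;
                 * get_timer_id() recovers the index in the timer_* cases
                 * below.
                 */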
12861                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12862                     return -TARGET_EFAULT;
12863                 }
12864             }
12865         }
12866         return ret;
12867     }
12868 #endif
12869 
12870 #ifdef TARGET_NR_timer_settime
12871     case TARGET_NR_timer_settime:
12872     {
12873         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12874          * struct itimerspec * old_value */
12875         target_timer_t timerid = get_timer_id(arg1);
12876 
12877         if (timerid < 0) {
12878             ret = timerid;
12879         } else if (arg3 == 0) {
12880             ret = -TARGET_EINVAL;
12881         } else {
12882             timer_t htimer = g_posix_timers[timerid];
12883             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12884 
12885             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12886                 return -TARGET_EFAULT;
12887             }
12888             ret = get_errno(
12889                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12890             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12891                 return -TARGET_EFAULT;
12892             }
12893         }
12894         return ret;
12895     }
12896 #endif
12897 
12898 #ifdef TARGET_NR_timer_settime64
12899     case TARGET_NR_timer_settime64:
12900     {
12901         target_timer_t timerid = get_timer_id(arg1);
12902 
12903         if (timerid < 0) {
12904             ret = timerid;
12905         } else if (arg3 == 0) {
12906             ret = -TARGET_EINVAL;
12907         } else {
12908             timer_t htimer = g_posix_timers[timerid];
12909             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12910 
12911             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12912                 return -TARGET_EFAULT;
12913             }
12914             ret = get_errno(
12915                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12916             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12917                 return -TARGET_EFAULT;
12918             }
12919         }
12920         return ret;
12921     }
12922 #endif
12923 
12924 #ifdef TARGET_NR_timer_gettime
12925     case TARGET_NR_timer_gettime:
12926     {
12927         /* args: timer_t timerid, struct itimerspec *curr_value */
12928         target_timer_t timerid = get_timer_id(arg1);
12929 
12930         if (timerid < 0) {
12931             ret = timerid;
12932         } else if (!arg2) {
12933             ret = -TARGET_EFAULT;
12934         } else {
12935             timer_t htimer = g_posix_timers[timerid];
12936             struct itimerspec hspec;
12937             ret = get_errno(timer_gettime(htimer, &hspec));
12938 
12939             if (host_to_target_itimerspec(arg2, &hspec)) {
12940                 ret = -TARGET_EFAULT;
12941             }
12942         }
12943         return ret;
12944     }
12945 #endif
12946 
12947 #ifdef TARGET_NR_timer_gettime64
12948     case TARGET_NR_timer_gettime64:
12949     {
12950         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12951         target_timer_t timerid = get_timer_id(arg1);
12952 
12953         if (timerid < 0) {
12954             ret = timerid;
12955         } else if (!arg2) {
12956             ret = -TARGET_EFAULT;
12957         } else {
12958             timer_t htimer = g_posix_timers[timerid];
12959             struct itimerspec hspec;
12960             ret = get_errno(timer_gettime(htimer, &hspec));
12961 
12962             if (host_to_target_itimerspec64(arg2, &hspec)) {
12963                 ret = -TARGET_EFAULT;
12964             }
12965         }
12966         return ret;
12967     }
12968 #endif
12969 
12970 #ifdef TARGET_NR_timer_getoverrun
12971     case TARGET_NR_timer_getoverrun:
12972     {
12973         /* args: timer_t timerid */
12974         target_timer_t timerid = get_timer_id(arg1);
12975 
12976         if (timerid < 0) {
12977             ret = timerid;
12978         } else {
12979             timer_t htimer = g_posix_timers[timerid];
12980             ret = get_errno(timer_getoverrun(htimer));
12981         }
12982         return ret;
12983     }
12984 #endif
12985 
12986 #ifdef TARGET_NR_timer_delete
12987     case TARGET_NR_timer_delete:
12988     {
12989         /* args: timer_t timerid */
12990         target_timer_t timerid = get_timer_id(arg1);
12991 
12992         if (timerid < 0) {
12993             ret = timerid;
12994         } else {
12995             timer_t htimer = g_posix_timers[timerid];
12996             ret = get_errno(timer_delete(htimer));
12997             g_posix_timers[timerid] = 0;
12998         }
12999         return ret;
13000     }
13001 #endif
13002 
13003 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13004     case TARGET_NR_timerfd_create:
13005         return get_errno(timerfd_create(arg1,
13006                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13007 #endif
13008 
13009 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13010     case TARGET_NR_timerfd_gettime:
13011         {
13012             struct itimerspec its_curr;
13013 
13014             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13015 
13016             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13017                 return -TARGET_EFAULT;
13018             }
13019         }
13020         return ret;
13021 #endif
13022 
13023 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13024     case TARGET_NR_timerfd_gettime64:
13025         {
13026             struct itimerspec its_curr;
13027 
13028             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13029 
13030             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13031                 return -TARGET_EFAULT;
13032             }
13033         }
13034         return ret;
13035 #endif
13036 
13037 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13038     case TARGET_NR_timerfd_settime:
13039         {
13040             struct itimerspec its_new, its_old, *p_new;
13041 
13042             if (arg3) {
13043                 if (target_to_host_itimerspec(&its_new, arg3)) {
13044                     return -TARGET_EFAULT;
13045                 }
13046                 p_new = &its_new;
13047             } else {
13048                 p_new = NULL;
13049             }
13050 
13051             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13052 
13053             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13054                 return -TARGET_EFAULT;
13055             }
13056         }
13057         return ret;
13058 #endif
13059 
13060 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13061     case TARGET_NR_timerfd_settime64:
13062         {
13063             struct itimerspec its_new, its_old, *p_new;
13064 
13065             if (arg3) {
13066                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13067                     return -TARGET_EFAULT;
13068                 }
13069                 p_new = &its_new;
13070             } else {
13071                 p_new = NULL;
13072             }
13073 
13074             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13075 
13076             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13077                 return -TARGET_EFAULT;
13078             }
13079         }
13080         return ret;
13081 #endif
13082 
13083 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13084     case TARGET_NR_ioprio_get:
13085         return get_errno(ioprio_get(arg1, arg2));
13086 #endif
13087 
13088 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13089     case TARGET_NR_ioprio_set:
13090         return get_errno(ioprio_set(arg1, arg2, arg3));
13091 #endif
13092 
13093 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13094     case TARGET_NR_setns:
13095         return get_errno(setns(arg1, arg2));
13096 #endif
13097 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13098     case TARGET_NR_unshare:
13099         return get_errno(unshare(arg1));
13100 #endif
13101 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13102     case TARGET_NR_kcmp:
13103         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13104 #endif
13105 #ifdef TARGET_NR_swapcontext
13106     case TARGET_NR_swapcontext:
13107         /* PowerPC specific.  */
13108         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13109 #endif
13110 #ifdef TARGET_NR_memfd_create
13111     case TARGET_NR_memfd_create:
13112         p = lock_user_string(arg1);
13113         if (!p) {
13114             return -TARGET_EFAULT;
13115         }
13116         ret = get_errno(memfd_create(p, arg2));
13117         fd_trans_unregister(ret);
13118         unlock_user(p, arg1, 0);
13119         return ret;
13120 #endif
13121 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13122     case TARGET_NR_membarrier:
13123         return get_errno(membarrier(arg1, arg2));
13124 #endif
13125 
13126 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13127     case TARGET_NR_copy_file_range:
13128         {
13129             loff_t inoff, outoff;
13130             loff_t *pinoff = NULL, *poutoff = NULL;
13131 
13132             if (arg2) {
13133                 if (get_user_u64(inoff, arg2)) {
13134                     return -TARGET_EFAULT;
13135                 }
13136                 pinoff = &inoff;
13137             }
13138             if (arg4) {
13139                 if (get_user_u64(outoff, arg4)) {
13140                     return -TARGET_EFAULT;
13141                 }
13142                 poutoff = &outoff;
13143             }
13144             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13145                                                  arg5, arg6));
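            /* Write the updated offsets back to the guest only if data was copied. */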
13146             if (!is_error(ret) && ret > 0) {
13147                 if (arg2) {
13148                     if (put_user_u64(inoff, arg2)) {
13149                         return -TARGET_EFAULT;
13150                     }
13151                 }
13152                 if (arg4) {
13153                     if (put_user_u64(outoff, arg4)) {
13154                         return -TARGET_EFAULT;
13155                     }
13156                 }
13157             }
13158         }
13159         return ret;
13160 #endif
13161 
13162     default:
13163         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13164         return -TARGET_ENOSYS;
13165     }
13166     return ret;
13167 }
13168 
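/*
 * Top-level syscall dispatcher called from the per-target cpu_loop():
 * handles -strace logging and the syscall record/trace hooks around the
 * actual emulation in do_syscall1() above.
 */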
13169 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13170                     abi_long arg2, abi_long arg3, abi_long arg4,
13171                     abi_long arg5, abi_long arg6, abi_long arg7,
13172                     abi_long arg8)
13173 {
13174     CPUState *cpu = env_cpu(cpu_env);
13175     abi_long ret;
13176 
13177 #ifdef DEBUG_ERESTARTSYS
13178     /* Debug-only code for exercising the syscall-restart code paths
13179      * in the per-architecture cpu main loops: restart every syscall
13180      * the guest makes once before letting it through.
13181      */
13182     {
13183         static bool flag;
13184         flag = !flag;
13185         if (flag) {
13186             return -TARGET_ERESTARTSYS;
13187         }
13188     }
13189 #endif
13190 
13191     record_syscall_start(cpu, num, arg1,
13192                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13193 
13194     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13195         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13196     }
13197 
13198     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13199                       arg5, arg6, arg7, arg8);
13200 
13201     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13202         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13203                           arg3, arg4, arg5, arg6);
13204     }
13205 
13206     record_syscall_return(cpu, num, ret);
13207     return ret;
13208 }
13209