xref: /openbmc/qemu/linux-user/syscall.c (revision fe51b0a5)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <netinet/udp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include <linux/icmpv6.h>
60 #include <linux/if_tun.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
63 #ifdef CONFIG_TIMERFD
64 #include <sys/timerfd.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
78 #ifdef HAVE_SYS_KCOV_H
79 #include <sys/kcov.h>
80 #endif
81 
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
88 
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/cdrom.h>
92 #include <linux/hdreg.h>
93 #include <linux/soundcard.h>
94 #include <linux/kd.h>
95 #include <linux/mtio.h>
96 #include <linux/fs.h>
97 #include <linux/fd.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
100 #endif
101 #include <linux/fb.h>
102 #if defined(CONFIG_USBFS)
103 #include <linux/usbdevice_fs.h>
104 #include <linux/usb/ch9.h>
105 #endif
106 #include <linux/vt.h>
107 #include <linux/dm-ioctl.h>
108 #include <linux/reboot.h>
109 #include <linux/route.h>
110 #include <linux/filter.h>
111 #include <linux/blkpg.h>
112 #include <netpacket/packet.h>
113 #include <linux/netlink.h>
114 #include <linux/if_alg.h>
115 #include <linux/rtc.h>
116 #include <sound/asound.h>
117 #ifdef HAVE_BTRFS_H
118 #include <linux/btrfs.h>
119 #endif
120 #ifdef HAVE_DRM_H
121 #include <libdrm/drm.h>
122 #include <libdrm/i915_drm.h>
123 #endif
124 #include "linux_loop.h"
125 #include "uname.h"
126 
127 #include "qemu.h"
128 #include "qemu/guest-random.h"
129 #include "qemu/selfmap.h"
130 #include "user/syscall-trace.h"
131 #include "qapi/error.h"
132 #include "fd-trans.h"
133 #include "tcg/tcg.h"
134 
135 #ifndef CLONE_IO
136 #define CLONE_IO                0x80000000      /* Clone io context */
137 #endif
138 
139 /* We can't directly call the host clone syscall, because this will
140  * badly confuse libc (breaking mutexes, for example). So we must
141  * divide clone flags into:
142  *  * flag combinations that look like pthread_create()
143  *  * flag combinations that look like fork()
144  *  * flags we can implement within QEMU itself
145  *  * flags we can't support and will return an error for
146  */
147 /* For thread creation, all these flags must be present; for
148  * fork, none must be present.
149  */
150 #define CLONE_THREAD_FLAGS                              \
151     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
152      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
153 
154 /* These flags are ignored:
155  * CLONE_DETACHED is now ignored by the kernel;
156  * CLONE_IO is just an optimisation hint to the I/O scheduler
157  */
158 #define CLONE_IGNORED_FLAGS                     \
159     (CLONE_DETACHED | CLONE_IO)
160 
161 /* Flags for fork which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_FORK_FLAGS               \
163     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
164      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
165 
166 /* Flags for thread creation which we can implement within QEMU itself */
167 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
168     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
169      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
170 
171 #define CLONE_INVALID_FORK_FLAGS                                        \
172     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
173 
174 #define CLONE_INVALID_THREAD_FLAGS                                      \
175     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
176        CLONE_IGNORED_FLAGS))
177 
178 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
179  * have almost all been allocated. We cannot support any of
180  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
181  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
182  * The checks against the invalid thread masks above will catch these.
183  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
184  */
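/*
 * For illustration: how a caller such as do_fork() can classify a guest
 * flag word with the masks above.  The helper name and return values are
 * hypothetical; this is a sketch of how the masks are meant to be used,
 * not code from this file.
 *
 *     static int classify_clone_flags(unsigned int flags)
 *     {
 *         if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *             // pthread_create()-like: reject anything outside CSIGNAL,
 *             // the thread mask and the optional/ignored thread flags
 *             return (flags & CLONE_INVALID_THREAD_FLAGS) ? -EINVAL : 1;
 *         }
 *         if ((flags & CLONE_THREAD_FLAGS) == 0) {
 *             // fork()-like: only CSIGNAL plus the optional and ignored
 *             // fork flags may be set
 *             return (flags & CLONE_INVALID_FORK_FLAGS) ? -EINVAL : 0;
 *         }
 *         return -EINVAL;    // partial thread flag sets are unsupported
 *     }
 */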
185 
186 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
187  * once. This exercises the codepaths for restart.
188  */
189 //#define DEBUG_ERESTARTSYS
190 
191 //#include <linux/msdos_fs.h>
192 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
193 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
194 
195 #undef _syscall0
196 #undef _syscall1
197 #undef _syscall2
198 #undef _syscall3
199 #undef _syscall4
200 #undef _syscall5
201 #undef _syscall6
202 
203 #define _syscall0(type,name)		\
204 static type name (void)			\
205 {					\
206 	return syscall(__NR_##name);	\
207 }
208 
209 #define _syscall1(type,name,type1,arg1)		\
210 static type name (type1 arg1)			\
211 {						\
212 	return syscall(__NR_##name, arg1);	\
213 }
214 
215 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
216 static type name (type1 arg1,type2 arg2)		\
217 {							\
218 	return syscall(__NR_##name, arg1, arg2);	\
219 }
220 
221 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
222 static type name (type1 arg1,type2 arg2,type3 arg3)		\
223 {								\
224 	return syscall(__NR_##name, arg1, arg2, arg3);		\
225 }
226 
227 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
228 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
229 {										\
230 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
231 }
232 
233 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
234 		  type5,arg5)							\
235 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
236 {										\
237 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
238 }
239 
240 
241 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
242 		  type5,arg5,type6,arg6)					\
243 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
244                   type6 arg6)							\
245 {										\
246 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
247 }
248 
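/*
 * For illustration, an invocation further down such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
 * expands to roughly
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * i.e. each _syscallN macro is a thin static wrapper around the host
 * syscall(2) entry point, keyed off the __NR_sys_* aliases defined below.
 */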
249 
250 #define __NR_sys_uname __NR_uname
251 #define __NR_sys_getcwd1 __NR_getcwd
252 #define __NR_sys_getdents __NR_getdents
253 #define __NR_sys_getdents64 __NR_getdents64
254 #define __NR_sys_getpriority __NR_getpriority
255 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
256 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
257 #define __NR_sys_syslog __NR_syslog
258 #if defined(__NR_futex)
259 # define __NR_sys_futex __NR_futex
260 #endif
261 #if defined(__NR_futex_time64)
262 # define __NR_sys_futex_time64 __NR_futex_time64
263 #endif
264 #define __NR_sys_inotify_init __NR_inotify_init
265 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
266 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
267 #define __NR_sys_statx __NR_statx
268 
269 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
270 #define __NR__llseek __NR_lseek
271 #endif
272 
273 /* Newer kernel ports have llseek() instead of _llseek() */
274 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
275 #define TARGET_NR__llseek TARGET_NR_llseek
276 #endif
277 
278 #define __NR_sys_gettid __NR_gettid
279 _syscall0(int, sys_gettid)
280 
281 /* For the 64-bit guest on 32-bit host case we must emulate
282  * getdents using getdents64, because otherwise the host
283  * might hand us back more dirent records than we can fit
284  * into the guest buffer after structure format conversion.
285  * Otherwise we implement getdents on top of the host getdents, if the host has it.
286  */
287 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
288 #define EMULATE_GETDENTS_WITH_GETDENTS
289 #endif
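/*
 * Concrete illustration of the size problem described above (field sizes
 * are indicative only): a 32-bit host's struct linux_dirent carries 4-byte
 * d_ino/d_off fields, while the 64-bit guest's layout needs 8 bytes for
 * each, so records grow during conversion and a buffer the host filled
 * completely may no longer hold the converted records.  struct
 * linux_dirent64 uses fixed 64-bit fields on both sides, which is why
 * getdents64 is the safe fallback.
 */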
290 
291 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
292 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
293 #endif
294 #if (defined(TARGET_NR_getdents) && \
295       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
296     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
297 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
298 #endif
299 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
300 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
301           loff_t *, res, uint, wh);
302 #endif
303 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
304 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
305           siginfo_t *, uinfo)
306 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
307 #ifdef __NR_exit_group
308 _syscall1(int,exit_group,int,error_code)
309 #endif
310 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
311 _syscall1(int,set_tid_address,int *,tidptr)
312 #endif
313 #if defined(__NR_futex)
314 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
315           const struct timespec *,timeout,int *,uaddr2,int,val3)
316 #endif
317 #if defined(__NR_futex_time64)
318 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
319           const struct timespec *,timeout,int *,uaddr2,int,val3)
320 #endif
321 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
322 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
323           unsigned long *, user_mask_ptr);
324 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
325 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
326           unsigned long *, user_mask_ptr);
327 #define __NR_sys_getcpu __NR_getcpu
328 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
329 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
330           void *, arg);
331 _syscall2(int, capget, struct __user_cap_header_struct *, header,
332           struct __user_cap_data_struct *, data);
333 _syscall2(int, capset, struct __user_cap_header_struct *, header,
334           struct __user_cap_data_struct *, data);
335 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
336 _syscall2(int, ioprio_get, int, which, int, who)
337 #endif
338 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
339 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
340 #endif
341 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
342 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
343 #endif
344 
345 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
346 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
347           unsigned long, idx1, unsigned long, idx2)
348 #endif
349 
350 /*
351  * It is assumed that struct statx is architecture independent.
352  */
353 #if defined(TARGET_NR_statx) && defined(__NR_statx)
354 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
355           unsigned int, mask, struct target_statx *, statxbuf)
356 #endif
357 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
358 _syscall2(int, membarrier, int, cmd, int, flags)
359 #endif
360 
361 static bitmask_transtbl fcntl_flags_tbl[] = {
362   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
363   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
364   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
365   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
366   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
367   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
368   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
369   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
370   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
371   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
372   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
373   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
374   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
375 #if defined(O_DIRECT)
376   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
377 #endif
378 #if defined(O_NOATIME)
379   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
380 #endif
381 #if defined(O_CLOEXEC)
382   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
383 #endif
384 #if defined(O_PATH)
385   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
386 #endif
387 #if defined(O_TMPFILE)
388   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
389 #endif
390   /* Don't terminate the list prematurely on 64-bit host+guest.  */
391 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
392   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
393 #endif
394   { 0, 0, 0, 0 }
395 };
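/*
 * This table is consumed by the bitmask translation helpers used later in
 * this file; a sketch of how an openat emulation converts guest open flags
 * (illustrative only; target_flags, host_path, mode etc. are placeholders
 * and the real call site is more involved):
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     abi_long ret = get_errno(safe_openat(dirfd, host_path, host_flags, mode));
 */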
396 
397 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
398 
399 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
400 #if defined(__NR_utimensat)
401 #define __NR_sys_utimensat __NR_utimensat
402 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
403           const struct timespec *,tsp,int,flags)
404 #else
405 static int sys_utimensat(int dirfd, const char *pathname,
406                          const struct timespec times[2], int flags)
407 {
408     errno = ENOSYS;
409     return -1;
410 }
411 #endif
412 #endif /* TARGET_NR_utimensat */
413 
414 #ifdef TARGET_NR_renameat2
415 #if defined(__NR_renameat2)
416 #define __NR_sys_renameat2 __NR_renameat2
417 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
418           const char *, new, unsigned int, flags)
419 #else
420 static int sys_renameat2(int oldfd, const char *old,
421                          int newfd, const char *new, int flags)
422 {
423     if (flags == 0) {
424         return renameat(oldfd, old, newfd, new);
425     }
426     errno = ENOSYS;
427     return -1;
428 }
429 #endif
430 #endif /* TARGET_NR_renameat2 */
431 
432 #ifdef CONFIG_INOTIFY
433 #include <sys/inotify.h>
434 
435 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
436 static int sys_inotify_init(void)
437 {
438   return (inotify_init());
439 }
440 #endif
441 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
442 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
443 {
444   return (inotify_add_watch(fd, pathname, mask));
445 }
446 #endif
447 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
448 static int sys_inotify_rm_watch(int fd, int32_t wd)
449 {
450   return (inotify_rm_watch(fd, wd));
451 }
452 #endif
453 #ifdef CONFIG_INOTIFY1
454 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
455 static int sys_inotify_init1(int flags)
456 {
457   return (inotify_init1(flags));
458 }
459 #endif
460 #endif
461 #else
462 /* Userspace can usually survive runtime without inotify */
463 #undef TARGET_NR_inotify_init
464 #undef TARGET_NR_inotify_init1
465 #undef TARGET_NR_inotify_add_watch
466 #undef TARGET_NR_inotify_rm_watch
467 #endif /* CONFIG_INOTIFY  */
468 
469 #if defined(TARGET_NR_prlimit64)
470 #ifndef __NR_prlimit64
471 # define __NR_prlimit64 -1
472 #endif
473 #define __NR_sys_prlimit64 __NR_prlimit64
474 /* The glibc rlimit structure may not be that used by the underlying syscall */
475 struct host_rlimit64 {
476     uint64_t rlim_cur;
477     uint64_t rlim_max;
478 };
479 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
480           const struct host_rlimit64 *, new_limit,
481           struct host_rlimit64 *, old_limit)
482 #endif
483 
484 
485 #if defined(TARGET_NR_timer_create)
486 /* Maximum of 32 active POSIX timers allowed at any one time. */
487 static timer_t g_posix_timers[32] = { 0, };
488 
489 static inline int next_free_host_timer(void)
490 {
491     int k;
492     /* FIXME: Does finding the next free slot require a lock? */
493     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
494         if (g_posix_timers[k] == 0) {
495             g_posix_timers[k] = (timer_t) 1;
496             return k;
497         }
498     }
499     return -1;
500 }
501 #endif
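/*
 * One way to resolve the FIXME above without taking a lock is to claim a
 * slot with an atomic compare-and-swap; a sketch of the idea only, not the
 * approach this file takes:
 *
 *     for (int k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
 *         // atomically 0 -> 1 claims slot k; failure means it was taken
 *         if (__sync_bool_compare_and_swap(&g_posix_timers[k],
 *                                          (timer_t)0, (timer_t)1)) {
 *             return k;
 *         }
 *     }
 *     return -1;
 */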
502 
503 #define ERRNO_TABLE_SIZE 1200
504 
505 /* target_to_host_errno_table[] is initialized from
506  * host_to_target_errno_table[] in syscall_init(). */
507 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
508 };
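/*
 * syscall_init() itself is outside this extract; the initialization it
 * performs is presumably just the inversion of the table below, along the
 * lines of this sketch:
 *
 *     for (int i = 0; i < ERRNO_TABLE_SIZE; i++) {
 *         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
 *     }
 *
 * Host errnos with no entry stay zero and fall back to the identity
 * mapping in host_to_target_errno()/target_to_host_errno() further down.
 */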
509 
510 /*
511  * This list is the union of errno values overridden in asm-<arch>/errno.h
512  * minus the errnos that are not actually generic to all archs.
513  */
514 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
515     [EAGAIN]		= TARGET_EAGAIN,
516     [EIDRM]		= TARGET_EIDRM,
517     [ECHRNG]		= TARGET_ECHRNG,
518     [EL2NSYNC]		= TARGET_EL2NSYNC,
519     [EL3HLT]		= TARGET_EL3HLT,
520     [EL3RST]		= TARGET_EL3RST,
521     [ELNRNG]		= TARGET_ELNRNG,
522     [EUNATCH]		= TARGET_EUNATCH,
523     [ENOCSI]		= TARGET_ENOCSI,
524     [EL2HLT]		= TARGET_EL2HLT,
525     [EDEADLK]		= TARGET_EDEADLK,
526     [ENOLCK]		= TARGET_ENOLCK,
527     [EBADE]		= TARGET_EBADE,
528     [EBADR]		= TARGET_EBADR,
529     [EXFULL]		= TARGET_EXFULL,
530     [ENOANO]		= TARGET_ENOANO,
531     [EBADRQC]		= TARGET_EBADRQC,
532     [EBADSLT]		= TARGET_EBADSLT,
533     [EBFONT]		= TARGET_EBFONT,
534     [ENOSTR]		= TARGET_ENOSTR,
535     [ENODATA]		= TARGET_ENODATA,
536     [ETIME]		= TARGET_ETIME,
537     [ENOSR]		= TARGET_ENOSR,
538     [ENONET]		= TARGET_ENONET,
539     [ENOPKG]		= TARGET_ENOPKG,
540     [EREMOTE]		= TARGET_EREMOTE,
541     [ENOLINK]		= TARGET_ENOLINK,
542     [EADV]		= TARGET_EADV,
543     [ESRMNT]		= TARGET_ESRMNT,
544     [ECOMM]		= TARGET_ECOMM,
545     [EPROTO]		= TARGET_EPROTO,
546     [EDOTDOT]		= TARGET_EDOTDOT,
547     [EMULTIHOP]		= TARGET_EMULTIHOP,
548     [EBADMSG]		= TARGET_EBADMSG,
549     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
550     [EOVERFLOW]		= TARGET_EOVERFLOW,
551     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
552     [EBADFD]		= TARGET_EBADFD,
553     [EREMCHG]		= TARGET_EREMCHG,
554     [ELIBACC]		= TARGET_ELIBACC,
555     [ELIBBAD]		= TARGET_ELIBBAD,
556     [ELIBSCN]		= TARGET_ELIBSCN,
557     [ELIBMAX]		= TARGET_ELIBMAX,
558     [ELIBEXEC]		= TARGET_ELIBEXEC,
559     [EILSEQ]		= TARGET_EILSEQ,
560     [ENOSYS]		= TARGET_ENOSYS,
561     [ELOOP]		= TARGET_ELOOP,
562     [ERESTART]		= TARGET_ERESTART,
563     [ESTRPIPE]		= TARGET_ESTRPIPE,
564     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
565     [EUSERS]		= TARGET_EUSERS,
566     [ENOTSOCK]		= TARGET_ENOTSOCK,
567     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
568     [EMSGSIZE]		= TARGET_EMSGSIZE,
569     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
570     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
571     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
572     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
573     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
574     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
575     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
576     [EADDRINUSE]	= TARGET_EADDRINUSE,
577     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
578     [ENETDOWN]		= TARGET_ENETDOWN,
579     [ENETUNREACH]	= TARGET_ENETUNREACH,
580     [ENETRESET]		= TARGET_ENETRESET,
581     [ECONNABORTED]	= TARGET_ECONNABORTED,
582     [ECONNRESET]	= TARGET_ECONNRESET,
583     [ENOBUFS]		= TARGET_ENOBUFS,
584     [EISCONN]		= TARGET_EISCONN,
585     [ENOTCONN]		= TARGET_ENOTCONN,
586     [EUCLEAN]		= TARGET_EUCLEAN,
587     [ENOTNAM]		= TARGET_ENOTNAM,
588     [ENAVAIL]		= TARGET_ENAVAIL,
589     [EISNAM]		= TARGET_EISNAM,
590     [EREMOTEIO]		= TARGET_EREMOTEIO,
591     [EDQUOT]            = TARGET_EDQUOT,
592     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
593     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
594     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
595     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
596     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
597     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
598     [EALREADY]		= TARGET_EALREADY,
599     [EINPROGRESS]	= TARGET_EINPROGRESS,
600     [ESTALE]		= TARGET_ESTALE,
601     [ECANCELED]		= TARGET_ECANCELED,
602     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
603     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
604 #ifdef ENOKEY
605     [ENOKEY]		= TARGET_ENOKEY,
606 #endif
607 #ifdef EKEYEXPIRED
608     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
609 #endif
610 #ifdef EKEYREVOKED
611     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
612 #endif
613 #ifdef EKEYREJECTED
614     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
615 #endif
616 #ifdef EOWNERDEAD
617     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
618 #endif
619 #ifdef ENOTRECOVERABLE
620     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
621 #endif
622 #ifdef ENOMSG
623     [ENOMSG]            = TARGET_ENOMSG,
624 #endif
625 #ifdef ERFKILL
626     [ERFKILL]           = TARGET_ERFKILL,
627 #endif
628 #ifdef EHWPOISON
629     [EHWPOISON]         = TARGET_EHWPOISON,
630 #endif
631 };
632 
633 static inline int host_to_target_errno(int err)
634 {
635     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
636         host_to_target_errno_table[err]) {
637         return host_to_target_errno_table[err];
638     }
639     return err;
640 }
641 
642 static inline int target_to_host_errno(int err)
643 {
644     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
645         target_to_host_errno_table[err]) {
646         return target_to_host_errno_table[err];
647     }
648     return err;
649 }
650 
651 static inline abi_long get_errno(abi_long ret)
652 {
653     if (ret == -1)
654         return -host_to_target_errno(errno);
655     else
656         return ret;
657 }
658 
659 const char *target_strerror(int err)
660 {
661     if (err == TARGET_ERESTARTSYS) {
662         return "To be restarted";
663     }
664     if (err == TARGET_QEMU_ESIGRETURN) {
665         return "Successful exit from sigreturn";
666     }
667 
668     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
669         return NULL;
670     }
671     return strerror(target_to_host_errno(err));
672 }
673 
674 #define safe_syscall0(type, name) \
675 static type safe_##name(void) \
676 { \
677     return safe_syscall(__NR_##name); \
678 }
679 
680 #define safe_syscall1(type, name, type1, arg1) \
681 static type safe_##name(type1 arg1) \
682 { \
683     return safe_syscall(__NR_##name, arg1); \
684 }
685 
686 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
687 static type safe_##name(type1 arg1, type2 arg2) \
688 { \
689     return safe_syscall(__NR_##name, arg1, arg2); \
690 }
691 
692 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
693 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
694 { \
695     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
696 }
697 
698 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
699     type4, arg4) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
703 }
704 
705 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4, type5, arg5) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
708     type5 arg5) \
709 { \
710     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
711 }
712 
713 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
714     type4, arg4, type5, arg5, type6, arg6) \
715 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
716     type5 arg5, type6 arg6) \
717 { \
718     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
719 }
720 
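/*
 * Typical use of these wrappers inside a syscall handler, pairing
 * safe_read() (declared just below) with get_errno() from above.  This is
 * an illustrative sketch; the real TARGET_NR_read handling later in this
 * file also deals with zero-length reads and fd translation:
 *
 *     abi_long ret;
 *     void *p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
 *     if (!p) {
 *         return -TARGET_EFAULT;
 *     }
 *     ret = get_errno(safe_read(arg1, p, arg3));
 *     unlock_user(p, arg2, ret >= 0 ? ret : 0);
 *     return ret;
 */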
721 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
722 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
723 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
724               int, flags, mode_t, mode)
725 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
726 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
727               struct rusage *, rusage)
728 #endif
729 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
730               int, options, struct rusage *, rusage)
731 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
732 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
733     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
734 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
735               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
736 #endif
737 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
738 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
739               struct timespec *, tsp, const sigset_t *, sigmask,
740               size_t, sigsetsize)
741 #endif
742 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
743               int, maxevents, int, timeout, const sigset_t *, sigmask,
744               size_t, sigsetsize)
745 #if defined(__NR_futex)
746 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
747               const struct timespec *,timeout,int *,uaddr2,int,val3)
748 #endif
749 #if defined(__NR_futex_time64)
750 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
751               const struct timespec *,timeout,int *,uaddr2,int,val3)
752 #endif
753 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
754 safe_syscall2(int, kill, pid_t, pid, int, sig)
755 safe_syscall2(int, tkill, int, tid, int, sig)
756 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
757 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
758 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
759 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
760               unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
762               unsigned long, pos_l, unsigned long, pos_h)
763 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
764               socklen_t, addrlen)
765 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
766               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
767 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
768               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
769 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
770 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
771 safe_syscall2(int, flock, int, fd, int, operation)
772 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
773 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
774               const struct timespec *, uts, size_t, sigsetsize)
775 #endif
776 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
777               int, flags)
778 #if defined(TARGET_NR_nanosleep)
779 safe_syscall2(int, nanosleep, const struct timespec *, req,
780               struct timespec *, rem)
781 #endif
782 #if defined(TARGET_NR_clock_nanosleep) || \
783     defined(TARGET_NR_clock_nanosleep_time64)
784 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
785               const struct timespec *, req, struct timespec *, rem)
786 #endif
787 #ifdef __NR_ipc
788 #ifdef __s390x__
789 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
790               void *, ptr)
791 #else
792 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
793               void *, ptr, long, fifth)
794 #endif
795 #endif
796 #ifdef __NR_msgsnd
797 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
798               int, flags)
799 #endif
800 #ifdef __NR_msgrcv
801 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
802               long, msgtype, int, flags)
803 #endif
804 #ifdef __NR_semtimedop
805 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
806               unsigned, nsops, const struct timespec *, timeout)
807 #endif
808 #if defined(TARGET_NR_mq_timedsend) || \
809     defined(TARGET_NR_mq_timedsend_time64)
810 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
811               size_t, len, unsigned, prio, const struct timespec *, timeout)
812 #endif
813 #if defined(TARGET_NR_mq_timedreceive) || \
814     defined(TARGET_NR_mq_timedreceive_time64)
815 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
816               size_t, len, unsigned *, prio, const struct timespec *, timeout)
817 #endif
818 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
819 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
820               int, outfd, loff_t *, poutoff, size_t, length,
821               unsigned int, flags)
822 #endif
823 
824 /* We do ioctl like this rather than via safe_syscall3 to preserve the
825  * "third argument might be integer or pointer or not present" behaviour of
826  * the libc function.
827  */
828 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
829 /* Similarly for fcntl. Note that callers must always:
830  *  - pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
831  *  - use the flock64 struct rather than the unsuffixed flock
832  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
833  */
834 #ifdef __NR_fcntl64
835 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
836 #else
837 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
838 #endif
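/*
 * An illustrative call that follows the rule above, i.e. always the 64-bit
 * flavours regardless of host word size (sketch only):
 *
 *     struct flock64 fl64;
 *     memset(&fl64, 0, sizeof(fl64));
 *     fl64.l_type = F_RDLCK;
 *     fl64.l_whence = SEEK_SET;
 *     abi_long ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */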
839 
840 static inline int host_to_target_sock_type(int host_type)
841 {
842     int target_type;
843 
844     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
845     case SOCK_DGRAM:
846         target_type = TARGET_SOCK_DGRAM;
847         break;
848     case SOCK_STREAM:
849         target_type = TARGET_SOCK_STREAM;
850         break;
851     default:
852         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
853         break;
854     }
855 
856 #if defined(SOCK_CLOEXEC)
857     if (host_type & SOCK_CLOEXEC) {
858         target_type |= TARGET_SOCK_CLOEXEC;
859     }
860 #endif
861 
862 #if defined(SOCK_NONBLOCK)
863     if (host_type & SOCK_NONBLOCK) {
864         target_type |= TARGET_SOCK_NONBLOCK;
865     }
866 #endif
867 
868     return target_type;
869 }
870 
871 static abi_ulong target_brk;
872 static abi_ulong target_original_brk;
873 static abi_ulong brk_page;
874 
875 void target_set_brk(abi_ulong new_brk)
876 {
877     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
878     brk_page = HOST_PAGE_ALIGN(target_brk);
879 }
880 
881 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
882 #define DEBUGF_BRK(message, args...)
883 
884 /* do_brk() must return target values and target errnos. */
885 abi_long do_brk(abi_ulong new_brk)
886 {
887     abi_long mapped_addr;
888     abi_ulong new_alloc_size;
889 
890     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
891 
892     if (!new_brk) {
893         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
894         return target_brk;
895     }
896     if (new_brk < target_original_brk) {
897         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
898                    target_brk);
899         return target_brk;
900     }
901 
902     /* If the new brk is less than the highest page reserved to the
903      * target heap allocation, set it and we're almost done...  */
904     if (new_brk <= brk_page) {
905         /* Heap contents are initialized to zero, as for anonymous
906          * mapped pages.  */
907         if (new_brk > target_brk) {
908             memset(g2h(target_brk), 0, new_brk - target_brk);
909         }
910         target_brk = new_brk;
911         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
912         return target_brk;
913     }
914 
915     /* We need to allocate more memory after the brk... Note that
916      * we don't use MAP_FIXED because that will map over the top of
917      * any existing mapping (like the one with the host libc or qemu
918      * itself); instead we treat "mapped but at wrong address" as
919      * a failure and unmap again.
920      */
921     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
922     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
923                                         PROT_READ|PROT_WRITE,
924                                         MAP_ANON|MAP_PRIVATE, 0, 0));
925 
926     if (mapped_addr == brk_page) {
927         /* Heap contents are initialized to zero, as for anonymous
928          * mapped pages.  Technically the new pages are already
929          * initialized to zero since they *are* anonymous mapped
930          * pages, however we have to take care with the contents that
931          * come from the remaining part of the previous page: it may
932      * contain garbage data due to a previous heap usage (grown
933      * then shrunk).  */
934         memset(g2h(target_brk), 0, brk_page - target_brk);
935 
936         target_brk = new_brk;
937         brk_page = HOST_PAGE_ALIGN(target_brk);
938         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
939             target_brk);
940         return target_brk;
941     } else if (mapped_addr != -1) {
942         /* Mapped but at wrong address, meaning there wasn't actually
943          * enough space for this brk.
944          */
945         target_munmap(mapped_addr, new_alloc_size);
946         mapped_addr = -1;
947         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
948     }
949     else {
950         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
951     }
952 
953 #if defined(TARGET_ALPHA)
954     /* We (partially) emulate OSF/1 on Alpha, which requires we
955        return a proper errno, not an unchanged brk value.  */
956     return -TARGET_ENOMEM;
957 #endif
958     /* For everything else, return the previous break. */
959     return target_brk;
960 }
961 
962 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
963     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
964 static inline abi_long copy_from_user_fdset(fd_set *fds,
965                                             abi_ulong target_fds_addr,
966                                             int n)
967 {
968     int i, nw, j, k;
969     abi_ulong b, *target_fds;
970 
971     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
972     if (!(target_fds = lock_user(VERIFY_READ,
973                                  target_fds_addr,
974                                  sizeof(abi_ulong) * nw,
975                                  1)))
976         return -TARGET_EFAULT;
977 
978     FD_ZERO(fds);
979     k = 0;
980     for (i = 0; i < nw; i++) {
981         /* grab the abi_ulong */
982         __get_user(b, &target_fds[i]);
983         for (j = 0; j < TARGET_ABI_BITS; j++) {
984             /* check the bit inside the abi_ulong */
985             if ((b >> j) & 1)
986                 FD_SET(k, fds);
987             k++;
988         }
989     }
990 
991     unlock_user(target_fds, target_fds_addr, 0);
992 
993     return 0;
994 }
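/*
 * Worked example of the packing above: with n = 70 and TARGET_ABI_BITS = 32,
 * nw = DIV_ROUND_UP(70, 32) = 3, so three abi_ulong words (12 bytes) are
 * locked and scanned; guest descriptor 37 lives in word i = 1 at bit j = 5.
 * copy_to_user_fdset() below performs the same arithmetic in reverse.
 */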
995 
996 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
997                                                  abi_ulong target_fds_addr,
998                                                  int n)
999 {
1000     if (target_fds_addr) {
1001         if (copy_from_user_fdset(fds, target_fds_addr, n))
1002             return -TARGET_EFAULT;
1003         *fds_ptr = fds;
1004     } else {
1005         *fds_ptr = NULL;
1006     }
1007     return 0;
1008 }
1009 
1010 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1011                                           const fd_set *fds,
1012                                           int n)
1013 {
1014     int i, nw, j, k;
1015     abi_long v;
1016     abi_ulong *target_fds;
1017 
1018     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1019     if (!(target_fds = lock_user(VERIFY_WRITE,
1020                                  target_fds_addr,
1021                                  sizeof(abi_ulong) * nw,
1022                                  0)))
1023         return -TARGET_EFAULT;
1024 
1025     k = 0;
1026     for (i = 0; i < nw; i++) {
1027         v = 0;
1028         for (j = 0; j < TARGET_ABI_BITS; j++) {
1029             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1030             k++;
1031         }
1032         __put_user(v, &target_fds[i]);
1033     }
1034 
1035     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1036 
1037     return 0;
1038 }
1039 #endif
1040 
1041 #if defined(__alpha__)
1042 #define HOST_HZ 1024
1043 #else
1044 #define HOST_HZ 100
1045 #endif
1046 
1047 static inline abi_long host_to_target_clock_t(long ticks)
1048 {
1049 #if HOST_HZ == TARGET_HZ
1050     return ticks;
1051 #else
1052     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1053 #endif
1054 }
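/*
 * Numeric example of the conversion above: on an Alpha host (HOST_HZ = 1024)
 * emulating a target with TARGET_HZ = 100, 2048 host ticks become
 * (2048 * 100) / 1024 = 200 target ticks; when the rates match, the value
 * passes through unchanged.
 */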
1055 
1056 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1057                                              const struct rusage *rusage)
1058 {
1059     struct target_rusage *target_rusage;
1060 
1061     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1062         return -TARGET_EFAULT;
1063     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1064     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1065     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1066     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1067     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1068     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1069     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1070     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1071     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1072     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1073     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1074     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1075     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1076     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1077     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1078     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1079     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1080     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1081     unlock_user_struct(target_rusage, target_addr, 1);
1082 
1083     return 0;
1084 }
1085 
1086 #ifdef TARGET_NR_setrlimit
1087 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1088 {
1089     abi_ulong target_rlim_swap;
1090     rlim_t result;
1091 
1092     target_rlim_swap = tswapal(target_rlim);
1093     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1094         return RLIM_INFINITY;
1095 
1096     result = target_rlim_swap;
1097     if (target_rlim_swap != (rlim_t)result)
1098         return RLIM_INFINITY;
1099 
1100     return result;
1101 }
1102 #endif
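/*
 * Example of the clamping above: on most 32-bit targets TARGET_RLIM_INFINITY
 * is (abi_ulong)-1, so a guest passing 0xffffffff gets host RLIM_INFINITY
 * rather than a literal 4 GiB - 1 limit, and any value that does not round
 * trip through rlim_t is likewise treated as unlimited.
 */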
1103 
1104 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1105 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1106 {
1107     abi_ulong target_rlim_swap;
1108     abi_ulong result;
1109 
1110     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1111         target_rlim_swap = TARGET_RLIM_INFINITY;
1112     else
1113         target_rlim_swap = rlim;
1114     result = tswapal(target_rlim_swap);
1115 
1116     return result;
1117 }
1118 #endif
1119 
1120 static inline int target_to_host_resource(int code)
1121 {
1122     switch (code) {
1123     case TARGET_RLIMIT_AS:
1124         return RLIMIT_AS;
1125     case TARGET_RLIMIT_CORE:
1126         return RLIMIT_CORE;
1127     case TARGET_RLIMIT_CPU:
1128         return RLIMIT_CPU;
1129     case TARGET_RLIMIT_DATA:
1130         return RLIMIT_DATA;
1131     case TARGET_RLIMIT_FSIZE:
1132         return RLIMIT_FSIZE;
1133     case TARGET_RLIMIT_LOCKS:
1134         return RLIMIT_LOCKS;
1135     case TARGET_RLIMIT_MEMLOCK:
1136         return RLIMIT_MEMLOCK;
1137     case TARGET_RLIMIT_MSGQUEUE:
1138         return RLIMIT_MSGQUEUE;
1139     case TARGET_RLIMIT_NICE:
1140         return RLIMIT_NICE;
1141     case TARGET_RLIMIT_NOFILE:
1142         return RLIMIT_NOFILE;
1143     case TARGET_RLIMIT_NPROC:
1144         return RLIMIT_NPROC;
1145     case TARGET_RLIMIT_RSS:
1146         return RLIMIT_RSS;
1147     case TARGET_RLIMIT_RTPRIO:
1148         return RLIMIT_RTPRIO;
1149     case TARGET_RLIMIT_SIGPENDING:
1150         return RLIMIT_SIGPENDING;
1151     case TARGET_RLIMIT_STACK:
1152         return RLIMIT_STACK;
1153     default:
1154         return code;
1155     }
1156 }
1157 
1158 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1159                                               abi_ulong target_tv_addr)
1160 {
1161     struct target_timeval *target_tv;
1162 
1163     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1164         return -TARGET_EFAULT;
1165     }
1166 
1167     __get_user(tv->tv_sec, &target_tv->tv_sec);
1168     __get_user(tv->tv_usec, &target_tv->tv_usec);
1169 
1170     unlock_user_struct(target_tv, target_tv_addr, 0);
1171 
1172     return 0;
1173 }
1174 
1175 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1176                                             const struct timeval *tv)
1177 {
1178     struct target_timeval *target_tv;
1179 
1180     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1181         return -TARGET_EFAULT;
1182     }
1183 
1184     __put_user(tv->tv_sec, &target_tv->tv_sec);
1185     __put_user(tv->tv_usec, &target_tv->tv_usec);
1186 
1187     unlock_user_struct(target_tv, target_tv_addr, 1);
1188 
1189     return 0;
1190 }
1191 
1192 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1193 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1194                                                 abi_ulong target_tv_addr)
1195 {
1196     struct target__kernel_sock_timeval *target_tv;
1197 
1198     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1199         return -TARGET_EFAULT;
1200     }
1201 
1202     __get_user(tv->tv_sec, &target_tv->tv_sec);
1203     __get_user(tv->tv_usec, &target_tv->tv_usec);
1204 
1205     unlock_user_struct(target_tv, target_tv_addr, 0);
1206 
1207     return 0;
1208 }
1209 #endif
1210 
1211 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1212                                               const struct timeval *tv)
1213 {
1214     struct target__kernel_sock_timeval *target_tv;
1215 
1216     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1217         return -TARGET_EFAULT;
1218     }
1219 
1220     __put_user(tv->tv_sec, &target_tv->tv_sec);
1221     __put_user(tv->tv_usec, &target_tv->tv_usec);
1222 
1223     unlock_user_struct(target_tv, target_tv_addr, 1);
1224 
1225     return 0;
1226 }
1227 
1228 #if defined(TARGET_NR_futex) || \
1229     defined(TARGET_NR_rt_sigtimedwait) || \
1230     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1231     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1232     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1233     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1234     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1235     defined(TARGET_NR_timer_settime) || \
1236     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1237 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1238                                                abi_ulong target_addr)
1239 {
1240     struct target_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     unlock_user_struct(target_ts, target_addr, 0);
1248     return 0;
1249 }
1250 #endif
1251 
1252 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1253     defined(TARGET_NR_timer_settime64) || \
1254     defined(TARGET_NR_mq_timedsend_time64) || \
1255     defined(TARGET_NR_mq_timedreceive_time64) || \
1256     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1257     defined(TARGET_NR_clock_nanosleep_time64) || \
1258     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1259     defined(TARGET_NR_utimensat) || \
1260     defined(TARGET_NR_utimensat_time64) || \
1261     defined(TARGET_NR_semtimedop_time64) || \
1262     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1263 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1264                                                  abi_ulong target_addr)
1265 {
1266     struct target__kernel_timespec *target_ts;
1267 
1268     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1269         return -TARGET_EFAULT;
1270     }
1271     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1272     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1273     /* In 32-bit mode this truncates away the padding word and sign-extends tv_nsec. */
1274     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1275     unlock_user_struct(target_ts, target_addr, 0);
1276     return 0;
1277 }
1278 #endif
1279 
1280 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1281                                                struct timespec *host_ts)
1282 {
1283     struct target_timespec *target_ts;
1284 
1285     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1286         return -TARGET_EFAULT;
1287     }
1288     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1289     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1290     unlock_user_struct(target_ts, target_addr, 1);
1291     return 0;
1292 }
1293 
1294 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1295                                                  struct timespec *host_ts)
1296 {
1297     struct target__kernel_timespec *target_ts;
1298 
1299     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1300         return -TARGET_EFAULT;
1301     }
1302     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1303     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1304     unlock_user_struct(target_ts, target_addr, 1);
1305     return 0;
1306 }
1307 
1308 #if defined(TARGET_NR_gettimeofday)
1309 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1310                                              struct timezone *tz)
1311 {
1312     struct target_timezone *target_tz;
1313 
1314     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1315         return -TARGET_EFAULT;
1316     }
1317 
1318     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1319     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1320 
1321     unlock_user_struct(target_tz, target_tz_addr, 1);
1322 
1323     return 0;
1324 }
1325 #endif
1326 
1327 #if defined(TARGET_NR_settimeofday)
1328 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1329                                                abi_ulong target_tz_addr)
1330 {
1331     struct target_timezone *target_tz;
1332 
1333     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1334         return -TARGET_EFAULT;
1335     }
1336 
1337     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1338     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1339 
1340     unlock_user_struct(target_tz, target_tz_addr, 0);
1341 
1342     return 0;
1343 }
1344 #endif
1345 
1346 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1347 #include <mqueue.h>
1348 
1349 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1350                                               abi_ulong target_mq_attr_addr)
1351 {
1352     struct target_mq_attr *target_mq_attr;
1353 
1354     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1355                           target_mq_attr_addr, 1))
1356         return -TARGET_EFAULT;
1357 
1358     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1359     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1360     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1361     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1362 
1363     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1364 
1365     return 0;
1366 }
1367 
1368 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1369                                             const struct mq_attr *attr)
1370 {
1371     struct target_mq_attr *target_mq_attr;
1372 
1373     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1374                           target_mq_attr_addr, 0))
1375         return -TARGET_EFAULT;
1376 
1377     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1378     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1379     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1380     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1381 
1382     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1383 
1384     return 0;
1385 }
1386 #endif
1387 
1388 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1389 /* do_select() must return target values and target errnos. */
1390 static abi_long do_select(int n,
1391                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1392                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1393 {
1394     fd_set rfds, wfds, efds;
1395     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1396     struct timeval tv;
1397     struct timespec ts, *ts_ptr;
1398     abi_long ret;
1399 
1400     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1401     if (ret) {
1402         return ret;
1403     }
1404     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1405     if (ret) {
1406         return ret;
1407     }
1408     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1409     if (ret) {
1410         return ret;
1411     }
1412 
1413     if (target_tv_addr) {
1414         if (copy_from_user_timeval(&tv, target_tv_addr))
1415             return -TARGET_EFAULT;
1416         ts.tv_sec = tv.tv_sec;
1417         ts.tv_nsec = tv.tv_usec * 1000;
1418         ts_ptr = &ts;
1419     } else {
1420         ts_ptr = NULL;
1421     }
1422 
1423     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1424                                   ts_ptr, NULL));
1425 
1426     if (!is_error(ret)) {
1427         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1428             return -TARGET_EFAULT;
1429         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1430             return -TARGET_EFAULT;
1431         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1432             return -TARGET_EFAULT;
1433 
1434         if (target_tv_addr) {
1435             tv.tv_sec = ts.tv_sec;
1436             tv.tv_usec = ts.tv_nsec / 1000;
1437             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1438                 return -TARGET_EFAULT;
1439             }
1440         }
1441     }
1442 
1443     return ret;
1444 }
1445 
1446 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1447 static abi_long do_old_select(abi_ulong arg1)
1448 {
1449     struct target_sel_arg_struct *sel;
1450     abi_ulong inp, outp, exp, tvp;
1451     long nsel;
1452 
1453     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1454         return -TARGET_EFAULT;
1455     }
1456 
1457     nsel = tswapal(sel->n);
1458     inp = tswapal(sel->inp);
1459     outp = tswapal(sel->outp);
1460     exp = tswapal(sel->exp);
1461     tvp = tswapal(sel->tvp);
1462 
1463     unlock_user_struct(sel, arg1, 0);
1464 
1465     return do_select(nsel, inp, outp, exp, tvp);
1466 }
1467 #endif
1468 #endif
1469 
1470 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1471 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1472                             abi_long arg4, abi_long arg5, abi_long arg6,
1473                             bool time64)
1474 {
1475     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1476     fd_set rfds, wfds, efds;
1477     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1478     struct timespec ts, *ts_ptr;
1479     abi_long ret;
1480 
1481     /*
1482      * The 6th arg is actually two args smashed together,
1483      * so we cannot use the C library.
1484      */
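/*
 * Viewed from the guest, arg6 points at two packed abi_ulong values,
 * equivalent to this hypothetical layout (this is what the
 * lock_user()/tswapal() sequence below unpacks):
 *
 *     struct {
 *         abi_ulong sigset_addr;  // guest pointer to a target_sigset_t
 *         abi_ulong sigset_size;  // must equal sizeof(target_sigset_t)
 *     };
 */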
1485     sigset_t set;
1486     struct {
1487         sigset_t *set;
1488         size_t size;
1489     } sig, *sig_ptr;
1490 
1491     abi_ulong arg_sigset, arg_sigsize, *arg7;
1492     target_sigset_t *target_sigset;
1493 
1494     n = arg1;
1495     rfd_addr = arg2;
1496     wfd_addr = arg3;
1497     efd_addr = arg4;
1498     ts_addr = arg5;
1499 
1500     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1501     if (ret) {
1502         return ret;
1503     }
1504     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1505     if (ret) {
1506         return ret;
1507     }
1508     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1509     if (ret) {
1510         return ret;
1511     }
1512 
1513     /*
1514      * This takes a timespec, and not a timeval, so we cannot
1515      * use the do_select() helper ...
1516      */
1517     if (ts_addr) {
1518         if (time64) {
1519             if (target_to_host_timespec64(&ts, ts_addr)) {
1520                 return -TARGET_EFAULT;
1521             }
1522         } else {
1523             if (target_to_host_timespec(&ts, ts_addr)) {
1524                 return -TARGET_EFAULT;
1525             }
1526         }
1527         ts_ptr = &ts;
1528     } else {
1529         ts_ptr = NULL;
1530     }
1531 
1532     /* Extract the two packed args for the sigset */
1533     if (arg6) {
1534         sig_ptr = &sig;
1535         sig.size = SIGSET_T_SIZE;
1536 
1537         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1538         if (!arg7) {
1539             return -TARGET_EFAULT;
1540         }
1541         arg_sigset = tswapal(arg7[0]);
1542         arg_sigsize = tswapal(arg7[1]);
1543         unlock_user(arg7, arg6, 0);
1544 
1545         if (arg_sigset) {
1546             sig.set = &set;
1547             if (arg_sigsize != sizeof(*target_sigset)) {
1548                 /* Like the kernel, we enforce correct size sigsets */
1549                 return -TARGET_EINVAL;
1550             }
1551             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1552                                       sizeof(*target_sigset), 1);
1553             if (!target_sigset) {
1554                 return -TARGET_EFAULT;
1555             }
1556             target_to_host_sigset(&set, target_sigset);
1557             unlock_user(target_sigset, arg_sigset, 0);
1558         } else {
1559             sig.set = NULL;
1560         }
1561     } else {
1562         sig_ptr = NULL;
1563     }
1564 
1565     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1566                                   ts_ptr, sig_ptr));
1567 
1568     if (!is_error(ret)) {
1569         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1570             return -TARGET_EFAULT;
1571         }
1572         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1573             return -TARGET_EFAULT;
1574         }
1575         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1576             return -TARGET_EFAULT;
1577         }
1578         if (time64) {
1579             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1580                 return -TARGET_EFAULT;
1581             }
1582         } else {
1583             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1584                 return -TARGET_EFAULT;
1585             }
1586         }
1587     }
1588     return ret;
1589 }
1590 #endif
1591 
1592 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1593     defined(TARGET_NR_ppoll_time64)
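/*
 * Common implementation of poll, ppoll and ppoll_time64.  The guest
 * pollfd array is converted on the host stack; for ppoll the optional
 * timespec (layout chosen by 'time64') and sigset are converted as
 * well, while plain poll's millisecond timeout is turned into a
 * timespec before calling safe_ppoll().  revents are copied back on
 * success.
 */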
1594 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1595                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1596 {
1597     struct target_pollfd *target_pfd;
1598     unsigned int nfds = arg2;
1599     struct pollfd *pfd;
1600     unsigned int i;
1601     abi_long ret;
1602 
1603     pfd = NULL;
1604     target_pfd = NULL;
1605     if (nfds) {
1606         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1607             return -TARGET_EINVAL;
1608         }
1609         target_pfd = lock_user(VERIFY_WRITE, arg1,
1610                                sizeof(struct target_pollfd) * nfds, 1);
1611         if (!target_pfd) {
1612             return -TARGET_EFAULT;
1613         }
1614 
1615         pfd = alloca(sizeof(struct pollfd) * nfds);
1616         for (i = 0; i < nfds; i++) {
1617             pfd[i].fd = tswap32(target_pfd[i].fd);
1618             pfd[i].events = tswap16(target_pfd[i].events);
1619         }
1620     }
1621     if (ppoll) {
1622         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1623         target_sigset_t *target_set;
1624         sigset_t _set, *set = &_set;
1625 
1626         if (arg3) {
1627             if (time64) {
1628                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1629                     unlock_user(target_pfd, arg1, 0);
1630                     return -TARGET_EFAULT;
1631                 }
1632             } else {
1633                 if (target_to_host_timespec(timeout_ts, arg3)) {
1634                     unlock_user(target_pfd, arg1, 0);
1635                     return -TARGET_EFAULT;
1636                 }
1637             }
1638         } else {
1639             timeout_ts = NULL;
1640         }
1641 
1642         if (arg4) {
1643             if (arg5 != sizeof(target_sigset_t)) {
1644                 unlock_user(target_pfd, arg1, 0);
1645                 return -TARGET_EINVAL;
1646             }
1647 
1648             target_set = lock_user(VERIFY_READ, arg4,
1649                                    sizeof(target_sigset_t), 1);
1650             if (!target_set) {
1651                 unlock_user(target_pfd, arg1, 0);
1652                 return -TARGET_EFAULT;
1653             }
1654             target_to_host_sigset(set, target_set);
1655         } else {
1656             set = NULL;
1657         }
1658 
1659         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1660                                    set, SIGSET_T_SIZE));
1661 
1662         if (!is_error(ret) && arg3) {
1663             if (time64) {
1664                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1665                     return -TARGET_EFAULT;
1666                 }
1667             } else {
1668                 if (host_to_target_timespec(arg3, timeout_ts)) {
1669                     return -TARGET_EFAULT;
1670                 }
1671             }
1672         }
1673         if (arg4) {
1674             unlock_user(target_set, arg4, 0);
1675         }
1676     } else {
1677         struct timespec ts, *pts;
1678
1679         if (arg3 >= 0) {
1680             /* Convert ms to secs, ns */
1681             ts.tv_sec = arg3 / 1000;
1682             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1683             pts = &ts;
1684         } else {
1685             /* -ve poll() timeout means "infinite" */
1686             pts = NULL;
1687         }
1688         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1689     }
1690 
1691     if (!is_error(ret)) {
1692         for (i = 0; i < nfds; i++) {
1693             target_pfd[i].revents = tswap16(pfd[i].revents);
1694         }
1695     }
1696     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1697     return ret;
1698 }
1699 #endif
1700 
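/*
 * Thin wrapper around the host pipe2(); returns -ENOSYS when the host
 * does not provide it.
 */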
1701 static abi_long do_pipe2(int host_pipe[], int flags)
1702 {
1703 #ifdef CONFIG_PIPE2
1704     return pipe2(host_pipe, flags);
1705 #else
1706     return -ENOSYS;
1707 #endif
1708 }
1709 
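/*
 * Create a pipe (via pipe2() when flags are given) and write both
 * descriptors to guest memory at 'pipedes', except for the targets
 * with register-based return conventions handled below.
 */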
1710 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1711                         int flags, int is_pipe2)
1712 {
1713     int host_pipe[2];
1714     abi_long ret;
1715     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1716 
1717     if (is_error(ret))
1718         return get_errno(ret);
1719 
1720     /* Several targets have special calling conventions for the original
1721        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1722     if (!is_pipe2) {
1723 #if defined(TARGET_ALPHA)
1724         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1725         return host_pipe[0];
1726 #elif defined(TARGET_MIPS)
1727         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1728         return host_pipe[0];
1729 #elif defined(TARGET_SH4)
1730         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1731         return host_pipe[0];
1732 #elif defined(TARGET_SPARC)
1733         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1734         return host_pipe[0];
1735 #endif
1736     }
1737 
1738     if (put_user_s32(host_pipe[0], pipedes)
1739         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1740         return -TARGET_EFAULT;
1741     return get_errno(ret);
1742 }
1743 
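/*
 * Convert a guest ip_mreq/ip_mreqn into the host ip_mreqn passed to
 * setsockopt().  The multicast and interface addresses are already in
 * network byte order, so only imr_ifindex needs swapping, and only
 * when the guest actually supplied an ip_mreqn.
 */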
1744 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1745                                               abi_ulong target_addr,
1746                                               socklen_t len)
1747 {
1748     struct target_ip_mreqn *target_smreqn;
1749 
1750     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1751     if (!target_smreqn)
1752         return -TARGET_EFAULT;
1753     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1754     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1755     if (len == sizeof(struct target_ip_mreqn))
1756         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1757     unlock_user(target_smreqn, target_addr, 0);
1758 
1759     return 0;
1760 }
1761 
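/*
 * Copy a guest sockaddr into a host one, byte-swapping sa_family and
 * the family-specific fields that need it (AF_NETLINK, AF_PACKET).
 * For AF_UNIX an unterminated sun_path is fixed up below.  An
 * fd-specific translator registered via fd_trans takes precedence.
 */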
1762 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1763                                                abi_ulong target_addr,
1764                                                socklen_t len)
1765 {
1766     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1767     sa_family_t sa_family;
1768     struct target_sockaddr *target_saddr;
1769 
1770     if (fd_trans_target_to_host_addr(fd)) {
1771         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1772     }
1773 
1774     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1775     if (!target_saddr)
1776         return -TARGET_EFAULT;
1777 
1778     sa_family = tswap16(target_saddr->sa_family);
1779 
1780     /* Oops. The caller might send an incomplete sun_path; sun_path
1781      * must be terminated by \0 (see the manual page), but
1782      * unfortunately it is quite common to specify the sockaddr_un
1783      * length as "strlen(x->sun_path)" when it should be
1784      * "strlen(...) + 1". We fix that here if needed; the Linux
1785      * kernel has a similar workaround.
1786      */
1787 
1788     if (sa_family == AF_UNIX) {
1789         if (len < unix_maxlen && len > 0) {
1790             char *cp = (char*)target_saddr;
1791 
1792             if (cp[len - 1] && !cp[len])
1793                 len++;
1794         }
1795         if (len > unix_maxlen)
1796             len = unix_maxlen;
1797     }
1798 
1799     memcpy(addr, target_saddr, len);
1800     addr->sa_family = sa_family;
1801     if (sa_family == AF_NETLINK) {
1802         struct sockaddr_nl *nladdr;
1803 
1804         nladdr = (struct sockaddr_nl *)addr;
1805         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1806         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1807     } else if (sa_family == AF_PACKET) {
1808         struct target_sockaddr_ll *lladdr;
1809
1810         lladdr = (struct target_sockaddr_ll *)addr;
1811         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1812         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1813     }
1814     unlock_user(target_saddr, target_addr, 0);
1815 
1816     return 0;
1817 }
1818 
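/*
 * Copy a host sockaddr back into guest memory, byte-swapping
 * sa_family and the family-specific fields (netlink pid/groups,
 * packet ifindex/hatype, IPv6 scope id) when the buffer is large
 * enough to contain them.
 */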
1819 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1820                                                struct sockaddr *addr,
1821                                                socklen_t len)
1822 {
1823     struct target_sockaddr *target_saddr;
1824 
1825     if (len == 0) {
1826         return 0;
1827     }
1828     assert(addr);
1829 
1830     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1831     if (!target_saddr)
1832         return -TARGET_EFAULT;
1833     memcpy(target_saddr, addr, len);
1834     if (len >= offsetof(struct target_sockaddr, sa_family) +
1835         sizeof(target_saddr->sa_family)) {
1836         target_saddr->sa_family = tswap16(addr->sa_family);
1837     }
1838     if (addr->sa_family == AF_NETLINK &&
1839         len >= sizeof(struct target_sockaddr_nl)) {
1840         struct target_sockaddr_nl *target_nl =
1841                (struct target_sockaddr_nl *)target_saddr;
1842         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1843         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1844     } else if (addr->sa_family == AF_PACKET) {
1845         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1846         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1847         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1848     } else if (addr->sa_family == AF_INET6 &&
1849                len >= sizeof(struct target_sockaddr_in6)) {
1850         struct target_sockaddr_in6 *target_in6 =
1851                (struct target_sockaddr_in6 *)target_saddr;
1852         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1853     }
1854     unlock_user(target_saddr, target_addr, len);
1855 
1856     return 0;
1857 }
1858 
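/*
 * Convert the ancillary (control) data attached to a guest msghdr
 * into the host cmsg buffer already set up in 'msgh'.  SCM_RIGHTS fd
 * arrays and SCM_CREDENTIALS are converted field by field; anything
 * else is copied verbatim with a LOG_UNIMP warning.
 */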
1859 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1860                                            struct target_msghdr *target_msgh)
1861 {
1862     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1863     abi_long msg_controllen;
1864     abi_ulong target_cmsg_addr;
1865     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1866     socklen_t space = 0;
1867 
1868     msg_controllen = tswapal(target_msgh->msg_controllen);
1869     if (msg_controllen < sizeof (struct target_cmsghdr))
1870         goto the_end;
1871     target_cmsg_addr = tswapal(target_msgh->msg_control);
1872     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1873     target_cmsg_start = target_cmsg;
1874     if (!target_cmsg)
1875         return -TARGET_EFAULT;
1876 
1877     while (cmsg && target_cmsg) {
1878         void *data = CMSG_DATA(cmsg);
1879         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1880 
1881         int len = tswapal(target_cmsg->cmsg_len)
1882             - sizeof(struct target_cmsghdr);
1883 
1884         space += CMSG_SPACE(len);
1885         if (space > msgh->msg_controllen) {
1886             space -= CMSG_SPACE(len);
1887             /* This is a QEMU bug, since we allocated the payload
1888              * area ourselves (unlike overflow in host-to-target
1889              * conversion, which is just the guest giving us a buffer
1890              * that's too small). It can't happen for the payload types
1891              * we currently support; if it becomes an issue in future
1892              * we would need to improve our allocation strategy to
1893              * something more intelligent than "twice the size of the
1894              * target buffer we're reading from".
1895              */
1896             qemu_log_mask(LOG_UNIMP,
1897                           ("Unsupported ancillary data %d/%d: "
1898                            "unhandled msg size\n"),
1899                           tswap32(target_cmsg->cmsg_level),
1900                           tswap32(target_cmsg->cmsg_type));
1901             break;
1902         }
1903 
1904         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1905             cmsg->cmsg_level = SOL_SOCKET;
1906         } else {
1907             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1908         }
1909         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1910         cmsg->cmsg_len = CMSG_LEN(len);
1911 
1912         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1913             int *fd = (int *)data;
1914             int *target_fd = (int *)target_data;
1915             int i, numfds = len / sizeof(int);
1916 
1917             for (i = 0; i < numfds; i++) {
1918                 __get_user(fd[i], target_fd + i);
1919             }
1920         } else if (cmsg->cmsg_level == SOL_SOCKET
1921                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1922             struct ucred *cred = (struct ucred *)data;
1923             struct target_ucred *target_cred =
1924                 (struct target_ucred *)target_data;
1925 
1926             __get_user(cred->pid, &target_cred->pid);
1927             __get_user(cred->uid, &target_cred->uid);
1928             __get_user(cred->gid, &target_cred->gid);
1929         } else {
1930             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1931                           cmsg->cmsg_level, cmsg->cmsg_type);
1932             memcpy(data, target_data, len);
1933         }
1934 
1935         cmsg = CMSG_NXTHDR(msgh, cmsg);
1936         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1937                                          target_cmsg_start);
1938     }
1939     unlock_user(target_cmsg, target_cmsg_addr, 0);
1940  the_end:
1941     msgh->msg_controllen = space;
1942     return 0;
1943 }
1944 
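/*
 * Convert host ancillary data back into the guest control buffer,
 * adjusting the payload size where the target layout differs (e.g.
 * SO_TIMESTAMP carries a struct timeval) and setting MSG_CTRUNC when
 * the guest buffer is too small to hold the converted data.
 */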
1945 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1946                                            struct msghdr *msgh)
1947 {
1948     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1949     abi_long msg_controllen;
1950     abi_ulong target_cmsg_addr;
1951     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1952     socklen_t space = 0;
1953 
1954     msg_controllen = tswapal(target_msgh->msg_controllen);
1955     if (msg_controllen < sizeof (struct target_cmsghdr))
1956         goto the_end;
1957     target_cmsg_addr = tswapal(target_msgh->msg_control);
1958     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1959     target_cmsg_start = target_cmsg;
1960     if (!target_cmsg)
1961         return -TARGET_EFAULT;
1962 
1963     while (cmsg && target_cmsg) {
1964         void *data = CMSG_DATA(cmsg);
1965         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1966 
1967         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1968         int tgt_len, tgt_space;
1969 
1970         /* We never copy a half-header but may copy half-data;
1971          * this is Linux's behaviour in put_cmsg(). Note that
1972          * truncation here is a guest problem (which we report
1973          * to the guest via the CTRUNC bit), unlike truncation
1974          * in target_to_host_cmsg, which is a QEMU bug.
1975          */
1976         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1977             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1978             break;
1979         }
1980 
1981         if (cmsg->cmsg_level == SOL_SOCKET) {
1982             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1983         } else {
1984             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1985         }
1986         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1987 
1988         /* Payload types which need a different size of payload on
1989          * the target must adjust tgt_len here.
1990          */
1991         tgt_len = len;
1992         switch (cmsg->cmsg_level) {
1993         case SOL_SOCKET:
1994             switch (cmsg->cmsg_type) {
1995             case SO_TIMESTAMP:
1996                 tgt_len = sizeof(struct target_timeval);
1997                 break;
1998             default:
1999                 break;
2000             }
2001             break;
2002         default:
2003             break;
2004         }
2005 
2006         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2007             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2008             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2009         }
2010 
2011         /* We must now copy-and-convert len bytes of payload
2012          * into tgt_len bytes of destination space. Bear in mind
2013          * that in both source and destination we may be dealing
2014          * with a truncated value!
2015          */
2016         switch (cmsg->cmsg_level) {
2017         case SOL_SOCKET:
2018             switch (cmsg->cmsg_type) {
2019             case SCM_RIGHTS:
2020             {
2021                 int *fd = (int *)data;
2022                 int *target_fd = (int *)target_data;
2023                 int i, numfds = tgt_len / sizeof(int);
2024 
2025                 for (i = 0; i < numfds; i++) {
2026                     __put_user(fd[i], target_fd + i);
2027                 }
2028                 break;
2029             }
2030             case SO_TIMESTAMP:
2031             {
2032                 struct timeval *tv = (struct timeval *)data;
2033                 struct target_timeval *target_tv =
2034                     (struct target_timeval *)target_data;
2035 
2036                 if (len != sizeof(struct timeval) ||
2037                     tgt_len != sizeof(struct target_timeval)) {
2038                     goto unimplemented;
2039                 }
2040 
2041                 /* copy struct timeval to target */
2042                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2043                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2044                 break;
2045             }
2046             case SCM_CREDENTIALS:
2047             {
2048                 struct ucred *cred = (struct ucred *)data;
2049                 struct target_ucred *target_cred =
2050                     (struct target_ucred *)target_data;
2051 
2052                 __put_user(cred->pid, &target_cred->pid);
2053                 __put_user(cred->uid, &target_cred->uid);
2054                 __put_user(cred->gid, &target_cred->gid);
2055                 break;
2056             }
2057             default:
2058                 goto unimplemented;
2059             }
2060             break;
2061 
2062         case SOL_IP:
2063             switch (cmsg->cmsg_type) {
2064             case IP_TTL:
2065             {
2066                 uint32_t *v = (uint32_t *)data;
2067                 uint32_t *t_int = (uint32_t *)target_data;
2068 
2069                 if (len != sizeof(uint32_t) ||
2070                     tgt_len != sizeof(uint32_t)) {
2071                     goto unimplemented;
2072                 }
2073                 __put_user(*v, t_int);
2074                 break;
2075             }
2076             case IP_RECVERR:
2077             {
2078                 struct errhdr_t {
2079                    struct sock_extended_err ee;
2080                    struct sockaddr_in offender;
2081                 };
2082                 struct errhdr_t *errh = (struct errhdr_t *)data;
2083                 struct errhdr_t *target_errh =
2084                     (struct errhdr_t *)target_data;
2085 
2086                 if (len != sizeof(struct errhdr_t) ||
2087                     tgt_len != sizeof(struct errhdr_t)) {
2088                     goto unimplemented;
2089                 }
2090                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2091                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2092                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2093                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2094                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2095                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2096                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2097                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2098                     (void *) &errh->offender, sizeof(errh->offender));
2099                 break;
2100             }
2101             default:
2102                 goto unimplemented;
2103             }
2104             break;
2105 
2106         case SOL_IPV6:
2107             switch (cmsg->cmsg_type) {
2108             case IPV6_HOPLIMIT:
2109             {
2110                 uint32_t *v = (uint32_t *)data;
2111                 uint32_t *t_int = (uint32_t *)target_data;
2112 
2113                 if (len != sizeof(uint32_t) ||
2114                     tgt_len != sizeof(uint32_t)) {
2115                     goto unimplemented;
2116                 }
2117                 __put_user(*v, t_int);
2118                 break;
2119             }
2120             case IPV6_RECVERR:
2121             {
2122                 struct errhdr6_t {
2123                    struct sock_extended_err ee;
2124                    struct sockaddr_in6 offender;
2125                 };
2126                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2127                 struct errhdr6_t *target_errh =
2128                     (struct errhdr6_t *)target_data;
2129 
2130                 if (len != sizeof(struct errhdr6_t) ||
2131                     tgt_len != sizeof(struct errhdr6_t)) {
2132                     goto unimplemented;
2133                 }
2134                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2135                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2136                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2137                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2138                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2139                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2140                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2141                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2142                     (void *) &errh->offender, sizeof(errh->offender));
2143                 break;
2144             }
2145             default:
2146                 goto unimplemented;
2147             }
2148             break;
2149 
2150         default:
2151         unimplemented:
2152             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2153                           cmsg->cmsg_level, cmsg->cmsg_type);
2154             memcpy(target_data, data, MIN(len, tgt_len));
2155             if (tgt_len > len) {
2156                 memset(target_data + len, 0, tgt_len - len);
2157             }
2158         }
2159 
2160         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2161         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2162         if (msg_controllen < tgt_space) {
2163             tgt_space = msg_controllen;
2164         }
2165         msg_controllen -= tgt_space;
2166         space += tgt_space;
2167         cmsg = CMSG_NXTHDR(msgh, cmsg);
2168         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2169                                          target_cmsg_start);
2170     }
2171     unlock_user(target_cmsg, target_cmsg_addr, space);
2172  the_end:
2173     target_msgh->msg_controllen = tswapal(space);
2174     return 0;
2175 }
2176 
2177 /* do_setsockopt() must return target values and target errnos. */
2178 static abi_long do_setsockopt(int sockfd, int level, int optname,
2179                               abi_ulong optval_addr, socklen_t optlen)
2180 {
2181     abi_long ret;
2182     int val;
2183     struct ip_mreqn *ip_mreq;
2184     struct ip_mreq_source *ip_mreq_source;
2185 
2186     switch(level) {
2187     case SOL_TCP:
2188     case SOL_UDP:
2189         /* TCP and UDP options all take an 'int' value.  */
2190         if (optlen < sizeof(uint32_t))
2191             return -TARGET_EINVAL;
2192 
2193         if (get_user_u32(val, optval_addr))
2194             return -TARGET_EFAULT;
2195         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2196         break;
2197     case SOL_IP:
2198         switch(optname) {
2199         case IP_TOS:
2200         case IP_TTL:
2201         case IP_HDRINCL:
2202         case IP_ROUTER_ALERT:
2203         case IP_RECVOPTS:
2204         case IP_RETOPTS:
2205         case IP_PKTINFO:
2206         case IP_MTU_DISCOVER:
2207         case IP_RECVERR:
2208         case IP_RECVTTL:
2209         case IP_RECVTOS:
2210 #ifdef IP_FREEBIND
2211         case IP_FREEBIND:
2212 #endif
2213         case IP_MULTICAST_TTL:
2214         case IP_MULTICAST_LOOP:
2215             val = 0;
2216             if (optlen >= sizeof(uint32_t)) {
2217                 if (get_user_u32(val, optval_addr))
2218                     return -TARGET_EFAULT;
2219             } else if (optlen >= 1) {
2220                 if (get_user_u8(val, optval_addr))
2221                     return -TARGET_EFAULT;
2222             }
2223             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2224             break;
2225         case IP_ADD_MEMBERSHIP:
2226         case IP_DROP_MEMBERSHIP:
2227             if (optlen < sizeof (struct target_ip_mreq) ||
2228                 optlen > sizeof (struct target_ip_mreqn))
2229                 return -TARGET_EINVAL;
2230 
2231             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2232             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2233             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2234             break;
2235 
2236         case IP_BLOCK_SOURCE:
2237         case IP_UNBLOCK_SOURCE:
2238         case IP_ADD_SOURCE_MEMBERSHIP:
2239         case IP_DROP_SOURCE_MEMBERSHIP:
2240             if (optlen != sizeof (struct target_ip_mreq_source))
2241                 return -TARGET_EINVAL;
2242 
2243             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
2244             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2245             unlock_user(ip_mreq_source, optval_addr, 0);
2246             break;
2247 
2248         default:
2249             goto unimplemented;
2250         }
2251         break;
2252     case SOL_IPV6:
2253         switch (optname) {
2254         case IPV6_MTU_DISCOVER:
2255         case IPV6_MTU:
2256         case IPV6_V6ONLY:
2257         case IPV6_RECVPKTINFO:
2258         case IPV6_UNICAST_HOPS:
2259         case IPV6_MULTICAST_HOPS:
2260         case IPV6_MULTICAST_LOOP:
2261         case IPV6_RECVERR:
2262         case IPV6_RECVHOPLIMIT:
2263         case IPV6_2292HOPLIMIT:
2264         case IPV6_CHECKSUM:
2265         case IPV6_ADDRFORM:
2266         case IPV6_2292PKTINFO:
2267         case IPV6_RECVTCLASS:
2268         case IPV6_RECVRTHDR:
2269         case IPV6_2292RTHDR:
2270         case IPV6_RECVHOPOPTS:
2271         case IPV6_2292HOPOPTS:
2272         case IPV6_RECVDSTOPTS:
2273         case IPV6_2292DSTOPTS:
2274         case IPV6_TCLASS:
2275 #ifdef IPV6_RECVPATHMTU
2276         case IPV6_RECVPATHMTU:
2277 #endif
2278 #ifdef IPV6_TRANSPARENT
2279         case IPV6_TRANSPARENT:
2280 #endif
2281 #ifdef IPV6_FREEBIND
2282         case IPV6_FREEBIND:
2283 #endif
2284 #ifdef IPV6_RECVORIGDSTADDR
2285         case IPV6_RECVORIGDSTADDR:
2286 #endif
2287             val = 0;
2288             if (optlen < sizeof(uint32_t)) {
2289                 return -TARGET_EINVAL;
2290             }
2291             if (get_user_u32(val, optval_addr)) {
2292                 return -TARGET_EFAULT;
2293             }
2294             ret = get_errno(setsockopt(sockfd, level, optname,
2295                                        &val, sizeof(val)));
2296             break;
2297         case IPV6_PKTINFO:
2298         {
2299             struct in6_pktinfo pki;
2300 
2301             if (optlen < sizeof(pki)) {
2302                 return -TARGET_EINVAL;
2303             }
2304 
2305             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2306                 return -TARGET_EFAULT;
2307             }
2308 
2309             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2310 
2311             ret = get_errno(setsockopt(sockfd, level, optname,
2312                                        &pki, sizeof(pki)));
2313             break;
2314         }
2315         case IPV6_ADD_MEMBERSHIP:
2316         case IPV6_DROP_MEMBERSHIP:
2317         {
2318             struct ipv6_mreq ipv6mreq;
2319 
2320             if (optlen < sizeof(ipv6mreq)) {
2321                 return -TARGET_EINVAL;
2322             }
2323 
2324             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2325                 return -TARGET_EFAULT;
2326             }
2327 
2328             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2329 
2330             ret = get_errno(setsockopt(sockfd, level, optname,
2331                                        &ipv6mreq, sizeof(ipv6mreq)));
2332             break;
2333         }
2334         default:
2335             goto unimplemented;
2336         }
2337         break;
2338     case SOL_ICMPV6:
2339         switch (optname) {
2340         case ICMPV6_FILTER:
2341         {
2342             struct icmp6_filter icmp6f;
2343 
2344             if (optlen > sizeof(icmp6f)) {
2345                 optlen = sizeof(icmp6f);
2346             }
2347 
2348             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2349                 return -TARGET_EFAULT;
2350             }
2351 
2352             for (val = 0; val < 8; val++) {
2353                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2354             }
2355 
2356             ret = get_errno(setsockopt(sockfd, level, optname,
2357                                        &icmp6f, optlen));
2358             break;
2359         }
2360         default:
2361             goto unimplemented;
2362         }
2363         break;
2364     case SOL_RAW:
2365         switch (optname) {
2366         case ICMP_FILTER:
2367         case IPV6_CHECKSUM:
2368             /* these options take a u32 value */
2369             if (optlen < sizeof(uint32_t)) {
2370                 return -TARGET_EINVAL;
2371             }
2372 
2373             if (get_user_u32(val, optval_addr)) {
2374                 return -TARGET_EFAULT;
2375             }
2376             ret = get_errno(setsockopt(sockfd, level, optname,
2377                                        &val, sizeof(val)));
2378             break;
2379 
2380         default:
2381             goto unimplemented;
2382         }
2383         break;
2384 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2385     case SOL_ALG:
2386         switch (optname) {
2387         case ALG_SET_KEY:
2388         {
2389             char *alg_key = g_malloc(optlen);
2390 
2391             if (!alg_key) {
2392                 return -TARGET_ENOMEM;
2393             }
2394             if (copy_from_user(alg_key, optval_addr, optlen)) {
2395                 g_free(alg_key);
2396                 return -TARGET_EFAULT;
2397             }
2398             ret = get_errno(setsockopt(sockfd, level, optname,
2399                                        alg_key, optlen));
2400             g_free(alg_key);
2401             break;
2402         }
2403         case ALG_SET_AEAD_AUTHSIZE:
2404         {
2405             ret = get_errno(setsockopt(sockfd, level, optname,
2406                                        NULL, optlen));
2407             break;
2408         }
2409         default:
2410             goto unimplemented;
2411         }
2412         break;
2413 #endif
2414     case TARGET_SOL_SOCKET:
2415         switch (optname) {
2416         case TARGET_SO_RCVTIMEO:
2417         {
2418                 struct timeval tv;
2419 
2420                 optname = SO_RCVTIMEO;
2421 
2422 set_timeout:
2423                 if (optlen != sizeof(struct target_timeval)) {
2424                     return -TARGET_EINVAL;
2425                 }
2426 
2427                 if (copy_from_user_timeval(&tv, optval_addr)) {
2428                     return -TARGET_EFAULT;
2429                 }
2430 
2431                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2432                                 &tv, sizeof(tv)));
2433                 return ret;
2434         }
2435         case TARGET_SO_SNDTIMEO:
2436                 optname = SO_SNDTIMEO;
2437                 goto set_timeout;
2438         case TARGET_SO_ATTACH_FILTER:
2439         {
2440                 struct target_sock_fprog *tfprog;
2441                 struct target_sock_filter *tfilter;
2442                 struct sock_fprog fprog;
2443                 struct sock_filter *filter;
2444                 int i;
2445 
2446                 if (optlen != sizeof(*tfprog)) {
2447                     return -TARGET_EINVAL;
2448                 }
2449                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2450                     return -TARGET_EFAULT;
2451                 }
2452                 if (!lock_user_struct(VERIFY_READ, tfilter,
2453                                       tswapal(tfprog->filter), 0)) {
2454                     unlock_user_struct(tfprog, optval_addr, 1);
2455                     return -TARGET_EFAULT;
2456                 }
2457 
2458                 fprog.len = tswap16(tfprog->len);
2459                 filter = g_try_new(struct sock_filter, fprog.len);
2460                 if (filter == NULL) {
2461                     unlock_user_struct(tfilter, tfprog->filter, 1);
2462                     unlock_user_struct(tfprog, optval_addr, 1);
2463                     return -TARGET_ENOMEM;
2464                 }
2465                 for (i = 0; i < fprog.len; i++) {
2466                     filter[i].code = tswap16(tfilter[i].code);
2467                     filter[i].jt = tfilter[i].jt;
2468                     filter[i].jf = tfilter[i].jf;
2469                     filter[i].k = tswap32(tfilter[i].k);
2470                 }
2471                 fprog.filter = filter;
2472 
2473                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2474                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2475                 g_free(filter);
2476 
2477                 unlock_user_struct(tfilter, tfprog->filter, 1);
2478                 unlock_user_struct(tfprog, optval_addr, 1);
2479                 return ret;
2480         }
2481         case TARGET_SO_BINDTODEVICE:
2482         {
2483                 char *dev_ifname, *addr_ifname;
2484
2485                 if (optlen > IFNAMSIZ - 1) {
2486                     optlen = IFNAMSIZ - 1;
2487                 }
2488                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2489                 if (!dev_ifname) {
2490                     return -TARGET_EFAULT;
2491                 }
2492                 optname = SO_BINDTODEVICE;
2493                 addr_ifname = alloca(IFNAMSIZ);
2494                 memcpy(addr_ifname, dev_ifname, optlen);
2495                 addr_ifname[optlen] = 0;
2496                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2497                                            addr_ifname, optlen));
2498                 unlock_user(dev_ifname, optval_addr, 0);
2499                 return ret;
2500         }
2501         case TARGET_SO_LINGER:
2502         {
2503                 struct linger lg;
2504                 struct target_linger *tlg;
2505 
2506                 if (optlen != sizeof(struct target_linger)) {
2507                     return -TARGET_EINVAL;
2508                 }
2509                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2510                     return -TARGET_EFAULT;
2511                 }
2512                 __get_user(lg.l_onoff, &tlg->l_onoff);
2513                 __get_user(lg.l_linger, &tlg->l_linger);
2514                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2515                                 &lg, sizeof(lg)));
2516                 unlock_user_struct(tlg, optval_addr, 0);
2517                 return ret;
2518         }
2519             /* Options with 'int' argument.  */
2520         case TARGET_SO_DEBUG:
2521 		optname = SO_DEBUG;
2522 		break;
2523         case TARGET_SO_REUSEADDR:
2524 		optname = SO_REUSEADDR;
2525 		break;
2526 #ifdef SO_REUSEPORT
2527         case TARGET_SO_REUSEPORT:
2528                 optname = SO_REUSEPORT;
2529                 break;
2530 #endif
2531         case TARGET_SO_TYPE:
2532 		optname = SO_TYPE;
2533 		break;
2534         case TARGET_SO_ERROR:
2535 		optname = SO_ERROR;
2536 		break;
2537         case TARGET_SO_DONTROUTE:
2538 		optname = SO_DONTROUTE;
2539 		break;
2540         case TARGET_SO_BROADCAST:
2541 		optname = SO_BROADCAST;
2542 		break;
2543         case TARGET_SO_SNDBUF:
2544 		optname = SO_SNDBUF;
2545 		break;
2546         case TARGET_SO_SNDBUFFORCE:
2547                 optname = SO_SNDBUFFORCE;
2548                 break;
2549         case TARGET_SO_RCVBUF:
2550 		optname = SO_RCVBUF;
2551 		break;
2552         case TARGET_SO_RCVBUFFORCE:
2553                 optname = SO_RCVBUFFORCE;
2554                 break;
2555         case TARGET_SO_KEEPALIVE:
2556 		optname = SO_KEEPALIVE;
2557 		break;
2558         case TARGET_SO_OOBINLINE:
2559 		optname = SO_OOBINLINE;
2560 		break;
2561         case TARGET_SO_NO_CHECK:
2562 		optname = SO_NO_CHECK;
2563 		break;
2564         case TARGET_SO_PRIORITY:
2565 		optname = SO_PRIORITY;
2566 		break;
2567 #ifdef SO_BSDCOMPAT
2568         case TARGET_SO_BSDCOMPAT:
2569 		optname = SO_BSDCOMPAT;
2570 		break;
2571 #endif
2572         case TARGET_SO_PASSCRED:
2573 		optname = SO_PASSCRED;
2574 		break;
2575         case TARGET_SO_PASSSEC:
2576                 optname = SO_PASSSEC;
2577                 break;
2578         case TARGET_SO_TIMESTAMP:
2579 		optname = SO_TIMESTAMP;
2580 		break;
2581         case TARGET_SO_RCVLOWAT:
2582 		optname = SO_RCVLOWAT;
2583 		break;
2584         default:
2585             goto unimplemented;
2586         }
2587         if (optlen < sizeof(uint32_t))
2588             return -TARGET_EINVAL;
2589
2590         if (get_user_u32(val, optval_addr))
2591             return -TARGET_EFAULT;
2592         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2593         break;
2594 #ifdef SOL_NETLINK
2595     case SOL_NETLINK:
2596         switch (optname) {
2597         case NETLINK_PKTINFO:
2598         case NETLINK_ADD_MEMBERSHIP:
2599         case NETLINK_DROP_MEMBERSHIP:
2600         case NETLINK_BROADCAST_ERROR:
2601         case NETLINK_NO_ENOBUFS:
2602 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2603         case NETLINK_LISTEN_ALL_NSID:
2604         case NETLINK_CAP_ACK:
2605 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2606 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2607         case NETLINK_EXT_ACK:
2608 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2609 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2610         case NETLINK_GET_STRICT_CHK:
2611 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2612             break;
2613         default:
2614             goto unimplemented;
2615         }
2616         val = 0;
2617         if (optlen < sizeof(uint32_t)) {
2618             return -TARGET_EINVAL;
2619         }
2620         if (get_user_u32(val, optval_addr)) {
2621             return -TARGET_EFAULT;
2622         }
2623         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2624                                    sizeof(val)));
2625         break;
2626 #endif /* SOL_NETLINK */
2627     default:
2628     unimplemented:
2629         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2630                       level, optname);
2631         ret = -TARGET_ENOPROTOOPT;
2632     }
2633     return ret;
2634 }
2635 
2636 /* do_getsockopt() must return target values and target errnos. */
2637 static abi_long do_getsockopt(int sockfd, int level, int optname,
2638                               abi_ulong optval_addr, abi_ulong optlen)
2639 {
2640     abi_long ret;
2641     int len, val;
2642     socklen_t lv;
2643 
2644     switch(level) {
2645     case TARGET_SOL_SOCKET:
2646         level = SOL_SOCKET;
2647         switch (optname) {
2648         /* These don't just return a single integer */
2649         case TARGET_SO_PEERNAME:
2650             goto unimplemented;
2651         case TARGET_SO_RCVTIMEO: {
2652             struct timeval tv;
2653             socklen_t tvlen;
2654 
2655             optname = SO_RCVTIMEO;
2656 
2657 get_timeout:
2658             if (get_user_u32(len, optlen)) {
2659                 return -TARGET_EFAULT;
2660             }
2661             if (len < 0) {
2662                 return -TARGET_EINVAL;
2663             }
2664 
2665             tvlen = sizeof(tv);
2666             ret = get_errno(getsockopt(sockfd, level, optname,
2667                                        &tv, &tvlen));
2668             if (ret < 0) {
2669                 return ret;
2670             }
2671             if (len > sizeof(struct target_timeval)) {
2672                 len = sizeof(struct target_timeval);
2673             }
2674             if (copy_to_user_timeval(optval_addr, &tv)) {
2675                 return -TARGET_EFAULT;
2676             }
2677             if (put_user_u32(len, optlen)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             break;
2681         }
2682         case TARGET_SO_SNDTIMEO:
2683             optname = SO_SNDTIMEO;
2684             goto get_timeout;
2685         case TARGET_SO_PEERCRED: {
2686             struct ucred cr;
2687             socklen_t crlen;
2688             struct target_ucred *tcr;
2689 
2690             if (get_user_u32(len, optlen)) {
2691                 return -TARGET_EFAULT;
2692             }
2693             if (len < 0) {
2694                 return -TARGET_EINVAL;
2695             }
2696 
2697             crlen = sizeof(cr);
2698             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2699                                        &cr, &crlen));
2700             if (ret < 0) {
2701                 return ret;
2702             }
2703             if (len > crlen) {
2704                 len = crlen;
2705             }
2706             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2707                 return -TARGET_EFAULT;
2708             }
2709             __put_user(cr.pid, &tcr->pid);
2710             __put_user(cr.uid, &tcr->uid);
2711             __put_user(cr.gid, &tcr->gid);
2712             unlock_user_struct(tcr, optval_addr, 1);
2713             if (put_user_u32(len, optlen)) {
2714                 return -TARGET_EFAULT;
2715             }
2716             break;
2717         }
2718         case TARGET_SO_PEERSEC: {
2719             char *name;
2720 
2721             if (get_user_u32(len, optlen)) {
2722                 return -TARGET_EFAULT;
2723             }
2724             if (len < 0) {
2725                 return -TARGET_EINVAL;
2726             }
2727             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2728             if (!name) {
2729                 return -TARGET_EFAULT;
2730             }
2731             lv = len;
2732             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2733                                        name, &lv));
2734             if (put_user_u32(lv, optlen)) {
2735                 ret = -TARGET_EFAULT;
2736             }
2737             unlock_user(name, optval_addr, lv);
2738             break;
2739         }
2740         case TARGET_SO_LINGER:
2741         {
2742             struct linger lg;
2743             socklen_t lglen;
2744             struct target_linger *tlg;
2745 
2746             if (get_user_u32(len, optlen)) {
2747                 return -TARGET_EFAULT;
2748             }
2749             if (len < 0) {
2750                 return -TARGET_EINVAL;
2751             }
2752 
2753             lglen = sizeof(lg);
2754             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2755                                        &lg, &lglen));
2756             if (ret < 0) {
2757                 return ret;
2758             }
2759             if (len > lglen) {
2760                 len = lglen;
2761             }
2762             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2763                 return -TARGET_EFAULT;
2764             }
2765             __put_user(lg.l_onoff, &tlg->l_onoff);
2766             __put_user(lg.l_linger, &tlg->l_linger);
2767             unlock_user_struct(tlg, optval_addr, 1);
2768             if (put_user_u32(len, optlen)) {
2769                 return -TARGET_EFAULT;
2770             }
2771             break;
2772         }
2773         /* Options with 'int' argument.  */
2774         case TARGET_SO_DEBUG:
2775             optname = SO_DEBUG;
2776             goto int_case;
2777         case TARGET_SO_REUSEADDR:
2778             optname = SO_REUSEADDR;
2779             goto int_case;
2780 #ifdef SO_REUSEPORT
2781         case TARGET_SO_REUSEPORT:
2782             optname = SO_REUSEPORT;
2783             goto int_case;
2784 #endif
2785         case TARGET_SO_TYPE:
2786             optname = SO_TYPE;
2787             goto int_case;
2788         case TARGET_SO_ERROR:
2789             optname = SO_ERROR;
2790             goto int_case;
2791         case TARGET_SO_DONTROUTE:
2792             optname = SO_DONTROUTE;
2793             goto int_case;
2794         case TARGET_SO_BROADCAST:
2795             optname = SO_BROADCAST;
2796             goto int_case;
2797         case TARGET_SO_SNDBUF:
2798             optname = SO_SNDBUF;
2799             goto int_case;
2800         case TARGET_SO_RCVBUF:
2801             optname = SO_RCVBUF;
2802             goto int_case;
2803         case TARGET_SO_KEEPALIVE:
2804             optname = SO_KEEPALIVE;
2805             goto int_case;
2806         case TARGET_SO_OOBINLINE:
2807             optname = SO_OOBINLINE;
2808             goto int_case;
2809         case TARGET_SO_NO_CHECK:
2810             optname = SO_NO_CHECK;
2811             goto int_case;
2812         case TARGET_SO_PRIORITY:
2813             optname = SO_PRIORITY;
2814             goto int_case;
2815 #ifdef SO_BSDCOMPAT
2816         case TARGET_SO_BSDCOMPAT:
2817             optname = SO_BSDCOMPAT;
2818             goto int_case;
2819 #endif
2820         case TARGET_SO_PASSCRED:
2821             optname = SO_PASSCRED;
2822             goto int_case;
2823         case TARGET_SO_TIMESTAMP:
2824             optname = SO_TIMESTAMP;
2825             goto int_case;
2826         case TARGET_SO_RCVLOWAT:
2827             optname = SO_RCVLOWAT;
2828             goto int_case;
2829         case TARGET_SO_ACCEPTCONN:
2830             optname = SO_ACCEPTCONN;
2831             goto int_case;
2832         default:
2833             goto int_case;
2834         }
2835         break;
2836     case SOL_TCP:
2837     case SOL_UDP:
2838         /* TCP and UDP options all take an 'int' value.  */
2839     int_case:
2840         if (get_user_u32(len, optlen))
2841             return -TARGET_EFAULT;
2842         if (len < 0)
2843             return -TARGET_EINVAL;
2844         lv = sizeof(lv);
2845         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2846         if (ret < 0)
2847             return ret;
2848         if (optname == SO_TYPE) {
2849             val = host_to_target_sock_type(val);
2850         }
2851         if (len > lv)
2852             len = lv;
2853         if (len == 4) {
2854             if (put_user_u32(val, optval_addr))
2855                 return -TARGET_EFAULT;
2856         } else {
2857             if (put_user_u8(val, optval_addr))
2858                 return -TARGET_EFAULT;
2859         }
2860         if (put_user_u32(len, optlen))
2861             return -TARGET_EFAULT;
2862         break;
2863     case SOL_IP:
2864         switch(optname) {
2865         case IP_TOS:
2866         case IP_TTL:
2867         case IP_HDRINCL:
2868         case IP_ROUTER_ALERT:
2869         case IP_RECVOPTS:
2870         case IP_RETOPTS:
2871         case IP_PKTINFO:
2872         case IP_MTU_DISCOVER:
2873         case IP_RECVERR:
2874         case IP_RECVTOS:
2875 #ifdef IP_FREEBIND
2876         case IP_FREEBIND:
2877 #endif
2878         case IP_MULTICAST_TTL:
2879         case IP_MULTICAST_LOOP:
2880             if (get_user_u32(len, optlen))
2881                 return -TARGET_EFAULT;
2882             if (len < 0)
2883                 return -TARGET_EINVAL;
2884             lv = sizeof(lv);
2885             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2886             if (ret < 0)
2887                 return ret;
2888             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2889                 len = 1;
2890                 if (put_user_u32(len, optlen)
2891                     || put_user_u8(val, optval_addr))
2892                     return -TARGET_EFAULT;
2893             } else {
2894                 if (len > sizeof(int))
2895                     len = sizeof(int);
2896                 if (put_user_u32(len, optlen)
2897                     || put_user_u32(val, optval_addr))
2898                     return -TARGET_EFAULT;
2899             }
2900             break;
2901         default:
2902             ret = -TARGET_ENOPROTOOPT;
2903             break;
2904         }
2905         break;
2906     case SOL_IPV6:
2907         switch (optname) {
2908         case IPV6_MTU_DISCOVER:
2909         case IPV6_MTU:
2910         case IPV6_V6ONLY:
2911         case IPV6_RECVPKTINFO:
2912         case IPV6_UNICAST_HOPS:
2913         case IPV6_MULTICAST_HOPS:
2914         case IPV6_MULTICAST_LOOP:
2915         case IPV6_RECVERR:
2916         case IPV6_RECVHOPLIMIT:
2917         case IPV6_2292HOPLIMIT:
2918         case IPV6_CHECKSUM:
2919         case IPV6_ADDRFORM:
2920         case IPV6_2292PKTINFO:
2921         case IPV6_RECVTCLASS:
2922         case IPV6_RECVRTHDR:
2923         case IPV6_2292RTHDR:
2924         case IPV6_RECVHOPOPTS:
2925         case IPV6_2292HOPOPTS:
2926         case IPV6_RECVDSTOPTS:
2927         case IPV6_2292DSTOPTS:
2928         case IPV6_TCLASS:
2929 #ifdef IPV6_RECVPATHMTU
2930         case IPV6_RECVPATHMTU:
2931 #endif
2932 #ifdef IPV6_TRANSPARENT
2933         case IPV6_TRANSPARENT:
2934 #endif
2935 #ifdef IPV6_FREEBIND
2936         case IPV6_FREEBIND:
2937 #endif
2938 #ifdef IPV6_RECVORIGDSTADDR
2939         case IPV6_RECVORIGDSTADDR:
2940 #endif
2941             if (get_user_u32(len, optlen))
2942                 return -TARGET_EFAULT;
2943             if (len < 0)
2944                 return -TARGET_EINVAL;
2945             lv = sizeof(lv);
2946             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2947             if (ret < 0)
2948                 return ret;
2949             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2950                 len = 1;
2951                 if (put_user_u32(len, optlen)
2952                     || put_user_u8(val, optval_addr))
2953                     return -TARGET_EFAULT;
2954             } else {
2955                 if (len > sizeof(int))
2956                     len = sizeof(int);
2957                 if (put_user_u32(len, optlen)
2958                     || put_user_u32(val, optval_addr))
2959                     return -TARGET_EFAULT;
2960             }
2961             break;
2962         default:
2963             ret = -TARGET_ENOPROTOOPT;
2964             break;
2965         }
2966         break;
2967 #ifdef SOL_NETLINK
2968     case SOL_NETLINK:
2969         switch (optname) {
2970         case NETLINK_PKTINFO:
2971         case NETLINK_BROADCAST_ERROR:
2972         case NETLINK_NO_ENOBUFS:
2973 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2974         case NETLINK_LISTEN_ALL_NSID:
2975         case NETLINK_CAP_ACK:
2976 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2977 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2978         case NETLINK_EXT_ACK:
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2980 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2981         case NETLINK_GET_STRICT_CHK:
2982 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2983             if (get_user_u32(len, optlen)) {
2984                 return -TARGET_EFAULT;
2985             }
2986             if (len != sizeof(val)) {
2987                 return -TARGET_EINVAL;
2988             }
2989             lv = len;
2990             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2991             if (ret < 0) {
2992                 return ret;
2993             }
2994             if (put_user_u32(lv, optlen)
2995                 || put_user_u32(val, optval_addr)) {
2996                 return -TARGET_EFAULT;
2997             }
2998             break;
2999 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3000         case NETLINK_LIST_MEMBERSHIPS:
3001         {
3002             uint32_t *results;
3003             int i;
3004             if (get_user_u32(len, optlen)) {
3005                 return -TARGET_EFAULT;
3006             }
3007             if (len < 0) {
3008                 return -TARGET_EINVAL;
3009             }
3010             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3011             if (!results) {
3012                 return -TARGET_EFAULT;
3013             }
3014             lv = len;
3015             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3016             if (ret < 0) {
3017                 unlock_user(results, optval_addr, 0);
3018                 return ret;
3019             }
3020             /* Swap host endianness to target endianness. */
3021             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3022                 results[i] = tswap32(results[i]);
3023             }
3024             if (put_user_u32(lv, optlen)) {
3025                 return -TARGET_EFAULT;
3026             }
3027             unlock_user(results, optval_addr, 0);
3028             break;
3029         }
3030 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3031         default:
3032             goto unimplemented;
3033         }
3034         break;
3035 #endif /* SOL_NETLINK */
3036     default:
3037     unimplemented:
3038         qemu_log_mask(LOG_UNIMP,
3039                       "getsockopt level=%d optname=%d not yet supported\n",
3040                       level, optname);
3041         ret = -TARGET_EOPNOTSUPP;
3042         break;
3043     }
3044     return ret;
3045 }
3046 
3047 /* Convert target low/high pair representing file offset into the host
3048  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3049  * as the kernel doesn't handle them either.
3050  */
3051 static void target_to_host_low_high(abi_ulong tlow,
3052                                     abi_ulong thigh,
3053                                     unsigned long *hlow,
3054                                     unsigned long *hhigh)
3055 {
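    /*
     * Splitting the shift into two halves keeps each shift count strictly
     * below the operand width (a full-width shift would be undefined
     * behaviour in C).  Worked example for a 32-bit target: tlow = 0x89abcdef
     * and thigh = 0x01234567 recombine into off = 0x0123456789abcdef; a
     * 32-bit host then gets *hlow = 0x89abcdef and *hhigh = 0x01234567,
     * while a 64-bit host gets the whole value in *hlow and 0 in *hhigh.
     */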
3056     uint64_t off = tlow |
3057         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3058         TARGET_LONG_BITS / 2;
3059 
3060     *hlow = off;
3061     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3062 }
3063 
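/* Build a host iovec array from the guest iovec array at target_addr,
 * locking each guest buffer into host memory.  On failure NULL is returned
 * with errno set (a zero count also yields NULL, with errno cleared); a
 * successful result must be released with unlock_iovec().
 */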
3064 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3065                                 abi_ulong count, int copy)
3066 {
3067     struct target_iovec *target_vec;
3068     struct iovec *vec;
3069     abi_ulong total_len, max_len;
3070     int i;
3071     int err = 0;
3072     bool bad_address = false;
3073 
3074     if (count == 0) {
3075         errno = 0;
3076         return NULL;
3077     }
3078     if (count > IOV_MAX) {
3079         errno = EINVAL;
3080         return NULL;
3081     }
3082 
3083     vec = g_try_new0(struct iovec, count);
3084     if (vec == NULL) {
3085         errno = ENOMEM;
3086         return NULL;
3087     }
3088 
3089     target_vec = lock_user(VERIFY_READ, target_addr,
3090                            count * sizeof(struct target_iovec), 1);
3091     if (target_vec == NULL) {
3092         err = EFAULT;
3093         goto fail2;
3094     }
3095 
3096     /* ??? If host page size > target page size, this will result in a
3097        value larger than what we can actually support.  */
3098     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3099     total_len = 0;
3100 
3101     for (i = 0; i < count; i++) {
3102         abi_ulong base = tswapal(target_vec[i].iov_base);
3103         abi_long len = tswapal(target_vec[i].iov_len);
3104 
3105         if (len < 0) {
3106             err = EINVAL;
3107             goto fail;
3108         } else if (len == 0) {
3109             /* A zero-length entry is ignored.  */
3110             vec[i].iov_base = 0;
3111         } else {
3112             vec[i].iov_base = lock_user(type, base, len, copy);
3113             /* If the first buffer pointer is bad, this is a fault.  But
3114              * subsequent bad buffers will result in a partial write; this
3115              * is realized by filling the vector with null pointers and
3116              * zero lengths. */
3117             if (!vec[i].iov_base) {
3118                 if (i == 0) {
3119                     err = EFAULT;
3120                     goto fail;
3121                 } else {
3122                     bad_address = true;
3123                 }
3124             }
3125             if (bad_address) {
3126                 len = 0;
3127             }
3128             if (len > max_len - total_len) {
3129                 len = max_len - total_len;
3130             }
3131         }
3132         vec[i].iov_len = len;
3133         total_len += len;
3134     }
3135 
3136     unlock_user(target_vec, target_addr, 0);
3137     return vec;
3138 
3139  fail:
3140     while (--i >= 0) {
3141         if (tswapal(target_vec[i].iov_len) > 0) {
3142             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3143         }
3144     }
3145     unlock_user(target_vec, target_addr, 0);
3146  fail2:
3147     g_free(vec);
3148     errno = err;
3149     return NULL;
3150 }
3151 
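/* Release an iovec set up by lock_iovec().  When copy is non-zero the
 * locked buffers are written back to guest memory (the receive/read
 * direction); otherwise they are simply unlocked.
 */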
3152 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3153                          abi_ulong count, int copy)
3154 {
3155     struct target_iovec *target_vec;
3156     int i;
3157 
3158     target_vec = lock_user(VERIFY_READ, target_addr,
3159                            count * sizeof(struct target_iovec), 1);
3160     if (target_vec) {
3161         for (i = 0; i < count; i++) {
3162             abi_ulong base = tswapal(target_vec[i].iov_base);
3163             abi_long len = tswapal(target_vec[i].iov_len);
3164             if (len < 0) {
3165                 break;
3166             }
3167             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3168         }
3169         unlock_user(target_vec, target_addr, 0);
3170     }
3171 
3172     g_free(vec);
3173 }
3174 
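/* Translate the target's socket type and the SOCK_CLOEXEC/SOCK_NONBLOCK
 * flags embedded in it into host values.  Returns -TARGET_EINVAL if a
 * requested flag cannot be expressed on this host; when only O_NONBLOCK
 * is available, non-blocking mode is instead emulated via fcntl() in
 * sock_flags_fixup().
 */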
3175 static inline int target_to_host_sock_type(int *type)
3176 {
3177     int host_type = 0;
3178     int target_type = *type;
3179 
3180     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3181     case TARGET_SOCK_DGRAM:
3182         host_type = SOCK_DGRAM;
3183         break;
3184     case TARGET_SOCK_STREAM:
3185         host_type = SOCK_STREAM;
3186         break;
3187     default:
3188         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3189         break;
3190     }
3191     if (target_type & TARGET_SOCK_CLOEXEC) {
3192 #if defined(SOCK_CLOEXEC)
3193         host_type |= SOCK_CLOEXEC;
3194 #else
3195         return -TARGET_EINVAL;
3196 #endif
3197     }
3198     if (target_type & TARGET_SOCK_NONBLOCK) {
3199 #if defined(SOCK_NONBLOCK)
3200         host_type |= SOCK_NONBLOCK;
3201 #elif !defined(O_NONBLOCK)
3202         return -TARGET_EINVAL;
3203 #endif
3204     }
3205     *type = host_type;
3206     return 0;
3207 }
3208 
3209 /* Try to emulate socket type flags after socket creation.  */
3210 static int sock_flags_fixup(int fd, int target_type)
3211 {
3212 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3213     if (target_type & TARGET_SOCK_NONBLOCK) {
3214         int flags = fcntl(fd, F_GETFL);
3215         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3216             close(fd);
3217             return -TARGET_EINVAL;
3218         }
3219     }
3220 #endif
3221     return fd;
3222 }
3223 
3224 /* do_socket() Must return target values and target errnos. */
3225 static abi_long do_socket(int domain, int type, int protocol)
3226 {
3227     int target_type = type;
3228     int ret;
3229 
3230     ret = target_to_host_sock_type(&type);
3231     if (ret) {
3232         return ret;
3233     }
3234 
3235     if (domain == PF_NETLINK && !(
3236 #ifdef CONFIG_RTNETLINK
3237          protocol == NETLINK_ROUTE ||
3238 #endif
3239          protocol == NETLINK_KOBJECT_UEVENT ||
3240          protocol == NETLINK_AUDIT)) {
3241         return -TARGET_EPROTONOSUPPORT;
3242     }
3243 
3244     if (domain == AF_PACKET ||
3245         (domain == AF_INET && type == SOCK_PACKET)) {
3246         protocol = tswap16(protocol);
3247     }
3248 
3249     ret = get_errno(socket(domain, type, protocol));
3250     if (ret >= 0) {
3251         ret = sock_flags_fixup(ret, target_type);
3252         if (type == SOCK_PACKET) {
3253             /* Manage an obsolete case:
3254              * if socket type is SOCK_PACKET, bind by name
3255              */
3256             fd_trans_register(ret, &target_packet_trans);
3257         } else if (domain == PF_NETLINK) {
3258             switch (protocol) {
3259 #ifdef CONFIG_RTNETLINK
3260             case NETLINK_ROUTE:
3261                 fd_trans_register(ret, &target_netlink_route_trans);
3262                 break;
3263 #endif
3264             case NETLINK_KOBJECT_UEVENT:
3265                 /* nothing to do: messages are strings */
3266                 break;
3267             case NETLINK_AUDIT:
3268                 fd_trans_register(ret, &target_netlink_audit_trans);
3269                 break;
3270             default:
3271                 g_assert_not_reached();
3272             }
3273         }
3274     }
3275     return ret;
3276 }
3277 
3278 /* do_bind() Must return target values and target errnos. */
3279 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3280                         socklen_t addrlen)
3281 {
3282     void *addr;
3283     abi_long ret;
3284 
3285     if ((int)addrlen < 0) {
3286         return -TARGET_EINVAL;
3287     }
3288 
3289     addr = alloca(addrlen+1);
3290 
3291     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3292     if (ret)
3293         return ret;
3294 
3295     return get_errno(bind(sockfd, addr, addrlen));
3296 }
3297 
3298 /* do_connect() Must return target values and target errnos. */
3299 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3300                            socklen_t addrlen)
3301 {
3302     void *addr;
3303     abi_long ret;
3304 
3305     if ((int)addrlen < 0) {
3306         return -TARGET_EINVAL;
3307     }
3308 
3309     addr = alloca(addrlen+1);
3310 
3311     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3312     if (ret)
3313         return ret;
3314 
3315     return get_errno(safe_connect(sockfd, addr, addrlen));
3316 }
3317 
3318 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3319 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3320                                       int flags, int send)
3321 {
3322     abi_long ret, len;
3323     struct msghdr msg;
3324     abi_ulong count;
3325     struct iovec *vec;
3326     abi_ulong target_vec;
3327 
3328     if (msgp->msg_name) {
3329         msg.msg_namelen = tswap32(msgp->msg_namelen);
3330         msg.msg_name = alloca(msg.msg_namelen+1);
3331         ret = target_to_host_sockaddr(fd, msg.msg_name,
3332                                       tswapal(msgp->msg_name),
3333                                       msg.msg_namelen);
3334         if (ret == -TARGET_EFAULT) {
3335             /* For connected sockets msg_name and msg_namelen must
3336              * be ignored, so returning EFAULT immediately is wrong.
3337              * Instead, pass a bad msg_name to the host kernel, and
3338              * let it decide whether to return EFAULT or not.
3339              */
3340             msg.msg_name = (void *)-1;
3341         } else if (ret) {
3342             goto out2;
3343         }
3344     } else {
3345         msg.msg_name = NULL;
3346         msg.msg_namelen = 0;
3347     }
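    /* The factor of two leaves room for host cmsg headers and alignment
     * padding that can be larger than the target's (e.g. a 64-bit host
     * running a 32-bit guest), so the converted control messages do not
     * overflow the buffer.
     */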
3348     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3349     msg.msg_control = alloca(msg.msg_controllen);
3350     memset(msg.msg_control, 0, msg.msg_controllen);
3351 
3352     msg.msg_flags = tswap32(msgp->msg_flags);
3353 
3354     count = tswapal(msgp->msg_iovlen);
3355     target_vec = tswapal(msgp->msg_iov);
3356 
3357     if (count > IOV_MAX) {
3358         /* sendmsg/recvmsg return a different errno for this condition than
3359          * readv/writev, so we must catch it here before lock_iovec() does.
3360          */
3361         ret = -TARGET_EMSGSIZE;
3362         goto out2;
3363     }
3364 
3365     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3366                      target_vec, count, send);
3367     if (vec == NULL) {
3368         ret = -host_to_target_errno(errno);
3369         goto out2;
3370     }
3371     msg.msg_iovlen = count;
3372     msg.msg_iov = vec;
3373 
3374     if (send) {
3375         if (fd_trans_target_to_host_data(fd)) {
3376             void *host_msg;
3377 
3378             host_msg = g_malloc(msg.msg_iov->iov_len);
3379             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3380             ret = fd_trans_target_to_host_data(fd)(host_msg,
3381                                                    msg.msg_iov->iov_len);
3382             if (ret >= 0) {
3383                 msg.msg_iov->iov_base = host_msg;
3384                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3385             }
3386             g_free(host_msg);
3387         } else {
3388             ret = target_to_host_cmsg(&msg, msgp);
3389             if (ret == 0) {
3390                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3391             }
3392         }
3393     } else {
3394         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3395         if (!is_error(ret)) {
3396             len = ret;
3397             if (fd_trans_host_to_target_data(fd)) {
3398                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3399                                                MIN(msg.msg_iov->iov_len, len));
3400             } else {
3401                 ret = host_to_target_cmsg(msgp, &msg);
3402             }
3403             if (!is_error(ret)) {
3404                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3405                 msgp->msg_flags = tswap32(msg.msg_flags);
3406                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3407                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3408                                     msg.msg_name, msg.msg_namelen);
3409                     if (ret) {
3410                         goto out;
3411                     }
3412                 }
3413 
3414                 ret = len;
3415             }
3416         }
3417     }
3418 
3419 out:
3420     unlock_iovec(vec, target_vec, count, !send);
3421 out2:
3422     return ret;
3423 }
3424 
3425 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3426                                int flags, int send)
3427 {
3428     abi_long ret;
3429     struct target_msghdr *msgp;
3430 
3431     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3432                           msgp,
3433                           target_msg,
3434                           send ? 1 : 0)) {
3435         return -TARGET_EFAULT;
3436     }
3437     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3438     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3439     return ret;
3440 }
3441 
3442 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3443  * so it might not have this *mmsg-specific flag either.
3444  */
3445 #ifndef MSG_WAITFORONE
3446 #define MSG_WAITFORONE 0x10000
3447 #endif
3448 
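/* Emulate sendmmsg()/recvmmsg() by looping over do_sendrecvmsg_locked().
 * Returns the number of messages processed if at least one succeeded,
 * otherwise the error from the first (failed) message.
 */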
3449 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3450                                 unsigned int vlen, unsigned int flags,
3451                                 int send)
3452 {
3453     struct target_mmsghdr *mmsgp;
3454     abi_long ret = 0;
3455     int i;
3456 
3457     if (vlen > UIO_MAXIOV) {
3458         vlen = UIO_MAXIOV;
3459     }
3460 
3461     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3462     if (!mmsgp) {
3463         return -TARGET_EFAULT;
3464     }
3465 
3466     for (i = 0; i < vlen; i++) {
3467         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3468         if (is_error(ret)) {
3469             break;
3470         }
3471         mmsgp[i].msg_len = tswap32(ret);
3472         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3473         if (flags & MSG_WAITFORONE) {
3474             flags |= MSG_DONTWAIT;
3475         }
3476     }
3477 
3478     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3479 
3480     /* Return number of datagrams sent if we sent any at all;
3481      * otherwise return the error.
3482      */
3483     if (i) {
3484         return i;
3485     }
3486     return ret;
3487 }
3488 
3489 /* do_accept4() Must return target values and target errnos. */
3490 static abi_long do_accept4(int fd, abi_ulong target_addr,
3491                            abi_ulong target_addrlen_addr, int flags)
3492 {
3493     socklen_t addrlen, ret_addrlen;
3494     void *addr;
3495     abi_long ret;
3496     int host_flags;
3497 
3498     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3499 
3500     if (target_addr == 0) {
3501         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3502     }
3503 
3504     /* Linux returns EFAULT if the addrlen pointer is invalid */
3505     if (get_user_u32(addrlen, target_addrlen_addr))
3506         return -TARGET_EFAULT;
3507 
3508     if ((int)addrlen < 0) {
3509         return -TARGET_EINVAL;
3510     }
3511 
3512     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3513         return -TARGET_EFAULT;
3514 
3515     addr = alloca(addrlen);
3516 
3517     ret_addrlen = addrlen;
3518     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3519     if (!is_error(ret)) {
3520         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3521         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3522             ret = -TARGET_EFAULT;
3523         }
3524     }
3525     return ret;
3526 }
3527 
3528 /* do_getpeername() Must return target values and target errnos. */
3529 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3530                                abi_ulong target_addrlen_addr)
3531 {
3532     socklen_t addrlen, ret_addrlen;
3533     void *addr;
3534     abi_long ret;
3535 
3536     if (get_user_u32(addrlen, target_addrlen_addr))
3537         return -TARGET_EFAULT;
3538 
3539     if ((int)addrlen < 0) {
3540         return -TARGET_EINVAL;
3541     }
3542 
3543     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3544         return -TARGET_EFAULT;
3545 
3546     addr = alloca(addrlen);
3547 
3548     ret_addrlen = addrlen;
3549     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3550     if (!is_error(ret)) {
3551         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3552         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3553             ret = -TARGET_EFAULT;
3554         }
3555     }
3556     return ret;
3557 }
3558 
3559 /* do_getsockname() Must return target values and target errnos. */
3560 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3561                                abi_ulong target_addrlen_addr)
3562 {
3563     socklen_t addrlen, ret_addrlen;
3564     void *addr;
3565     abi_long ret;
3566 
3567     if (get_user_u32(addrlen, target_addrlen_addr))
3568         return -TARGET_EFAULT;
3569 
3570     if ((int)addrlen < 0) {
3571         return -TARGET_EINVAL;
3572     }
3573 
3574     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3575         return -TARGET_EFAULT;
3576 
3577     addr = alloca(addrlen);
3578 
3579     ret_addrlen = addrlen;
3580     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3581     if (!is_error(ret)) {
3582         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3583         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3584             ret = -TARGET_EFAULT;
3585         }
3586     }
3587     return ret;
3588 }
3589 
3590 /* do_socketpair() Must return target values and target errnos. */
3591 static abi_long do_socketpair(int domain, int type, int protocol,
3592                               abi_ulong target_tab_addr)
3593 {
3594     int tab[2];
3595     abi_long ret;
3596 
3597     target_to_host_sock_type(&type);
3598 
3599     ret = get_errno(socketpair(domain, type, protocol, tab));
3600     if (!is_error(ret)) {
3601         if (put_user_s32(tab[0], target_tab_addr)
3602             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3603             ret = -TARGET_EFAULT;
3604     }
3605     return ret;
3606 }
3607 
3608 /* do_sendto() Must return target values and target errnos. */
3609 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3610                           abi_ulong target_addr, socklen_t addrlen)
3611 {
3612     void *addr;
3613     void *host_msg;
3614     void *copy_msg = NULL;
3615     abi_long ret;
3616 
3617     if ((int)addrlen < 0) {
3618         return -TARGET_EINVAL;
3619     }
3620 
3621     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3622     if (!host_msg)
3623         return -TARGET_EFAULT;
3624     if (fd_trans_target_to_host_data(fd)) {
3625         copy_msg = host_msg;
3626         host_msg = g_malloc(len);
3627         memcpy(host_msg, copy_msg, len);
3628         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3629         if (ret < 0) {
3630             goto fail;
3631         }
3632     }
3633     if (target_addr) {
3634         addr = alloca(addrlen+1);
3635         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3636         if (ret) {
3637             goto fail;
3638         }
3639         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3640     } else {
3641         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3642     }
3643 fail:
3644     if (copy_msg) {
3645         g_free(host_msg);
3646         host_msg = copy_msg;
3647     }
3648     unlock_user(host_msg, msg, 0);
3649     return ret;
3650 }
3651 
3652 /* do_recvfrom() Must return target values and target errnos. */
3653 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3654                             abi_ulong target_addr,
3655                             abi_ulong target_addrlen)
3656 {
3657     socklen_t addrlen, ret_addrlen;
3658     void *addr;
3659     void *host_msg;
3660     abi_long ret;
3661 
3662     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3663     if (!host_msg)
3664         return -TARGET_EFAULT;
3665     if (target_addr) {
3666         if (get_user_u32(addrlen, target_addrlen)) {
3667             ret = -TARGET_EFAULT;
3668             goto fail;
3669         }
3670         if ((int)addrlen < 0) {
3671             ret = -TARGET_EINVAL;
3672             goto fail;
3673         }
3674         addr = alloca(addrlen);
3675         ret_addrlen = addrlen;
3676         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3677                                       addr, &ret_addrlen));
3678     } else {
3679         addr = NULL; /* To keep compiler quiet.  */
3680         addrlen = 0; /* To keep compiler quiet.  */
3681         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3682     }
3683     if (!is_error(ret)) {
3684         if (fd_trans_host_to_target_data(fd)) {
3685             abi_long trans;
3686             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3687             if (is_error(trans)) {
3688                 ret = trans;
3689                 goto fail;
3690             }
3691         }
3692         if (target_addr) {
3693             host_to_target_sockaddr(target_addr, addr,
3694                                     MIN(addrlen, ret_addrlen));
3695             if (put_user_u32(ret_addrlen, target_addrlen)) {
3696                 ret = -TARGET_EFAULT;
3697                 goto fail;
3698             }
3699         }
3700         unlock_user(host_msg, msg, len);
3701     } else {
3702 fail:
3703         unlock_user(host_msg, msg, 0);
3704     }
3705     return ret;
3706 }
3707 
3708 #ifdef TARGET_NR_socketcall
3709 /* do_socketcall() must return target values and target errnos. */
3710 static abi_long do_socketcall(int num, abi_ulong vptr)
3711 {
3712     static const unsigned nargs[] = { /* number of arguments per operation */
3713         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3714         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3715         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3716         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3717         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3718         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3719         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3720         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3721         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3722         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3723         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3724         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3725         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3726         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3727         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3728         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3729         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3730         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3731         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3732         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3733     };
3734     abi_long a[6]; /* max 6 args */
3735     unsigned i;
3736 
3737     /* check the range of the first argument num */
3738     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3739     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3740         return -TARGET_EINVAL;
3741     }
3742     /* ensure we have space for args */
3743     if (nargs[num] > ARRAY_SIZE(a)) {
3744         return -TARGET_EINVAL;
3745     }
3746     /* collect the arguments in a[] according to nargs[] */
3747     for (i = 0; i < nargs[num]; ++i) {
3748         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3749             return -TARGET_EFAULT;
3750         }
3751     }
3752     /* now when we have the args, invoke the appropriate underlying function */
3753     switch (num) {
3754     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3755         return do_socket(a[0], a[1], a[2]);
3756     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3757         return do_bind(a[0], a[1], a[2]);
3758     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3759         return do_connect(a[0], a[1], a[2]);
3760     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3761         return get_errno(listen(a[0], a[1]));
3762     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3763         return do_accept4(a[0], a[1], a[2], 0);
3764     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3765         return do_getsockname(a[0], a[1], a[2]);
3766     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3767         return do_getpeername(a[0], a[1], a[2]);
3768     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3769         return do_socketpair(a[0], a[1], a[2], a[3]);
3770     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3771         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3772     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3773         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3774     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3775         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3776     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3777         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3778     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3779         return get_errno(shutdown(a[0], a[1]));
3780     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3781         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3782     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3783         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3784     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3785         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3786     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3787         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3788     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3789         return do_accept4(a[0], a[1], a[2], a[3]);
3790     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3791         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3792     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3793         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3794     default:
3795         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3796         return -TARGET_EINVAL;
3797     }
3798 }
3799 #endif
3800 
3801 #define N_SHM_REGIONS	32
3802 
3803 static struct shm_region {
3804     abi_ulong start;
3805     abi_ulong size;
3806     bool in_use;
3807 } shm_regions[N_SHM_REGIONS];
3808 
3809 #ifndef TARGET_SEMID64_DS
3810 /* asm-generic version of this struct */
3811 struct target_semid64_ds
3812 {
3813   struct target_ipc_perm sem_perm;
3814   abi_ulong sem_otime;
3815 #if TARGET_ABI_BITS == 32
3816   abi_ulong __unused1;
3817 #endif
3818   abi_ulong sem_ctime;
3819 #if TARGET_ABI_BITS == 32
3820   abi_ulong __unused2;
3821 #endif
3822   abi_ulong sem_nsems;
3823   abi_ulong __unused3;
3824   abi_ulong __unused4;
3825 };
3826 #endif
3827 
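/* Convert the target's struct ipc_perm into the host's.  The target
 * address points at a semid64_ds/msqid_ds/shmid_ds whose first member is
 * the permission structure.  mode is 32 bits wide on Alpha, MIPS and PPC
 * targets (and __seq on PPC) but 16 bits elsewhere, hence the per-target
 * swap widths below.
 */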
3828 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3829                                                abi_ulong target_addr)
3830 {
3831     struct target_ipc_perm *target_ip;
3832     struct target_semid64_ds *target_sd;
3833 
3834     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3835         return -TARGET_EFAULT;
3836     target_ip = &(target_sd->sem_perm);
3837     host_ip->__key = tswap32(target_ip->__key);
3838     host_ip->uid = tswap32(target_ip->uid);
3839     host_ip->gid = tswap32(target_ip->gid);
3840     host_ip->cuid = tswap32(target_ip->cuid);
3841     host_ip->cgid = tswap32(target_ip->cgid);
3842 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3843     host_ip->mode = tswap32(target_ip->mode);
3844 #else
3845     host_ip->mode = tswap16(target_ip->mode);
3846 #endif
3847 #if defined(TARGET_PPC)
3848     host_ip->__seq = tswap32(target_ip->__seq);
3849 #else
3850     host_ip->__seq = tswap16(target_ip->__seq);
3851 #endif
3852     unlock_user_struct(target_sd, target_addr, 0);
3853     return 0;
3854 }
3855 
3856 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3857                                                struct ipc_perm *host_ip)
3858 {
3859     struct target_ipc_perm *target_ip;
3860     struct target_semid64_ds *target_sd;
3861 
3862     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3863         return -TARGET_EFAULT;
3864     target_ip = &(target_sd->sem_perm);
3865     target_ip->__key = tswap32(host_ip->__key);
3866     target_ip->uid = tswap32(host_ip->uid);
3867     target_ip->gid = tswap32(host_ip->gid);
3868     target_ip->cuid = tswap32(host_ip->cuid);
3869     target_ip->cgid = tswap32(host_ip->cgid);
3870 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3871     target_ip->mode = tswap32(host_ip->mode);
3872 #else
3873     target_ip->mode = tswap16(host_ip->mode);
3874 #endif
3875 #if defined(TARGET_PPC)
3876     target_ip->__seq = tswap32(host_ip->__seq);
3877 #else
3878     target_ip->__seq = tswap16(host_ip->__seq);
3879 #endif
3880     unlock_user_struct(target_sd, target_addr, 1);
3881     return 0;
3882 }
3883 
3884 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3885                                                abi_ulong target_addr)
3886 {
3887     struct target_semid64_ds *target_sd;
3888 
3889     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3890         return -TARGET_EFAULT;
3891     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3892         return -TARGET_EFAULT;
3893     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3894     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3895     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3896     unlock_user_struct(target_sd, target_addr, 0);
3897     return 0;
3898 }
3899 
3900 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3901                                                struct semid_ds *host_sd)
3902 {
3903     struct target_semid64_ds *target_sd;
3904 
3905     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3906         return -TARGET_EFAULT;
3907     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3908         return -TARGET_EFAULT;
3909     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3910     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3911     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3912     unlock_user_struct(target_sd, target_addr, 1);
3913     return 0;
3914 }
3915 
3916 struct target_seminfo {
3917     int semmap;
3918     int semmni;
3919     int semmns;
3920     int semmnu;
3921     int semmsl;
3922     int semopm;
3923     int semume;
3924     int semusz;
3925     int semvmx;
3926     int semaem;
3927 };
3928 
3929 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3930                                               struct seminfo *host_seminfo)
3931 {
3932     struct target_seminfo *target_seminfo;
3933     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3934         return -TARGET_EFAULT;
3935     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3936     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3937     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3938     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3939     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3940     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3941     __put_user(host_seminfo->semume, &target_seminfo->semume);
3942     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3943     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3944     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3945     unlock_user_struct(target_seminfo, target_addr, 1);
3946     return 0;
3947 }
3948 
3949 union semun {
3950 	int val;
3951 	struct semid_ds *buf;
3952 	unsigned short *array;
3953 	struct seminfo *__buf;
3954 };
3955 
3956 union target_semun {
3957 	int val;
3958 	abi_ulong buf;
3959 	abi_ulong array;
3960 	abi_ulong __buf;
3961 };
3962 
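/* Read the guest's semaphore value array for SETALL: query the set with
 * IPC_STAT to learn how many semaphores it contains, allocate a matching
 * host array and fill it from guest memory.  On success the caller owns
 * *host_array; host_to_target_semarray() frees it again.
 */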
3963 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3964                                                abi_ulong target_addr)
3965 {
3966     int nsems;
3967     unsigned short *array;
3968     union semun semun;
3969     struct semid_ds semid_ds;
3970     int i, ret;
3971 
3972     semun.buf = &semid_ds;
3973 
3974     ret = semctl(semid, 0, IPC_STAT, semun);
3975     if (ret == -1)
3976         return get_errno(ret);
3977 
3978     nsems = semid_ds.sem_nsems;
3979 
3980     *host_array = g_try_new(unsigned short, nsems);
3981     if (!*host_array) {
3982         return -TARGET_ENOMEM;
3983     }
3984     array = lock_user(VERIFY_READ, target_addr,
3985                       nsems*sizeof(unsigned short), 1);
3986     if (!array) {
3987         g_free(*host_array);
3988         return -TARGET_EFAULT;
3989     }
3990 
3991     for(i=0; i<nsems; i++) {
3992         __get_user((*host_array)[i], &array[i]);
3993     }
3994     unlock_user(array, target_addr, 0);
3995 
3996     return 0;
3997 }
3998 
3999 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4000                                                unsigned short **host_array)
4001 {
4002     int nsems;
4003     unsigned short *array;
4004     union semun semun;
4005     struct semid_ds semid_ds;
4006     int i, ret;
4007 
4008     semun.buf = &semid_ds;
4009 
4010     ret = semctl(semid, 0, IPC_STAT, semun);
4011     if (ret == -1)
4012         return get_errno(ret);
4013 
4014     nsems = semid_ds.sem_nsems;
4015 
4016     array = lock_user(VERIFY_WRITE, target_addr,
4017                       nsems*sizeof(unsigned short), 0);
4018     if (!array)
4019         return -TARGET_EFAULT;
4020 
4021     for(i=0; i<nsems; i++) {
4022         __put_user((*host_array)[i], &array[i]);
4023     }
4024     g_free(*host_array);
4025     unlock_user(array, target_addr, 1);
4026 
4027     return 0;
4028 }
4029 
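/* semctl(2): unpack the target's semun argument as required by the
 * command, call the host semctl() and convert any results back.
 * Commands that take no argument (IPC_RMID, GETPID, GETNCNT, GETZCNT)
 * are passed NULL.
 */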
4030 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4031                                  abi_ulong target_arg)
4032 {
4033     union target_semun target_su = { .buf = target_arg };
4034     union semun arg;
4035     struct semid_ds dsarg;
4036     unsigned short *array = NULL;
4037     struct seminfo seminfo;
4038     abi_long ret = -TARGET_EINVAL;
4039     abi_long err;
4040     cmd &= 0xff;
4041 
4042     switch( cmd ) {
4043 	case GETVAL:
4044 	case SETVAL:
4045             /* In 64 bit cross-endian situations, we will erroneously pick up
4046              * the wrong half of the union for the "val" element.  To rectify
4047              * this, the entire 8-byte structure is byteswapped, followed by
4048              * a swap of the 4 byte val field. In other cases, the data is
4049              * already in proper host byte order. */
4050 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4051 		target_su.buf = tswapal(target_su.buf);
4052 		arg.val = tswap32(target_su.val);
4053 	    } else {
4054 		arg.val = target_su.val;
4055 	    }
4056             ret = get_errno(semctl(semid, semnum, cmd, arg));
4057             break;
4058 	case GETALL:
4059 	case SETALL:
4060             err = target_to_host_semarray(semid, &array, target_su.array);
4061             if (err)
4062                 return err;
4063             arg.array = array;
4064             ret = get_errno(semctl(semid, semnum, cmd, arg));
4065             err = host_to_target_semarray(semid, target_su.array, &array);
4066             if (err)
4067                 return err;
4068             break;
4069 	case IPC_STAT:
4070 	case IPC_SET:
4071 	case SEM_STAT:
4072             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4073             if (err)
4074                 return err;
4075             arg.buf = &dsarg;
4076             ret = get_errno(semctl(semid, semnum, cmd, arg));
4077             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4078             if (err)
4079                 return err;
4080             break;
4081 	case IPC_INFO:
4082 	case SEM_INFO:
4083             arg.__buf = &seminfo;
4084             ret = get_errno(semctl(semid, semnum, cmd, arg));
4085             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4086             if (err)
4087                 return err;
4088             break;
4089 	case IPC_RMID:
4090 	case GETPID:
4091 	case GETNCNT:
4092 	case GETZCNT:
4093             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4094             break;
4095     }
4096 
4097     return ret;
4098 }
4099 
4100 struct target_sembuf {
4101     unsigned short sem_num;
4102     short sem_op;
4103     short sem_flg;
4104 };
4105 
4106 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4107                                              abi_ulong target_addr,
4108                                              unsigned nsops)
4109 {
4110     struct target_sembuf *target_sembuf;
4111     int i;
4112 
4113     target_sembuf = lock_user(VERIFY_READ, target_addr,
4114                               nsops*sizeof(struct target_sembuf), 1);
4115     if (!target_sembuf)
4116         return -TARGET_EFAULT;
4117 
4118     for(i=0; i<nsops; i++) {
4119         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4120         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4121         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4122     }
4123 
4124     unlock_user(target_sembuf, target_addr, 0);
4125 
4126     return 0;
4127 }
4128 
4129 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4130     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4131 
4132 /*
4133  * This macro is required to handle the s390 variants, which pass the
4134  * arguments in a different order than the default.
4135  */
4136 #ifdef __s390x__
4137 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4138   (__nsops), (__timeout), (__sops)
4139 #else
4140 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4141   (__nsops), 0, (__sops), (__timeout)
4142 #endif
4143 
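/* Shared helper for semop()/semtimedop(): convert the optional timeout
 * (32- or 64-bit target timespec, selected by time64) and the sembuf
 * array, then try the direct semtimedop syscall and fall back to the
 * multiplexed ipc syscall where the former is unavailable.
 */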
4144 static inline abi_long do_semtimedop(int semid,
4145                                      abi_long ptr,
4146                                      unsigned nsops,
4147                                      abi_long timeout, bool time64)
4148 {
4149     struct sembuf *sops;
4150     struct timespec ts, *pts = NULL;
4151     abi_long ret;
4152 
4153     if (timeout) {
4154         pts = &ts;
4155         if (time64) {
4156             if (target_to_host_timespec64(pts, timeout)) {
4157                 return -TARGET_EFAULT;
4158             }
4159         } else {
4160             if (target_to_host_timespec(pts, timeout)) {
4161                 return -TARGET_EFAULT;
4162             }
4163         }
4164     }
4165 
4166     if (nsops > TARGET_SEMOPM) {
4167         return -TARGET_E2BIG;
4168     }
4169 
4170     sops = g_new(struct sembuf, nsops);
4171 
4172     if (target_to_host_sembuf(sops, ptr, nsops)) {
4173         g_free(sops);
4174         return -TARGET_EFAULT;
4175     }
4176 
4177     ret = -TARGET_ENOSYS;
4178 #ifdef __NR_semtimedop
4179     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4180 #endif
4181 #ifdef __NR_ipc
4182     if (ret == -TARGET_ENOSYS) {
4183         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4184                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4185     }
4186 #endif
4187     g_free(sops);
4188     return ret;
4189 }
4190 #endif
4191 
4192 struct target_msqid_ds
4193 {
4194     struct target_ipc_perm msg_perm;
4195     abi_ulong msg_stime;
4196 #if TARGET_ABI_BITS == 32
4197     abi_ulong __unused1;
4198 #endif
4199     abi_ulong msg_rtime;
4200 #if TARGET_ABI_BITS == 32
4201     abi_ulong __unused2;
4202 #endif
4203     abi_ulong msg_ctime;
4204 #if TARGET_ABI_BITS == 32
4205     abi_ulong __unused3;
4206 #endif
4207     abi_ulong __msg_cbytes;
4208     abi_ulong msg_qnum;
4209     abi_ulong msg_qbytes;
4210     abi_ulong msg_lspid;
4211     abi_ulong msg_lrpid;
4212     abi_ulong __unused4;
4213     abi_ulong __unused5;
4214 };
4215 
4216 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4217                                                abi_ulong target_addr)
4218 {
4219     struct target_msqid_ds *target_md;
4220 
4221     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4222         return -TARGET_EFAULT;
4223     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4224         return -TARGET_EFAULT;
4225     host_md->msg_stime = tswapal(target_md->msg_stime);
4226     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4227     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4228     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4229     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4230     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4231     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4232     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4233     unlock_user_struct(target_md, target_addr, 0);
4234     return 0;
4235 }
4236 
4237 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4238                                                struct msqid_ds *host_md)
4239 {
4240     struct target_msqid_ds *target_md;
4241 
4242     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4243         return -TARGET_EFAULT;
4244     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4245         return -TARGET_EFAULT;
4246     target_md->msg_stime = tswapal(host_md->msg_stime);
4247     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4248     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4249     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4250     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4251     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4252     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4253     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4254     unlock_user_struct(target_md, target_addr, 1);
4255     return 0;
4256 }
4257 
4258 struct target_msginfo {
4259     int msgpool;
4260     int msgmap;
4261     int msgmax;
4262     int msgmnb;
4263     int msgmni;
4264     int msgssz;
4265     int msgtql;
4266     unsigned short int msgseg;
4267 };
4268 
4269 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4270                                               struct msginfo *host_msginfo)
4271 {
4272     struct target_msginfo *target_msginfo;
4273     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4274         return -TARGET_EFAULT;
4275     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4276     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4277     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4278     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4279     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4280     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4281     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4282     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4283     unlock_user_struct(target_msginfo, target_addr, 1);
4284     return 0;
4285 }
4286 
4287 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
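/* msgctl(2): convert msqid_ds / msginfo structures between target and
 * host representations as each command requires.
 */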
4288 {
4289     struct msqid_ds dsarg;
4290     struct msginfo msginfo;
4291     abi_long ret = -TARGET_EINVAL;
4292 
4293     cmd &= 0xff;
4294 
4295     switch (cmd) {
4296     case IPC_STAT:
4297     case IPC_SET:
4298     case MSG_STAT:
4299         if (target_to_host_msqid_ds(&dsarg,ptr))
4300             return -TARGET_EFAULT;
4301         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4302         if (host_to_target_msqid_ds(ptr,&dsarg))
4303             return -TARGET_EFAULT;
4304         break;
4305     case IPC_RMID:
4306         ret = get_errno(msgctl(msgid, cmd, NULL));
4307         break;
4308     case IPC_INFO:
4309     case MSG_INFO:
4310         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4311         if (host_to_target_msginfo(ptr, &msginfo))
4312             return -TARGET_EFAULT;
4313         break;
4314     }
4315 
4316     return ret;
4317 }
4318 
4319 struct target_msgbuf {
4320     abi_long mtype;
4321     char	mtext[1];
4322 };
4323 
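/* msgsnd(2): copy the target message (mtype plus msgsz bytes of mtext)
 * into a temporary host msgbuf, then use the direct msgsnd syscall or
 * fall back to the multiplexed ipc syscall (whose argument order differs
 * on s390x).
 */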
4324 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4325                                  ssize_t msgsz, int msgflg)
4326 {
4327     struct target_msgbuf *target_mb;
4328     struct msgbuf *host_mb;
4329     abi_long ret = 0;
4330 
4331     if (msgsz < 0) {
4332         return -TARGET_EINVAL;
4333     }
4334 
4335     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4336         return -TARGET_EFAULT;
4337     host_mb = g_try_malloc(msgsz + sizeof(long));
4338     if (!host_mb) {
4339         unlock_user_struct(target_mb, msgp, 0);
4340         return -TARGET_ENOMEM;
4341     }
4342     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4343     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4344     ret = -TARGET_ENOSYS;
4345 #ifdef __NR_msgsnd
4346     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4347 #endif
4348 #ifdef __NR_ipc
4349     if (ret == -TARGET_ENOSYS) {
4350 #ifdef __s390x__
4351         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4352                                  host_mb));
4353 #else
4354         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4355                                  host_mb, 0));
4356 #endif
4357     }
4358 #endif
4359     g_free(host_mb);
4360     unlock_user_struct(target_mb, msgp, 0);
4361 
4362     return ret;
4363 }
4364 
4365 #ifdef __NR_ipc
4366 #if defined(__sparc__)
4367 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4368 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4369 #elif defined(__s390x__)
4370 /* The s390 sys_ipc variant has only five parameters.  */
4371 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4372     ((long int[]){(long int)__msgp, __msgtyp})
4373 #else
4374 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4375     ((long int[]){(long int)__msgp, __msgtyp}), 0
4376 #endif
4377 #endif
4378 
4379 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
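/* msgrcv(2): receive into a temporary host msgbuf and copy mtype plus
 * however many mtext bytes were returned back into the guest buffer.
 */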
4380                                  ssize_t msgsz, abi_long msgtyp,
4381                                  int msgflg)
4382 {
4383     struct target_msgbuf *target_mb;
4384     char *target_mtext;
4385     struct msgbuf *host_mb;
4386     abi_long ret = 0;
4387 
4388     if (msgsz < 0) {
4389         return -TARGET_EINVAL;
4390     }
4391 
4392     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4393         return -TARGET_EFAULT;
4394 
4395     host_mb = g_try_malloc(msgsz + sizeof(long));
4396     if (!host_mb) {
4397         ret = -TARGET_ENOMEM;
4398         goto end;
4399     }
4400     ret = -TARGET_ENOSYS;
4401 #ifdef __NR_msgrcv
4402     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4403 #endif
4404 #ifdef __NR_ipc
4405     if (ret == -TARGET_ENOSYS) {
4406         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4407                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4408     }
4409 #endif
4410 
4411     if (ret > 0) {
4412         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4413         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4414         if (!target_mtext) {
4415             ret = -TARGET_EFAULT;
4416             goto end;
4417         }
4418         memcpy(target_mb->mtext, host_mb->mtext, ret);
4419         unlock_user(target_mtext, target_mtext_addr, ret);
4420     }
4421 
4422     target_mb->mtype = tswapal(host_mb->mtype);
4423 
4424 end:
4425     if (target_mb)
4426         unlock_user_struct(target_mb, msgp, 1);
4427     g_free(host_mb);
4428     return ret;
4429 }
4430 
4431 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4432                                                abi_ulong target_addr)
4433 {
4434     struct target_shmid_ds *target_sd;
4435 
4436     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4437         return -TARGET_EFAULT;
4438     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4439         return -TARGET_EFAULT;
4440     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4441     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4442     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4443     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4444     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4445     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4446     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4447     unlock_user_struct(target_sd, target_addr, 0);
4448     return 0;
4449 }
4450 
4451 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4452                                                struct shmid_ds *host_sd)
4453 {
4454     struct target_shmid_ds *target_sd;
4455 
4456     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4457         return -TARGET_EFAULT;
4458     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4459         return -TARGET_EFAULT;
4460     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4461     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4462     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4463     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4464     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4465     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4466     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4467     unlock_user_struct(target_sd, target_addr, 1);
4468     return 0;
4469 }
4470 
4471 struct  target_shminfo {
4472     abi_ulong shmmax;
4473     abi_ulong shmmin;
4474     abi_ulong shmmni;
4475     abi_ulong shmseg;
4476     abi_ulong shmall;
4477 };
4478 
4479 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4480                                               struct shminfo *host_shminfo)
4481 {
4482     struct target_shminfo *target_shminfo;
4483     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4484         return -TARGET_EFAULT;
4485     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4486     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4487     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4488     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4489     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4490     unlock_user_struct(target_shminfo, target_addr, 1);
4491     return 0;
4492 }
4493 
4494 struct target_shm_info {
4495     int used_ids;
4496     abi_ulong shm_tot;
4497     abi_ulong shm_rss;
4498     abi_ulong shm_swp;
4499     abi_ulong swap_attempts;
4500     abi_ulong swap_successes;
4501 };
4502 
4503 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4504                                                struct shm_info *host_shm_info)
4505 {
4506     struct target_shm_info *target_shm_info;
4507     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4508         return -TARGET_EFAULT;
4509     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4510     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4511     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4512     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4513     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4514     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4515     unlock_user_struct(target_shm_info, target_addr, 1);
4516     return 0;
4517 }
4518 
4519 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
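/* shmctl(2): same pattern as do_msgctl(), converting shmid_ds, shminfo
 * or shm_info structures as each command requires.
 */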
4520 {
4521     struct shmid_ds dsarg;
4522     struct shminfo shminfo;
4523     struct shm_info shm_info;
4524     abi_long ret = -TARGET_EINVAL;
4525 
4526     cmd &= 0xff;
4527 
4528     switch(cmd) {
4529     case IPC_STAT:
4530     case IPC_SET:
4531     case SHM_STAT:
4532         if (target_to_host_shmid_ds(&dsarg, buf))
4533             return -TARGET_EFAULT;
4534         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4535         if (host_to_target_shmid_ds(buf, &dsarg))
4536             return -TARGET_EFAULT;
4537         break;
4538     case IPC_INFO:
4539         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4540         if (host_to_target_shminfo(buf, &shminfo))
4541             return -TARGET_EFAULT;
4542         break;
4543     case SHM_INFO:
4544         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4545         if (host_to_target_shm_info(buf, &shm_info))
4546             return -TARGET_EFAULT;
4547         break;
4548     case IPC_RMID:
4549     case SHM_LOCK:
4550     case SHM_UNLOCK:
4551         ret = get_errno(shmctl(shmid, cmd, NULL));
4552         break;
4553     }
4554 
4555     return ret;
4556 }
4557 
4558 #ifndef TARGET_FORCE_SHMLBA
4559 /* For most architectures, SHMLBA is the same as the page size;
4560  * some architectures have larger values, in which case they should
4561  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4562  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4563  * and defining its own value for SHMLBA.
4564  *
4565  * The kernel also permits SHMLBA to be set by the architecture to a
4566  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4567  * this means that addresses are rounded to the large size if
4568  * SHM_RND is set but addresses not aligned to that size are not rejected
4569  * as long as they are at least page-aligned. Since the only architecture
4570  * which uses this is ia64 this code doesn't provide for that oddity.
4571  */
4572 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4573 {
4574     return TARGET_PAGE_SIZE;
4575 }
4576 #endif
4577 
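/* shmat(2): query the segment size, apply the target's SHMLBA alignment
 * rules (round down with SHM_RND, otherwise reject a misaligned address),
 * attach the segment at the requested guest address or at a freshly found
 * VMA, and record the mapping in shm_regions[] so that do_shmdt() can
 * later clear the guest page flags.
 */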
4578 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4579                                  int shmid, abi_ulong shmaddr, int shmflg)
4580 {
4581     abi_long raddr;
4582     void *host_raddr;
4583     struct shmid_ds shm_info;
4584     int i,ret;
4585     abi_ulong shmlba;
4586 
4587     /* find out the length of the shared memory segment */
4588     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4589     if (is_error(ret)) {
4590         /* can't get length, bail out */
4591         return ret;
4592     }
4593 
4594     shmlba = target_shmlba(cpu_env);
4595 
4596     if (shmaddr & (shmlba - 1)) {
4597         if (shmflg & SHM_RND) {
4598             shmaddr &= ~(shmlba - 1);
4599         } else {
4600             return -TARGET_EINVAL;
4601         }
4602     }
4603     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4604         return -TARGET_EINVAL;
4605     }
4606 
4607     mmap_lock();
4608 
4609     if (shmaddr)
4610         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4611     else {
4612         abi_ulong mmap_start;
4613 
4614         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4615         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4616 
4617         if (mmap_start == -1) {
4618             errno = ENOMEM;
4619             host_raddr = (void *)-1;
4620         } else
4621             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4622     }
4623 
4624     if (host_raddr == (void *)-1) {
4625         mmap_unlock();
4626         return get_errno((long)host_raddr);
4627     }
4628     raddr = h2g((unsigned long)host_raddr);
4629 
4630     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4631                    PAGE_VALID | PAGE_READ |
4632                    ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
4633 
4634     for (i = 0; i < N_SHM_REGIONS; i++) {
4635         if (!shm_regions[i].in_use) {
4636             shm_regions[i].in_use = true;
4637             shm_regions[i].start = raddr;
4638             shm_regions[i].size = shm_info.shm_segsz;
4639             break;
4640         }
4641     }
4642 
4643     mmap_unlock();
4644     return raddr;
4645 
4646 }
4647 
4648 static inline abi_long do_shmdt(abi_ulong shmaddr)
4649 {
4650     int i;
4651     abi_long rv;
4652 
4653     mmap_lock();
4654 
4655     for (i = 0; i < N_SHM_REGIONS; ++i) {
4656         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4657             shm_regions[i].in_use = false;
4658             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4659             break;
4660         }
4661     }
4662     rv = get_errno(shmdt(g2h(shmaddr)));
4663 
4664     mmap_unlock();
4665 
4666     return rv;
4667 }
4668 
4669 #ifdef TARGET_NR_ipc
4670 /* ??? This only works with linear mappings.  */
4671 /* do_ipc() must return target values and target errnos. */
4672 static abi_long do_ipc(CPUArchState *cpu_env,
4673                        unsigned int call, abi_long first,
4674                        abi_long second, abi_long third,
4675                        abi_long ptr, abi_long fifth)
4676 {
4677     int version;
4678     abi_long ret = 0;
4679 
4680     version = call >> 16;
4681     call &= 0xffff;
4682 
4683     switch (call) {
4684     case IPCOP_semop:
4685         ret = do_semtimedop(first, ptr, second, 0, false);
4686         break;
4687     case IPCOP_semtimedop:
4688     /*
4689      * The s390 sys_ipc variant has only five parameters instead of six
4690      * (as for the default variant) and the only difference is the handling of
4691      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4692      * to a struct timespec where the generic variant uses the fifth parameter.
4693      */
4694 #if defined(TARGET_S390X)
4695         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4696 #else
4697         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4698 #endif
4699         break;
4700 
4701     case IPCOP_semget:
4702         ret = get_errno(semget(first, second, third));
4703         break;
4704 
4705     case IPCOP_semctl: {
4706         /* The semun argument to semctl is passed by value, so dereference the
4707          * ptr argument. */
4708         abi_ulong atptr;
4709         get_user_ual(atptr, ptr);
4710         ret = do_semctl(first, second, third, atptr);
4711         break;
4712     }
4713 
4714     case IPCOP_msgget:
4715         ret = get_errno(msgget(first, second));
4716         break;
4717 
4718     case IPCOP_msgsnd:
4719         ret = do_msgsnd(first, ptr, second, third);
4720         break;
4721 
4722     case IPCOP_msgctl:
4723         ret = do_msgctl(first, second, ptr);
4724         break;
4725 
4726     case IPCOP_msgrcv:
4727         switch (version) {
4728         case 0:
4729             {
4730                 struct target_ipc_kludge {
4731                     abi_long msgp;
4732                     abi_long msgtyp;
4733                 } *tmp;
4734 
4735                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4736                     ret = -TARGET_EFAULT;
4737                     break;
4738                 }
4739 
4740                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4741 
4742                 unlock_user_struct(tmp, ptr, 0);
4743                 break;
4744             }
4745         default:
4746             ret = do_msgrcv(first, ptr, second, fifth, third);
4747         }
4748         break;
4749 
4750     case IPCOP_shmat:
4751         switch (version) {
4752         default:
4753         {
4754             abi_ulong raddr;
4755             raddr = do_shmat(cpu_env, first, ptr, second);
4756             if (is_error(raddr))
4757                 return get_errno(raddr);
4758             if (put_user_ual(raddr, third))
4759                 return -TARGET_EFAULT;
4760             break;
4761         }
4762         case 1:
4763             ret = -TARGET_EINVAL;
4764             break;
4765         }
4766         break;
4767     case IPCOP_shmdt:
4768         ret = do_shmdt(ptr);
4769         break;
4770 
4771     case IPCOP_shmget:
4772         /* IPC_* flag values are the same on all Linux platforms */
4773         ret = get_errno(shmget(first, second, third));
4774         break;
4775 
4776     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4777     case IPCOP_shmctl:
4778         ret = do_shmctl(first, second, ptr);
4779         break;
4780     default:
4781         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4782                       call, version);
4783         ret = -TARGET_ENOSYS;
4784         break;
4785     }
4786     return ret;
4787 }
4788 #endif
4789 
4790 /* kernel structure types definitions */
4791 
4792 #define STRUCT(name, ...) STRUCT_ ## name,
4793 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4794 enum {
4795 #include "syscall_types.h"
4796 STRUCT_MAX
4797 };
4798 #undef STRUCT
4799 #undef STRUCT_SPECIAL
4800 
4801 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4802 #define STRUCT_SPECIAL(name)
4803 #include "syscall_types.h"
4804 #undef STRUCT
4805 #undef STRUCT_SPECIAL
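/*
 * The two passes over syscall_types.h above form an X-macro: a hypothetical
 * entry such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * first expands to the enum constant STRUCT_winsize and then to its field
 * description
 *
 *     static const argtype struct_winsize_def[] = {
 *         TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
 *     };
 *
 * STRUCT_SPECIAL() entries only get the enum constant; their conversion is
 * hand-written (see struct_termios_def below).
 */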
4806 
4807 #define MAX_STRUCT_SIZE 4096
4808 
4809 #ifdef CONFIG_FIEMAP
4810 /* So fiemap access checks don't overflow on 32-bit systems.
4811  * This is very slightly smaller than the limit imposed by
4812  * the underlying kernel.
4813  */
4814 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4815                             / sizeof(struct fiemap_extent))
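/*
 * Rough numbers for illustration (exact sizes depend on the host headers):
 * with a 32-byte struct fiemap and 56-byte struct fiemap_extent this comes
 * to roughly (UINT_MAX - 32) / 56, i.e. about 76 million extents, so the
 * outbufsz computation below cannot overflow a 32-bit value.
 */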
4816 
4817 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4818                                        int fd, int cmd, abi_long arg)
4819 {
4820     /* The parameter for this ioctl is a struct fiemap followed
4821      * by an array of struct fiemap_extent whose size is set
4822      * in fiemap->fm_extent_count. The array is filled in by the
4823      * ioctl.
4824      */
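    /*
     * Layout of the argument buffer, as a sketch:
     *
     *   | struct fiemap | fiemap_extent[0] | ... | fiemap_extent[N-1] |
     *
     * where N is fm_extent_count on input and fm_mapped_extents entries
     * are filled in on output.
     */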
4825     int target_size_in, target_size_out;
4826     struct fiemap *fm;
4827     const argtype *arg_type = ie->arg_type;
4828     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4829     void *argptr, *p;
4830     abi_long ret;
4831     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4832     uint32_t outbufsz;
4833     int free_fm = 0;
4834 
4835     assert(arg_type[0] == TYPE_PTR);
4836     assert(ie->access == IOC_RW);
4837     arg_type++;
4838     target_size_in = thunk_type_size(arg_type, 0);
4839     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4840     if (!argptr) {
4841         return -TARGET_EFAULT;
4842     }
4843     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4844     unlock_user(argptr, arg, 0);
4845     fm = (struct fiemap *)buf_temp;
4846     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4847         return -TARGET_EINVAL;
4848     }
4849 
4850     outbufsz = sizeof (*fm) +
4851         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4852 
4853     if (outbufsz > MAX_STRUCT_SIZE) {
4854         /* We can't fit all the extents into the fixed size buffer.
4855          * Allocate one that is large enough and use it instead.
4856          */
4857         fm = g_try_malloc(outbufsz);
4858         if (!fm) {
4859             return -TARGET_ENOMEM;
4860         }
4861         memcpy(fm, buf_temp, sizeof(struct fiemap));
4862         free_fm = 1;
4863     }
4864     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4865     if (!is_error(ret)) {
4866         target_size_out = target_size_in;
4867         /* An extent_count of 0 means we were only counting the extents,
4868          * so there are no structs to copy.
4869          */
4870         if (fm->fm_extent_count != 0) {
4871             target_size_out += fm->fm_mapped_extents * extent_size;
4872         }
4873         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4874         if (!argptr) {
4875             ret = -TARGET_EFAULT;
4876         } else {
4877             /* Convert the struct fiemap */
4878             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4879             if (fm->fm_extent_count != 0) {
4880                 p = argptr + target_size_in;
4881                 /* ...and then all the struct fiemap_extents */
4882                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4883                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4884                                   THUNK_TARGET);
4885                     p += extent_size;
4886                 }
4887             }
4888             unlock_user(argptr, arg, target_size_out);
4889         }
4890     }
4891     if (free_fm) {
4892         g_free(fm);
4893     }
4894     return ret;
4895 }
4896 #endif
4897 
4898 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4899                                 int fd, int cmd, abi_long arg)
4900 {
4901     const argtype *arg_type = ie->arg_type;
4902     int target_size;
4903     void *argptr;
4904     int ret;
4905     struct ifconf *host_ifconf;
4906     uint32_t outbufsz;
4907     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4908     int target_ifreq_size;
4909     int nb_ifreq;
4910     int free_buf = 0;
4911     int i;
4912     int target_ifc_len;
4913     abi_long target_ifc_buf;
4914     int host_ifc_len;
4915     char *host_ifc_buf;
4916 
4917     assert(arg_type[0] == TYPE_PTR);
4918     assert(ie->access == IOC_RW);
4919 
4920     arg_type++;
4921     target_size = thunk_type_size(arg_type, 0);
4922 
4923     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4924     if (!argptr)
4925         return -TARGET_EFAULT;
4926     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4927     unlock_user(argptr, arg, 0);
4928 
4929     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4930     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4931     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4932 
4933     if (target_ifc_buf != 0) {
4934         target_ifc_len = host_ifconf->ifc_len;
4935         nb_ifreq = target_ifc_len / target_ifreq_size;
4936         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4937 
4938         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4939         if (outbufsz > MAX_STRUCT_SIZE) {
4940             /*
4941              * We can't fit all the ifreq entries into the fixed size buffer.
4942              * Allocate one that is large enough and use it instead.
4943              */
4944             host_ifconf = g_try_malloc(outbufsz);
4945             if (!host_ifconf) {
4946                 return -TARGET_ENOMEM;
4947             }
4948             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4949             free_buf = 1;
4950         }
4951         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4952 
4953         host_ifconf->ifc_len = host_ifc_len;
4954     } else {
4955         host_ifc_buf = NULL;
4956     }
4957     host_ifconf->ifc_buf = host_ifc_buf;
4958 
4959     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4960     if (!is_error(ret)) {
4961         /* convert host ifc_len to target ifc_len */
4962 
4963         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4964         target_ifc_len = nb_ifreq * target_ifreq_size;
4965         host_ifconf->ifc_len = target_ifc_len;
4966 
4967         /* restore target ifc_buf */
4968 
4969         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4970 
4971         /* copy struct ifconf to target user */
4972 
4973         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4974         if (!argptr)
4975             return -TARGET_EFAULT;
4976         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4977         unlock_user(argptr, arg, target_size);
4978 
4979         if (target_ifc_buf != 0) {
4980             /* copy ifreq[] to target user */
4981             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4982             for (i = 0; i < nb_ifreq ; i++) {
4983                 thunk_convert(argptr + i * target_ifreq_size,
4984                               host_ifc_buf + i * sizeof(struct ifreq),
4985                               ifreq_arg_type, THUNK_TARGET);
4986             }
4987             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4988         }
4989     }
4990 
4991     if (free_buf) {
4992         g_free(host_ifconf);
4993     }
4994 
4995     return ret;
4996 }
4997 
4998 #if defined(CONFIG_USBFS)
4999 #if HOST_LONG_BITS > 64
5000 #error USBDEVFS thunks do not support >64 bit hosts yet.
5001 #endif
5002 struct live_urb {
5003     uint64_t target_urb_adr;
5004     uint64_t target_buf_adr;
5005     char *target_buf_ptr;
5006     struct usbdevfs_urb host_urb;
5007 };
5008 
5009 static GHashTable *usbdevfs_urb_hashtable(void)
5010 {
5011     static GHashTable *urb_hashtable;
5012 
5013     if (!urb_hashtable) {
5014         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5015     }
5016     return urb_hashtable;
5017 }
5018 
5019 static void urb_hashtable_insert(struct live_urb *urb)
5020 {
5021     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5022     g_hash_table_insert(urb_hashtable, urb, urb);
5023 }
5024 
5025 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5026 {
5027     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5028     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5029 }
5030 
5031 static void urb_hashtable_remove(struct live_urb *urb)
5032 {
5033     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5034     g_hash_table_remove(urb_hashtable, urb);
5035 }
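/*
 * Lifecycle sketch: USBDEVFS_SUBMITURB allocates a struct live_urb,
 * converts the guest URB into lurb->host_urb and inserts lurb into the
 * hash table keyed by the guest URB address.  USBDEVFS_REAPURB gets the
 * address of the embedded host_urb back from the kernel and recovers the
 * wrapper with container_of-style arithmetic:
 *
 *     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 * USBDEVFS_DISCARDURB only needs the lookup by guest address.
 */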
5036 
5037 static abi_long
5038 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5039                           int fd, int cmd, abi_long arg)
5040 {
5041     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5042     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5043     struct live_urb *lurb;
5044     void *argptr;
5045     uint64_t hurb;
5046     int target_size;
5047     uintptr_t target_urb_adr;
5048     abi_long ret;
5049 
5050     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5051 
5052     memset(buf_temp, 0, sizeof(uint64_t));
5053     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5054     if (is_error(ret)) {
5055         return ret;
5056     }
5057 
5058     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5059     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5060     if (!lurb->target_urb_adr) {
5061         return -TARGET_EFAULT;
5062     }
5063     urb_hashtable_remove(lurb);
5064     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5065         lurb->host_urb.buffer_length);
5066     lurb->target_buf_ptr = NULL;
5067 
5068     /* restore the guest buffer pointer */
5069     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5070 
5071     /* update the guest urb struct */
5072     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5073     if (!argptr) {
5074         g_free(lurb);
5075         return -TARGET_EFAULT;
5076     }
5077     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5078     unlock_user(argptr, lurb->target_urb_adr, target_size);
5079 
5080     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5081     /* write back the urb handle */
5082     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5083     if (!argptr) {
5084         g_free(lurb);
5085         return -TARGET_EFAULT;
5086     }
5087 
5088     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5089     target_urb_adr = lurb->target_urb_adr;
5090     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5091     unlock_user(argptr, arg, target_size);
5092 
5093     g_free(lurb);
5094     return ret;
5095 }
5096 
5097 static abi_long
5098 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5099                              uint8_t *buf_temp __attribute__((unused)),
5100                              int fd, int cmd, abi_long arg)
5101 {
5102     struct live_urb *lurb;
5103 
5104     /* map target address back to host URB with metadata. */
5105     lurb = urb_hashtable_lookup(arg);
5106     if (!lurb) {
5107         return -TARGET_EFAULT;
5108     }
5109     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5110 }
5111 
5112 static abi_long
5113 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5114                             int fd, int cmd, abi_long arg)
5115 {
5116     const argtype *arg_type = ie->arg_type;
5117     int target_size;
5118     abi_long ret;
5119     void *argptr;
5120     int rw_dir;
5121     struct live_urb *lurb;
5122 
5123     /*
5124      * Each submitted URB needs to map to a unique ID for the
5125      * kernel, and that unique ID needs to be a pointer to
5126      * host memory.  Hence, we need to malloc for each URB.
5127      * Isochronous transfers have a variable-length struct.
5128      */
5129     arg_type++;
5130     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5131 
5132     /* construct host copy of urb and metadata */
5133     lurb = g_try_malloc0(sizeof(struct live_urb));
5134     if (!lurb) {
5135         return -TARGET_ENOMEM;
5136     }
5137 
5138     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5139     if (!argptr) {
5140         g_free(lurb);
5141         return -TARGET_EFAULT;
5142     }
5143     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5144     unlock_user(argptr, arg, 0);
5145 
5146     lurb->target_urb_adr = arg;
5147     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5148 
5149     /* buffer space used depends on endpoint type, so lock the entire buffer */
5150     /* control type URBs should check the buffer contents for true direction */
5151     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5152     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5153         lurb->host_urb.buffer_length, 1);
5154     if (lurb->target_buf_ptr == NULL) {
5155         g_free(lurb);
5156         return -TARGET_EFAULT;
5157     }
5158 
5159     /* update buffer pointer in host copy */
5160     lurb->host_urb.buffer = lurb->target_buf_ptr;
5161 
5162     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5163     if (is_error(ret)) {
5164         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5165         g_free(lurb);
5166     } else {
5167         urb_hashtable_insert(lurb);
5168     }
5169 
5170     return ret;
5171 }
5172 #endif /* CONFIG_USBFS */
5173 
5174 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5175                             int cmd, abi_long arg)
5176 {
5177     void *argptr;
5178     struct dm_ioctl *host_dm;
5179     abi_long guest_data;
5180     uint32_t guest_data_size;
5181     int target_size;
5182     const argtype *arg_type = ie->arg_type;
5183     abi_long ret;
5184     void *big_buf = NULL;
5185     char *host_data;
5186 
5187     arg_type++;
5188     target_size = thunk_type_size(arg_type, 0);
5189     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5190     if (!argptr) {
5191         ret = -TARGET_EFAULT;
5192         goto out;
5193     }
5194     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5195     unlock_user(argptr, arg, 0);
5196 
5197     /* buf_temp is too small, so fetch things into a bigger buffer */
5198     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5199     memcpy(big_buf, buf_temp, target_size);
5200     buf_temp = big_buf;
5201     host_dm = big_buf;
5202 
5203     guest_data = arg + host_dm->data_start;
5204     if ((guest_data - arg) < 0) {
5205         ret = -TARGET_EINVAL;
5206         goto out;
5207     }
5208     guest_data_size = host_dm->data_size - host_dm->data_start;
5209     host_data = (char*)host_dm + host_dm->data_start;
5210 
5211     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5212     if (!argptr) {
5213         ret = -TARGET_EFAULT;
5214         goto out;
5215     }
5216 
5217     switch (ie->host_cmd) {
5218     case DM_REMOVE_ALL:
5219     case DM_LIST_DEVICES:
5220     case DM_DEV_CREATE:
5221     case DM_DEV_REMOVE:
5222     case DM_DEV_SUSPEND:
5223     case DM_DEV_STATUS:
5224     case DM_DEV_WAIT:
5225     case DM_TABLE_STATUS:
5226     case DM_TABLE_CLEAR:
5227     case DM_TABLE_DEPS:
5228     case DM_LIST_VERSIONS:
5229         /* no input data */
5230         break;
5231     case DM_DEV_RENAME:
5232     case DM_DEV_SET_GEOMETRY:
5233         /* data contains only strings */
5234         memcpy(host_data, argptr, guest_data_size);
5235         break;
5236     case DM_TARGET_MSG:
5237         memcpy(host_data, argptr, guest_data_size);
5238         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5239         break;
5240     case DM_TABLE_LOAD:
5241     {
5242         void *gspec = argptr;
5243         void *cur_data = host_data;
5244         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5245         int spec_size = thunk_type_size(arg_type, 0);
5246         int i;
5247 
5248         for (i = 0; i < host_dm->target_count; i++) {
5249             struct dm_target_spec *spec = cur_data;
5250             uint32_t next;
5251             int slen;
5252 
5253             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5254             slen = strlen((char*)gspec + spec_size) + 1;
5255             next = spec->next;
5256             spec->next = sizeof(*spec) + slen;
5257             strcpy((char*)&spec[1], gspec + spec_size);
5258             gspec += next;
5259             cur_data += spec->next;
5260         }
5261         break;
5262     }
5263     default:
5264         ret = -TARGET_EINVAL;
5265         unlock_user(argptr, guest_data, 0);
5266         goto out;
5267     }
5268     unlock_user(argptr, guest_data, 0);
5269 
5270     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5271     if (!is_error(ret)) {
5272         guest_data = arg + host_dm->data_start;
5273         guest_data_size = host_dm->data_size - host_dm->data_start;
5274         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5275         switch (ie->host_cmd) {
5276         case DM_REMOVE_ALL:
5277         case DM_DEV_CREATE:
5278         case DM_DEV_REMOVE:
5279         case DM_DEV_RENAME:
5280         case DM_DEV_SUSPEND:
5281         case DM_DEV_STATUS:
5282         case DM_TABLE_LOAD:
5283         case DM_TABLE_CLEAR:
5284         case DM_TARGET_MSG:
5285         case DM_DEV_SET_GEOMETRY:
5286             /* no return data */
5287             break;
5288         case DM_LIST_DEVICES:
5289         {
5290             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5291             uint32_t remaining_data = guest_data_size;
5292             void *cur_data = argptr;
5293             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5294             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5295 
5296             while (1) {
5297                 uint32_t next = nl->next;
5298                 if (next) {
5299                     nl->next = nl_size + (strlen(nl->name) + 1);
5300                 }
5301                 if (remaining_data < nl->next) {
5302                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5303                     break;
5304                 }
5305                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5306                 strcpy(cur_data + nl_size, nl->name);
5307                 cur_data += nl->next;
5308                 remaining_data -= nl->next;
5309                 if (!next) {
5310                     break;
5311                 }
5312                 nl = (void*)nl + next;
5313             }
5314             break;
5315         }
5316         case DM_DEV_WAIT:
5317         case DM_TABLE_STATUS:
5318         {
5319             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5320             void *cur_data = argptr;
5321             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5322             int spec_size = thunk_type_size(arg_type, 0);
5323             int i;
5324 
5325             for (i = 0; i < host_dm->target_count; i++) {
5326                 uint32_t next = spec->next;
5327                 int slen = strlen((char*)&spec[1]) + 1;
5328                 spec->next = (cur_data - argptr) + spec_size + slen;
5329                 if (guest_data_size < spec->next) {
5330                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5331                     break;
5332                 }
5333                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5334                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5335                 cur_data = argptr + spec->next;
5336                 spec = (void*)host_dm + host_dm->data_start + next;
5337             }
5338             break;
5339         }
5340         case DM_TABLE_DEPS:
5341         {
5342             void *hdata = (void*)host_dm + host_dm->data_start;
5343             int count = *(uint32_t*)hdata;
5344             uint64_t *hdev = hdata + 8;
5345             uint64_t *gdev = argptr + 8;
5346             int i;
5347 
5348             *(uint32_t*)argptr = tswap32(count);
5349             for (i = 0; i < count; i++) {
5350                 *gdev = tswap64(*hdev);
5351                 gdev++;
5352                 hdev++;
5353             }
5354             break;
5355         }
5356         case DM_LIST_VERSIONS:
5357         {
5358             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5359             uint32_t remaining_data = guest_data_size;
5360             void *cur_data = argptr;
5361             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5362             int vers_size = thunk_type_size(arg_type, 0);
5363 
5364             while (1) {
5365                 uint32_t next = vers->next;
5366                 if (next) {
5367                     vers->next = vers_size + (strlen(vers->name) + 1);
5368                 }
5369                 if (remaining_data < vers->next) {
5370                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5371                     break;
5372                 }
5373                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5374                 strcpy(cur_data + vers_size, vers->name);
5375                 cur_data += vers->next;
5376                 remaining_data -= vers->next;
5377                 if (!next) {
5378                     break;
5379                 }
5380                 vers = (void*)vers + next;
5381             }
5382             break;
5383         }
5384         default:
5385             unlock_user(argptr, guest_data, 0);
5386             ret = -TARGET_EINVAL;
5387             goto out;
5388         }
5389         unlock_user(argptr, guest_data, guest_data_size);
5390 
5391         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5392         if (!argptr) {
5393             ret = -TARGET_EFAULT;
5394             goto out;
5395         }
5396         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5397         unlock_user(argptr, arg, target_size);
5398     }
5399 out:
5400     g_free(big_buf);
5401     return ret;
5402 }
5403 
5404 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5405                                int cmd, abi_long arg)
5406 {
5407     void *argptr;
5408     int target_size;
5409     const argtype *arg_type = ie->arg_type;
5410     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5411     abi_long ret;
5412 
5413     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5414     struct blkpg_partition host_part;
5415 
5416     /* Read and convert blkpg */
5417     arg_type++;
5418     target_size = thunk_type_size(arg_type, 0);
5419     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5420     if (!argptr) {
5421         ret = -TARGET_EFAULT;
5422         goto out;
5423     }
5424     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5425     unlock_user(argptr, arg, 0);
5426 
5427     switch (host_blkpg->op) {
5428     case BLKPG_ADD_PARTITION:
5429     case BLKPG_DEL_PARTITION:
5430         /* payload is struct blkpg_partition */
5431         break;
5432     default:
5433         /* Unknown opcode */
5434         ret = -TARGET_EINVAL;
5435         goto out;
5436     }
5437 
5438     /* Read and convert blkpg->data */
5439     arg = (abi_long)(uintptr_t)host_blkpg->data;
5440     target_size = thunk_type_size(part_arg_type, 0);
5441     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5442     if (!argptr) {
5443         ret = -TARGET_EFAULT;
5444         goto out;
5445     }
5446     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5447     unlock_user(argptr, arg, 0);
5448 
5449     /* Swizzle the data pointer to our local copy and call! */
5450     host_blkpg->data = &host_part;
5451     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5452 
5453 out:
5454     return ret;
5455 }
5456 
5457 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5458                                 int fd, int cmd, abi_long arg)
5459 {
5460     const argtype *arg_type = ie->arg_type;
5461     const StructEntry *se;
5462     const argtype *field_types;
5463     const int *dst_offsets, *src_offsets;
5464     int target_size;
5465     void *argptr;
5466     abi_ulong *target_rt_dev_ptr = NULL;
5467     unsigned long *host_rt_dev_ptr = NULL;
5468     abi_long ret;
5469     int i;
5470 
5471     assert(ie->access == IOC_W);
5472     assert(*arg_type == TYPE_PTR);
5473     arg_type++;
5474     assert(*arg_type == TYPE_STRUCT);
5475     target_size = thunk_type_size(arg_type, 0);
5476     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5477     if (!argptr) {
5478         return -TARGET_EFAULT;
5479     }
5480     arg_type++;
5481     assert(*arg_type == (int)STRUCT_rtentry);
5482     se = struct_entries + *arg_type++;
5483     assert(se->convert[0] == NULL);
5484     /* convert struct here to be able to catch rt_dev string */
5485     field_types = se->field_types;
5486     dst_offsets = se->field_offsets[THUNK_HOST];
5487     src_offsets = se->field_offsets[THUNK_TARGET];
5488     for (i = 0; i < se->nb_fields; i++) {
5489         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5490             assert(*field_types == TYPE_PTRVOID);
5491             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5492             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5493             if (*target_rt_dev_ptr != 0) {
5494                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5495                                                   tswapal(*target_rt_dev_ptr));
5496                 if (!*host_rt_dev_ptr) {
5497                     unlock_user(argptr, arg, 0);
5498                     return -TARGET_EFAULT;
5499                 }
5500             } else {
5501                 *host_rt_dev_ptr = 0;
5502             }
5503             field_types++;
5504             continue;
5505         }
5506         field_types = thunk_convert(buf_temp + dst_offsets[i],
5507                                     argptr + src_offsets[i],
5508                                     field_types, THUNK_HOST);
5509     }
5510     unlock_user(argptr, arg, 0);
5511 
5512     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5513 
5514     assert(host_rt_dev_ptr != NULL);
5515     assert(target_rt_dev_ptr != NULL);
5516     if (*host_rt_dev_ptr != 0) {
5517         unlock_user((void *)*host_rt_dev_ptr,
5518                     *target_rt_dev_ptr, 0);
5519     }
5520     return ret;
5521 }
5522 
5523 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5524                                      int fd, int cmd, abi_long arg)
5525 {
5526     int sig = target_to_host_signal(arg);
5527     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5528 }
5529 
5530 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5531                                     int fd, int cmd, abi_long arg)
5532 {
5533     struct timeval tv;
5534     abi_long ret;
5535 
5536     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5537     if (is_error(ret)) {
5538         return ret;
5539     }
5540 
5541     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5542         if (copy_to_user_timeval(arg, &tv)) {
5543             return -TARGET_EFAULT;
5544         }
5545     } else {
5546         if (copy_to_user_timeval64(arg, &tv)) {
5547             return -TARGET_EFAULT;
5548         }
5549     }
5550 
5551     return ret;
5552 }
5553 
5554 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5555                                       int fd, int cmd, abi_long arg)
5556 {
5557     struct timespec ts;
5558     abi_long ret;
5559 
5560     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5561     if (is_error(ret)) {
5562         return ret;
5563     }
5564 
5565     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5566         if (host_to_target_timespec(arg, &ts)) {
5567             return -TARGET_EFAULT;
5568         }
5569     } else {
5570         if (host_to_target_timespec64(arg, &ts)) {
5571             return -TARGET_EFAULT;
5572         }
5573     }
5574 
5575     return ret;
5576 }
5577 
5578 #ifdef TIOCGPTPEER
5579 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5580                                      int fd, int cmd, abi_long arg)
5581 {
5582     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5583     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5584 }
5585 #endif
5586 
5587 #ifdef HAVE_DRM_H
5588 
5589 static void unlock_drm_version(struct drm_version *host_ver,
5590                                struct target_drm_version *target_ver,
5591                                bool copy)
5592 {
5593     unlock_user(host_ver->name, target_ver->name,
5594                                 copy ? host_ver->name_len : 0);
5595     unlock_user(host_ver->date, target_ver->date,
5596                                 copy ? host_ver->date_len : 0);
5597     unlock_user(host_ver->desc, target_ver->desc,
5598                                 copy ? host_ver->desc_len : 0);
5599 }
5600 
5601 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5602                                           struct target_drm_version *target_ver)
5603 {
5604     memset(host_ver, 0, sizeof(*host_ver));
5605 
5606     __get_user(host_ver->name_len, &target_ver->name_len);
5607     if (host_ver->name_len) {
5608         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5609                                    target_ver->name_len, 0);
5610         if (!host_ver->name) {
5611             return -EFAULT;
5612         }
5613     }
5614 
5615     __get_user(host_ver->date_len, &target_ver->date_len);
5616     if (host_ver->date_len) {
5617         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5618                                    target_ver->date_len, 0);
5619         if (!host_ver->date) {
5620             goto err;
5621         }
5622     }
5623 
5624     __get_user(host_ver->desc_len, &target_ver->desc_len);
5625     if (host_ver->desc_len) {
5626         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5627                                    target_ver->desc_len, 0);
5628         if (!host_ver->desc) {
5629             goto err;
5630         }
5631     }
5632 
5633     return 0;
5634 err:
5635     unlock_drm_version(host_ver, target_ver, false);
5636     return -EFAULT;
5637 }
5638 
5639 static inline void host_to_target_drmversion(
5640                                           struct target_drm_version *target_ver,
5641                                           struct drm_version *host_ver)
5642 {
5643     __put_user(host_ver->version_major, &target_ver->version_major);
5644     __put_user(host_ver->version_minor, &target_ver->version_minor);
5645     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5646     __put_user(host_ver->name_len, &target_ver->name_len);
5647     __put_user(host_ver->date_len, &target_ver->date_len);
5648     __put_user(host_ver->desc_len, &target_ver->desc_len);
5649     unlock_drm_version(host_ver, target_ver, true);
5650 }
5651 
5652 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5653                              int fd, int cmd, abi_long arg)
5654 {
5655     struct drm_version *ver;
5656     struct target_drm_version *target_ver;
5657     abi_long ret;
5658 
5659     switch (ie->host_cmd) {
5660     case DRM_IOCTL_VERSION:
5661         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5662             return -TARGET_EFAULT;
5663         }
5664         ver = (struct drm_version *)buf_temp;
5665         ret = target_to_host_drmversion(ver, target_ver);
5666         if (!is_error(ret)) {
5667             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5668             if (is_error(ret)) {
5669                 unlock_drm_version(ver, target_ver, false);
5670             } else {
5671                 host_to_target_drmversion(target_ver, ver);
5672             }
5673         }
5674         unlock_user_struct(target_ver, arg, 0);
5675         return ret;
5676     }
5677     return -TARGET_ENOSYS;
5678 }
5679 
5680 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5681                                            struct drm_i915_getparam *gparam,
5682                                            int fd, abi_long arg)
5683 {
5684     abi_long ret;
5685     int value;
5686     struct target_drm_i915_getparam *target_gparam;
5687 
5688     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5689         return -TARGET_EFAULT;
5690     }
5691 
5692     __get_user(gparam->param, &target_gparam->param);
5693     gparam->value = &value;
5694     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5695     put_user_s32(value, target_gparam->value);
5696 
5697     unlock_user_struct(target_gparam, arg, 0);
5698     return ret;
5699 }
5700 
5701 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5702                                   int fd, int cmd, abi_long arg)
5703 {
5704     switch (ie->host_cmd) {
5705     case DRM_IOCTL_I915_GETPARAM:
5706         return do_ioctl_drm_i915_getparam(ie,
5707                                           (struct drm_i915_getparam *)buf_temp,
5708                                           fd, arg);
5709     default:
5710         return -TARGET_ENOSYS;
5711     }
5712 }
5713 
5714 #endif
5715 
5716 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5717                                         int fd, int cmd, abi_long arg)
5718 {
5719     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5720     struct tun_filter *target_filter;
5721     char *target_addr;
5722 
5723     assert(ie->access == IOC_W);
5724 
5725     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5726     if (!target_filter) {
5727         return -TARGET_EFAULT;
5728     }
5729     filter->flags = tswap16(target_filter->flags);
5730     filter->count = tswap16(target_filter->count);
5731     unlock_user(target_filter, arg, 0);
5732 
5733     if (filter->count) {
5734         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5735             MAX_STRUCT_SIZE) {
5736             return -TARGET_EFAULT;
5737         }
5738 
5739         target_addr = lock_user(VERIFY_READ,
5740                                 arg + offsetof(struct tun_filter, addr),
5741                                 filter->count * ETH_ALEN, 1);
5742         if (!target_addr) {
5743             return -TARGET_EFAULT;
5744         }
5745         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5746         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5747     }
5748 
5749     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5750 }
5751 
5752 IOCTLEntry ioctl_entries[] = {
5753 #define IOCTL(cmd, access, ...) \
5754     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5755 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5756     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5757 #define IOCTL_IGNORE(cmd) \
5758     { TARGET_ ## cmd, 0, #cmd },
5759 #include "ioctls.h"
5760     { 0, 0, },
5761 };
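/*
 * For illustration, an ioctls.h entry along the lines of
 *
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * expands to the table row
 *
 *     { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONG) } },
 *
 * which the generic code in do_ioctl() handles through the thunk machinery,
 * while IOCTL_SPECIAL() entries name one of the do_ioctl_*() helpers above
 * as a custom converter.
 */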
5762 
5763 /* ??? Implement proper locking for ioctls.  */
5764 /* do_ioctl() must return target values and target errnos. */
5765 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5766 {
5767     const IOCTLEntry *ie;
5768     const argtype *arg_type;
5769     abi_long ret;
5770     uint8_t buf_temp[MAX_STRUCT_SIZE];
5771     int target_size;
5772     void *argptr;
5773 
5774     ie = ioctl_entries;
5775     for (;;) {
5776         if (ie->target_cmd == 0) {
5777             qemu_log_mask(
5778                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5779             return -TARGET_ENOSYS;
5780         }
5781         if (ie->target_cmd == cmd)
5782             break;
5783         ie++;
5784     }
5785     arg_type = ie->arg_type;
5786     if (ie->do_ioctl) {
5787         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5788     } else if (!ie->host_cmd) {
5789         /* Some architectures define BSD ioctls in their headers
5790            that are not implemented in Linux.  */
5791         return -TARGET_ENOSYS;
5792     }
5793 
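    /*
     * Generic conversion driven by ie->access: IOC_W arguments are copied
     * guest -> host (via buf_temp) before the ioctl, IOC_R arguments are
     * copied host -> guest afterwards, and IOC_RW does both.
     */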
5794     switch (arg_type[0]) {
5795     case TYPE_NULL:
5796         /* no argument */
5797         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5798         break;
5799     case TYPE_PTRVOID:
5800     case TYPE_INT:
5801     case TYPE_LONG:
5802     case TYPE_ULONG:
5803         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5804         break;
5805     case TYPE_PTR:
5806         arg_type++;
5807         target_size = thunk_type_size(arg_type, 0);
5808         switch (ie->access) {
5809         case IOC_R:
5810             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5811             if (!is_error(ret)) {
5812                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5813                 if (!argptr)
5814                     return -TARGET_EFAULT;
5815                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5816                 unlock_user(argptr, arg, target_size);
5817             }
5818             break;
5819         case IOC_W:
5820             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5821             if (!argptr)
5822                 return -TARGET_EFAULT;
5823             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5824             unlock_user(argptr, arg, 0);
5825             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5826             break;
5827         default:
5828         case IOC_RW:
5829             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5830             if (!argptr)
5831                 return -TARGET_EFAULT;
5832             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5833             unlock_user(argptr, arg, 0);
5834             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5835             if (!is_error(ret)) {
5836                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5837                 if (!argptr)
5838                     return -TARGET_EFAULT;
5839                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5840                 unlock_user(argptr, arg, target_size);
5841             }
5842             break;
5843         }
5844         break;
5845     default:
5846         qemu_log_mask(LOG_UNIMP,
5847                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5848                       (long)cmd, arg_type[0]);
5849         ret = -TARGET_ENOSYS;
5850         break;
5851     }
5852     return ret;
5853 }
5854 
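/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() sets host_bits in the result
 * whenever (flags & target_mask) == target_bits, and host_to_target_bitmask()
 * does the reverse.  For simple on/off flags such as IGNBRK the mask and the
 * bit are identical; multi-bit fields such as CSIZE or CBAUD contribute one
 * row per possible value under a shared mask.
 */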
5855 static const bitmask_transtbl iflag_tbl[] = {
5856         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5857         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5858         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5859         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5860         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5861         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5862         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5863         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5864         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5865         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5866         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5867         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5868         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5869         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5870         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5871         { 0, 0, 0, 0 }
5872 };
5873 
5874 static const bitmask_transtbl oflag_tbl[] = {
5875 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5876 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5877 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5878 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5879 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5880 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5881 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5882 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5883 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5884 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5885 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5886 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5887 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5888 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5889 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5890 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5891 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5892 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5893 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5894 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5895 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5896 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5897 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5898 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5899 	{ 0, 0, 0, 0 }
5900 };
5901 
5902 static const bitmask_transtbl cflag_tbl[] = {
5903 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5904 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5905 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5906 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5907 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5908 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5909 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5910 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5911 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5912 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5913 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5914 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5915 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5916 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5917 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5918 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5919 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5920 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5921 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5922 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5923 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5924 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5925 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5926 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5927 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5928 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5929 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5930 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5931 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5932 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5933 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5934 	{ 0, 0, 0, 0 }
5935 };
5936 
5937 static const bitmask_transtbl lflag_tbl[] = {
5938   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5939   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5940   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5941   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5942   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5943   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5944   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5945   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5946   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5947   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5948   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5949   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5950   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5951   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5952   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5953   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5954   { 0, 0, 0, 0 }
5955 };
5956 
5957 static void target_to_host_termios (void *dst, const void *src)
5958 {
5959     struct host_termios *host = dst;
5960     const struct target_termios *target = src;
5961 
5962     host->c_iflag =
5963         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5964     host->c_oflag =
5965         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5966     host->c_cflag =
5967         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5968     host->c_lflag =
5969         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5970     host->c_line = target->c_line;
5971 
5972     memset(host->c_cc, 0, sizeof(host->c_cc));
5973     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5974     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5975     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5976     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5977     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5978     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5979     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5980     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5981     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5982     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5983     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5984     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5985     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5986     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5987     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5988     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5989     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5990 }
5991 
5992 static void host_to_target_termios (void *dst, const void *src)
5993 {
5994     struct target_termios *target = dst;
5995     const struct host_termios *host = src;
5996 
5997     target->c_iflag =
5998         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5999     target->c_oflag =
6000         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6001     target->c_cflag =
6002         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6003     target->c_lflag =
6004         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6005     target->c_line = host->c_line;
6006 
6007     memset(target->c_cc, 0, sizeof(target->c_cc));
6008     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6009     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6010     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6011     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6012     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6013     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6014     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6015     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6016     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6017     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6018     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6019     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6020     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6021     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6022     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6023     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6024     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6025 }
6026 
6027 static const StructEntry struct_termios_def = {
6028     .convert = { host_to_target_termios, target_to_host_termios },
6029     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6030     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6031     .print = print_termios,
6032 };
6033 
6034 static bitmask_transtbl mmap_flags_tbl[] = {
6035     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6036     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6037     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6038     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6039       MAP_ANONYMOUS, MAP_ANONYMOUS },
6040     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6041       MAP_GROWSDOWN, MAP_GROWSDOWN },
6042     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6043       MAP_DENYWRITE, MAP_DENYWRITE },
6044     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6045       MAP_EXECUTABLE, MAP_EXECUTABLE },
6046     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6047     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6048       MAP_NORESERVE, MAP_NORESERVE },
6049     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6050     /* MAP_STACK has been ignored by the kernel for quite some time.
6051        Recognize it for the target insofar as we do not want to pass
6052        it through to the host.  */
6053     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6054     { 0, 0, 0, 0 }
6055 };
6056 
6057 /*
6058  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6059  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6060  */
6061 #if defined(TARGET_I386)
6062 
6063 /* NOTE: there is really only one LDT shared by all the threads */
6064 static uint8_t *ldt_table;
6065 
6066 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6067 {
6068     int size;
6069     void *p;
6070 
6071     if (!ldt_table)
6072         return 0;
6073     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6074     if (size > bytecount)
6075         size = bytecount;
6076     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6077     if (!p)
6078         return -TARGET_EFAULT;
6079     /* ??? Should this be byteswapped?  */
6080     memcpy(p, ldt_table, size);
6081     unlock_user(p, ptr, size);
6082     return size;
6083 }
6084 
6085 /* XXX: add locking support */
6086 static abi_long write_ldt(CPUX86State *env,
6087                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6088 {
6089     struct target_modify_ldt_ldt_s ldt_info;
6090     struct target_modify_ldt_ldt_s *target_ldt_info;
6091     int seg_32bit, contents, read_exec_only, limit_in_pages;
6092     int seg_not_present, useable, lm;
6093     uint32_t *lp, entry_1, entry_2;
6094 
6095     if (bytecount != sizeof(ldt_info))
6096         return -TARGET_EINVAL;
6097     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6098         return -TARGET_EFAULT;
6099     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6100     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6101     ldt_info.limit = tswap32(target_ldt_info->limit);
6102     ldt_info.flags = tswap32(target_ldt_info->flags);
6103     unlock_user_struct(target_ldt_info, ptr, 0);
6104 
6105     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6106         return -TARGET_EINVAL;
6107     seg_32bit = ldt_info.flags & 1;
6108     contents = (ldt_info.flags >> 1) & 3;
6109     read_exec_only = (ldt_info.flags >> 3) & 1;
6110     limit_in_pages = (ldt_info.flags >> 4) & 1;
6111     seg_not_present = (ldt_info.flags >> 5) & 1;
6112     useable = (ldt_info.flags >> 6) & 1;
6113 #ifdef TARGET_ABI32
6114     lm = 0;
6115 #else
6116     lm = (ldt_info.flags >> 7) & 1;
6117 #endif
6118     if (contents == 3) {
6119         if (oldmode)
6120             return -TARGET_EINVAL;
6121         if (seg_not_present == 0)
6122             return -TARGET_EINVAL;
6123     }
6124     /* allocate the LDT */
6125     if (!ldt_table) {
6126         env->ldt.base = target_mmap(0,
6127                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6128                                     PROT_READ|PROT_WRITE,
6129                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6130         if (env->ldt.base == -1)
6131             return -TARGET_ENOMEM;
6132         memset(g2h(env->ldt.base), 0,
6133                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6134         env->ldt.limit = 0xffff;
6135         ldt_table = g2h(env->ldt.base);
6136     }
6137 
6138     /* NOTE: same code as Linux kernel */
6139     /* Allow LDTs to be cleared by the user. */
6140     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6141         if (oldmode ||
6142             (contents == 0             &&
6143              read_exec_only == 1       &&
6144              seg_32bit == 0            &&
6145              limit_in_pages == 0       &&
6146              seg_not_present == 1      &&
6147              useable == 0)) {
6148             entry_1 = 0;
6149             entry_2 = 0;
6150             goto install;
6151         }
6152     }
6153 
6154     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6155         (ldt_info.limit & 0x0ffff);
6156     entry_2 = (ldt_info.base_addr & 0xff000000) |
6157         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6158         (ldt_info.limit & 0xf0000) |
6159         ((read_exec_only ^ 1) << 9) |
6160         (contents << 10) |
6161         ((seg_not_present ^ 1) << 15) |
6162         (seg_32bit << 22) |
6163         (limit_in_pages << 23) |
6164         (lm << 21) |
6165         0x7000;
6166     if (!oldmode)
6167         entry_2 |= (useable << 20);
6168 
6169     /* Install the new entry ...  */
6170 install:
6171     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6172     lp[0] = tswap32(entry_1);
6173     lp[1] = tswap32(entry_2);
6174     return 0;
6175 }
6176 
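     /*
      * modify_ldt(2) dispatch: func 0 reads the LDT, func 1 writes an entry
      * using the legacy format and func 0x11 writes one using the current
      * format; anything else is rejected with ENOSYS, as the kernel does.
      */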
6177 /* specific and weird i386 syscalls */
6178 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6179                               unsigned long bytecount)
6180 {
6181     abi_long ret;
6182 
6183     switch (func) {
6184     case 0:
6185         ret = read_ldt(ptr, bytecount);
6186         break;
6187     case 1:
6188         ret = write_ldt(env, ptr, bytecount, 1);
6189         break;
6190     case 0x11:
6191         ret = write_ldt(env, ptr, bytecount, 0);
6192         break;
6193     default:
6194         ret = -TARGET_ENOSYS;
6195         break;
6196     }
6197     return ret;
6198 }
6199 
6200 #if defined(TARGET_ABI32)
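     /*
      * set_thread_area(2): an entry_number of -1 asks us to pick a free TLS
      * slot in the GDT between TARGET_GDT_ENTRY_TLS_MIN and
      * TARGET_GDT_ENTRY_TLS_MAX; the index actually used is written back to
      * the guest structure.
      */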
6201 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6202 {
6203     uint64_t *gdt_table = g2h(env->gdt.base);
6204     struct target_modify_ldt_ldt_s ldt_info;
6205     struct target_modify_ldt_ldt_s *target_ldt_info;
6206     int seg_32bit, contents, read_exec_only, limit_in_pages;
6207     int seg_not_present, useable, lm;
6208     uint32_t *lp, entry_1, entry_2;
6209     int i;
6210 
6211     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6212     if (!target_ldt_info)
6213         return -TARGET_EFAULT;
6214     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6215     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6216     ldt_info.limit = tswap32(target_ldt_info->limit);
6217     ldt_info.flags = tswap32(target_ldt_info->flags);
6218     if (ldt_info.entry_number == -1) {
6219         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6220             if (gdt_table[i] == 0) {
6221                 ldt_info.entry_number = i;
6222                 target_ldt_info->entry_number = tswap32(i);
6223                 break;
6224             }
6225         }
6226     }
6227     unlock_user_struct(target_ldt_info, ptr, 1);
6228 
6229     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6230         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6231            return -TARGET_EINVAL;
6232     seg_32bit = ldt_info.flags & 1;
6233     contents = (ldt_info.flags >> 1) & 3;
6234     read_exec_only = (ldt_info.flags >> 3) & 1;
6235     limit_in_pages = (ldt_info.flags >> 4) & 1;
6236     seg_not_present = (ldt_info.flags >> 5) & 1;
6237     useable = (ldt_info.flags >> 6) & 1;
6238 #ifdef TARGET_ABI32
6239     lm = 0;
6240 #else
6241     lm = (ldt_info.flags >> 7) & 1;
6242 #endif
6243 
6244     if (contents == 3) {
6245         if (seg_not_present == 0)
6246             return -TARGET_EINVAL;
6247     }
6248 
6249     /* NOTE: same code as Linux kernel */
6250     /* Allow LDTs to be cleared by the user. */
6251     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6252         if ((contents == 0             &&
6253              read_exec_only == 1       &&
6254              seg_32bit == 0            &&
6255              limit_in_pages == 0       &&
6256              seg_not_present == 1      &&
6257              useable == 0)) {
6258             entry_1 = 0;
6259             entry_2 = 0;
6260             goto install;
6261         }
6262     }
6263 
6264     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6265         (ldt_info.limit & 0x0ffff);
6266     entry_2 = (ldt_info.base_addr & 0xff000000) |
6267         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6268         (ldt_info.limit & 0xf0000) |
6269         ((read_exec_only ^ 1) << 9) |
6270         (contents << 10) |
6271         ((seg_not_present ^ 1) << 15) |
6272         (seg_32bit << 22) |
6273         (limit_in_pages << 23) |
6274         (useable << 20) |
6275         (lm << 21) |
6276         0x7000;
6277 
6278     /* Install the new entry ...  */
6279 install:
6280     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6281     lp[0] = tswap32(entry_1);
6282     lp[1] = tswap32(entry_2);
6283     return 0;
6284 }
6285 
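     /*
      * get_thread_area(2): the inverse of do_set_thread_area(); decode the two
      * descriptor words from the selected GDT slot back into the base, limit
      * and flags fields expected by the guest.
      */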
6286 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6287 {
6288     struct target_modify_ldt_ldt_s *target_ldt_info;
6289     uint64_t *gdt_table = g2h(env->gdt.base);
6290     uint32_t base_addr, limit, flags;
6291     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6292     int seg_not_present, useable, lm;
6293     uint32_t *lp, entry_1, entry_2;
6294 
6295     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6296     if (!target_ldt_info)
6297         return -TARGET_EFAULT;
6298     idx = tswap32(target_ldt_info->entry_number);
6299     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6300         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6301         unlock_user_struct(target_ldt_info, ptr, 1);
6302         return -TARGET_EINVAL;
6303     }
6304     lp = (uint32_t *)(gdt_table + idx);
6305     entry_1 = tswap32(lp[0]);
6306     entry_2 = tswap32(lp[1]);
6307 
6308     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6309     contents = (entry_2 >> 10) & 3;
6310     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6311     seg_32bit = (entry_2 >> 22) & 1;
6312     limit_in_pages = (entry_2 >> 23) & 1;
6313     useable = (entry_2 >> 20) & 1;
6314 #ifdef TARGET_ABI32
6315     lm = 0;
6316 #else
6317     lm = (entry_2 >> 21) & 1;
6318 #endif
6319     flags = (seg_32bit << 0) | (contents << 1) |
6320         (read_exec_only << 3) | (limit_in_pages << 4) |
6321         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6322     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6323     base_addr = (entry_1 >> 16) |
6324         (entry_2 & 0xff000000) |
6325         ((entry_2 & 0xff) << 16);
6326     target_ldt_info->base_addr = tswapal(base_addr);
6327     target_ldt_info->limit = tswap32(limit);
6328     target_ldt_info->flags = tswap32(flags);
6329     unlock_user_struct(target_ldt_info, ptr, 1);
6330     return 0;
6331 }
6332 
6333 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6334 {
6335     return -TARGET_ENOSYS;
6336 }
6337 #else
6338 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6339 {
6340     abi_long ret = 0;
6341     abi_ulong val;
6342     int idx;
6343 
6344     switch (code) {
6345     case TARGET_ARCH_SET_GS:
6346     case TARGET_ARCH_SET_FS:
6347         if (code == TARGET_ARCH_SET_GS)
6348             idx = R_GS;
6349         else
6350             idx = R_FS;
6351         cpu_x86_load_seg(env, idx, 0);
6352         env->segs[idx].base = addr;
6353         break;
6354     case TARGET_ARCH_GET_GS:
6355     case TARGET_ARCH_GET_FS:
6356         if (code == TARGET_ARCH_GET_GS)
6357             idx = R_GS;
6358         else
6359             idx = R_FS;
6360         val = env->segs[idx].base;
6361         if (put_user(val, addr, abi_ulong))
6362             ret = -TARGET_EFAULT;
6363         break;
6364     default:
6365         ret = -TARGET_EINVAL;
6366         break;
6367     }
6368     return ret;
6369 }
6370 #endif /* defined(TARGET_ABI32) */
6371 
6372 #endif /* defined(TARGET_I386) */
6373 
6374 #define NEW_STACK_SIZE 0x40000
6375 
6376 
6377 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
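     /* Bookkeeping passed from do_fork() to clone_func() for a new guest
        thread; the mutex/cond pair lets the parent wait until the child has
        published its TID and is ready to run. */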
6378 typedef struct {
6379     CPUArchState *env;
6380     pthread_mutex_t mutex;
6381     pthread_cond_t cond;
6382     pthread_t thread;
6383     uint32_t tid;
6384     abi_ulong child_tidptr;
6385     abi_ulong parent_tidptr;
6386     sigset_t sigmask;
6387 } new_thread_info;
6388 
6389 static void *clone_func(void *arg)
6390 {
6391     new_thread_info *info = arg;
6392     CPUArchState *env;
6393     CPUState *cpu;
6394     TaskState *ts;
6395 
6396     rcu_register_thread();
6397     tcg_register_thread();
6398     env = info->env;
6399     cpu = env_cpu(env);
6400     thread_cpu = cpu;
6401     ts = (TaskState *)cpu->opaque;
6402     info->tid = sys_gettid();
6403     task_settid(ts);
6404     if (info->child_tidptr)
6405         put_user_u32(info->tid, info->child_tidptr);
6406     if (info->parent_tidptr)
6407         put_user_u32(info->tid, info->parent_tidptr);
6408     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6409     /* Enable signals.  */
6410     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6411     /* Signal to the parent that we're ready.  */
6412     pthread_mutex_lock(&info->mutex);
6413     pthread_cond_broadcast(&info->cond);
6414     pthread_mutex_unlock(&info->mutex);
6415     /* Wait until the parent has finished initializing the tls state.  */
6416     pthread_mutex_lock(&clone_lock);
6417     pthread_mutex_unlock(&clone_lock);
6418     cpu_loop(env);
6419     /* never exits */
6420     return NULL;
6421 }
6422 
6423 /* do_fork() must return host values and target errnos (unlike most
6424    do_*() functions). */
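     /* Threads (CLONE_VM) are backed by host pthreads created via clone_func();
        everything else is emulated with a host fork(), with CLONE_VFORK
        downgraded to a plain fork below. */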
6425 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6426                    abi_ulong parent_tidptr, target_ulong newtls,
6427                    abi_ulong child_tidptr)
6428 {
6429     CPUState *cpu = env_cpu(env);
6430     int ret;
6431     TaskState *ts;
6432     CPUState *new_cpu;
6433     CPUArchState *new_env;
6434     sigset_t sigmask;
6435 
6436     flags &= ~CLONE_IGNORED_FLAGS;
6437 
6438     /* Emulate vfork() with fork() */
6439     if (flags & CLONE_VFORK)
6440         flags &= ~(CLONE_VFORK | CLONE_VM);
6441 
6442     if (flags & CLONE_VM) {
6443         TaskState *parent_ts = (TaskState *)cpu->opaque;
6444         new_thread_info info;
6445         pthread_attr_t attr;
6446 
6447         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6448             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6449             return -TARGET_EINVAL;
6450         }
6451 
6452         ts = g_new0(TaskState, 1);
6453         init_task_state(ts);
6454 
6455         /* Grab a mutex so that thread setup appears atomic.  */
6456         pthread_mutex_lock(&clone_lock);
6457 
6458         /* we create a new CPU instance. */
6459         new_env = cpu_copy(env);
6460         /* Init regs that differ from the parent.  */
6461         cpu_clone_regs_child(new_env, newsp, flags);
6462         cpu_clone_regs_parent(env, flags);
6463         new_cpu = env_cpu(new_env);
6464         new_cpu->opaque = ts;
6465         ts->bprm = parent_ts->bprm;
6466         ts->info = parent_ts->info;
6467         ts->signal_mask = parent_ts->signal_mask;
6468 
6469         if (flags & CLONE_CHILD_CLEARTID) {
6470             ts->child_tidptr = child_tidptr;
6471         }
6472 
6473         if (flags & CLONE_SETTLS) {
6474             cpu_set_tls (new_env, newtls);
6475         }
6476 
6477         memset(&info, 0, sizeof(info));
6478         pthread_mutex_init(&info.mutex, NULL);
6479         pthread_mutex_lock(&info.mutex);
6480         pthread_cond_init(&info.cond, NULL);
6481         info.env = new_env;
6482         if (flags & CLONE_CHILD_SETTID) {
6483             info.child_tidptr = child_tidptr;
6484         }
6485         if (flags & CLONE_PARENT_SETTID) {
6486             info.parent_tidptr = parent_tidptr;
6487         }
6488 
6489         ret = pthread_attr_init(&attr);
6490         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6491         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6492         /* It is not safe to deliver signals until the child has finished
6493            initializing, so temporarily block all signals.  */
6494         sigfillset(&sigmask);
6495         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6496         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6497 
6498         /* If this is our first additional thread, we need to ensure we
6499          * generate code for parallel execution and flush old translations.
6500          */
6501         if (!parallel_cpus) {
6502             parallel_cpus = true;
6503             tb_flush(cpu);
6504         }
6505 
6506         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6507         /* TODO: Free new CPU state if thread creation failed.  */
6508 
6509         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6510         pthread_attr_destroy(&attr);
6511         if (ret == 0) {
6512             /* Wait for the child to initialize.  */
6513             pthread_cond_wait(&info.cond, &info.mutex);
6514             ret = info.tid;
6515         } else {
6516             ret = -1;
6517         }
6518         pthread_mutex_unlock(&info.mutex);
6519         pthread_cond_destroy(&info.cond);
6520         pthread_mutex_destroy(&info.mutex);
6521         pthread_mutex_unlock(&clone_lock);
6522     } else {
6523         /* if no CLONE_VM, we consider it a fork */
6524         if (flags & CLONE_INVALID_FORK_FLAGS) {
6525             return -TARGET_EINVAL;
6526         }
6527 
6528         /* We can't support custom termination signals */
6529         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6530             return -TARGET_EINVAL;
6531         }
6532 
6533         if (block_signals()) {
6534             return -TARGET_ERESTARTSYS;
6535         }
6536 
6537         fork_start();
6538         ret = fork();
6539         if (ret == 0) {
6540             /* Child Process.  */
6541             cpu_clone_regs_child(env, newsp, flags);
6542             fork_end(1);
6543             /* There is a race condition here.  The parent process could
6544                theoretically read the TID in the child process before the child
6545                tid is set.  This would require using either ptrace
6546                (not implemented) or having *_tidptr point at a shared memory
6547                mapping.  We can't repeat the spinlock hack used above because
6548                the child process gets its own copy of the lock.  */
6549             if (flags & CLONE_CHILD_SETTID)
6550                 put_user_u32(sys_gettid(), child_tidptr);
6551             if (flags & CLONE_PARENT_SETTID)
6552                 put_user_u32(sys_gettid(), parent_tidptr);
6553             ts = (TaskState *)cpu->opaque;
6554             if (flags & CLONE_SETTLS)
6555                 cpu_set_tls (env, newtls);
6556             if (flags & CLONE_CHILD_CLEARTID)
6557                 ts->child_tidptr = child_tidptr;
6558         } else {
6559             cpu_clone_regs_parent(env, flags);
6560             fork_end(0);
6561         }
6562     }
6563     return ret;
6564 }
6565 
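     /*
      * Map a guest fcntl command onto the host one.  Guest F_GETLK/F_SETLK/
      * F_SETLKW are routed to the host *64 variants because do_fcntl() below
      * always marshals locks through a host struct flock64.
      */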
6566 /* warning: does not handle Linux-specific flags... */
6567 static int target_to_host_fcntl_cmd(int cmd)
6568 {
6569     int ret;
6570 
6571     switch (cmd) {
6572     case TARGET_F_DUPFD:
6573     case TARGET_F_GETFD:
6574     case TARGET_F_SETFD:
6575     case TARGET_F_GETFL:
6576     case TARGET_F_SETFL:
6577     case TARGET_F_OFD_GETLK:
6578     case TARGET_F_OFD_SETLK:
6579     case TARGET_F_OFD_SETLKW:
6580         ret = cmd;
6581         break;
6582     case TARGET_F_GETLK:
6583         ret = F_GETLK64;
6584         break;
6585     case TARGET_F_SETLK:
6586         ret = F_SETLK64;
6587         break;
6588     case TARGET_F_SETLKW:
6589         ret = F_SETLKW64;
6590         break;
6591     case TARGET_F_GETOWN:
6592         ret = F_GETOWN;
6593         break;
6594     case TARGET_F_SETOWN:
6595         ret = F_SETOWN;
6596         break;
6597     case TARGET_F_GETSIG:
6598         ret = F_GETSIG;
6599         break;
6600     case TARGET_F_SETSIG:
6601         ret = F_SETSIG;
6602         break;
6603 #if TARGET_ABI_BITS == 32
6604     case TARGET_F_GETLK64:
6605         ret = F_GETLK64;
6606         break;
6607     case TARGET_F_SETLK64:
6608         ret = F_SETLK64;
6609         break;
6610     case TARGET_F_SETLKW64:
6611         ret = F_SETLKW64;
6612         break;
6613 #endif
6614     case TARGET_F_SETLEASE:
6615         ret = F_SETLEASE;
6616         break;
6617     case TARGET_F_GETLEASE:
6618         ret = F_GETLEASE;
6619         break;
6620 #ifdef F_DUPFD_CLOEXEC
6621     case TARGET_F_DUPFD_CLOEXEC:
6622         ret = F_DUPFD_CLOEXEC;
6623         break;
6624 #endif
6625     case TARGET_F_NOTIFY:
6626         ret = F_NOTIFY;
6627         break;
6628 #ifdef F_GETOWN_EX
6629     case TARGET_F_GETOWN_EX:
6630         ret = F_GETOWN_EX;
6631         break;
6632 #endif
6633 #ifdef F_SETOWN_EX
6634     case TARGET_F_SETOWN_EX:
6635         ret = F_SETOWN_EX;
6636         break;
6637 #endif
6638 #ifdef F_SETPIPE_SZ
6639     case TARGET_F_SETPIPE_SZ:
6640         ret = F_SETPIPE_SZ;
6641         break;
6642     case TARGET_F_GETPIPE_SZ:
6643         ret = F_GETPIPE_SZ;
6644         break;
6645 #endif
6646 #ifdef F_ADD_SEALS
6647     case TARGET_F_ADD_SEALS:
6648         ret = F_ADD_SEALS;
6649         break;
6650     case TARGET_F_GET_SEALS:
6651         ret = F_GET_SEALS;
6652         break;
6653 #endif
6654     default:
6655         ret = -TARGET_EINVAL;
6656         break;
6657     }
6658 
6659 #if defined(__powerpc64__)
6660     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6661      * 14, values which the kernel does not support. The glibc fcntl wrapper
6662      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6663      * the syscall directly, adjust to what is supported by the kernel.
6664      */
6665     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6666         ret -= F_GETLK64 - 5;
6667     }
6668 #endif
6669 
6670     return ret;
6671 }
6672 
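     /*
      * X-macro used for both directions of flock type conversion: each caller
      * defines TRANSTBL_CONVERT to emit the case labels it needs and then
      * expands FLOCK_TRANSTBL, so the list of lock types is written only once.
      */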
6673 #define FLOCK_TRANSTBL \
6674     switch (type) { \
6675     TRANSTBL_CONVERT(F_RDLCK); \
6676     TRANSTBL_CONVERT(F_WRLCK); \
6677     TRANSTBL_CONVERT(F_UNLCK); \
6678     TRANSTBL_CONVERT(F_EXLCK); \
6679     TRANSTBL_CONVERT(F_SHLCK); \
6680     }
6681 
6682 static int target_to_host_flock(int type)
6683 {
6684 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6685     FLOCK_TRANSTBL
6686 #undef  TRANSTBL_CONVERT
6687     return -TARGET_EINVAL;
6688 }
6689 
6690 static int host_to_target_flock(int type)
6691 {
6692 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6693     FLOCK_TRANSTBL
6694 #undef  TRANSTBL_CONVERT
6695     /* if we don't know how to convert the value coming
6696      * from the host, we copy it to the target field as-is
6697      */
6698     return type;
6699 }
6700 
6701 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6702                                             abi_ulong target_flock_addr)
6703 {
6704     struct target_flock *target_fl;
6705     int l_type;
6706 
6707     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6708         return -TARGET_EFAULT;
6709     }
6710 
6711     __get_user(l_type, &target_fl->l_type);
6712     l_type = target_to_host_flock(l_type);
6713     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
6714         return l_type;
6715     }
6716     fl->l_type = l_type;
6717     __get_user(fl->l_whence, &target_fl->l_whence);
6718     __get_user(fl->l_start, &target_fl->l_start);
6719     __get_user(fl->l_len, &target_fl->l_len);
6720     __get_user(fl->l_pid, &target_fl->l_pid);
6721     unlock_user_struct(target_fl, target_flock_addr, 0);
6722     return 0;
6723 }
6724 
6725 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6726                                           const struct flock64 *fl)
6727 {
6728     struct target_flock *target_fl;
6729     short l_type;
6730 
6731     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6732         return -TARGET_EFAULT;
6733     }
6734 
6735     l_type = host_to_target_flock(fl->l_type);
6736     __put_user(l_type, &target_fl->l_type);
6737     __put_user(fl->l_whence, &target_fl->l_whence);
6738     __put_user(fl->l_start, &target_fl->l_start);
6739     __put_user(fl->l_len, &target_fl->l_len);
6740     __put_user(fl->l_pid, &target_fl->l_pid);
6741     unlock_user_struct(target_fl, target_flock_addr, 1);
6742     return 0;
6743 }
6744 
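     /* Function pointer types for the struct flock64 copy helpers, so that
        callers later in this file (the fcntl64 handling) can substitute the
        ARM OABI variants defined below where needed. */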
6745 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6746 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6747 
6748 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6749 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6750                                                    abi_ulong target_flock_addr)
6751 {
6752     struct target_oabi_flock64 *target_fl;
6753     int l_type;
6754 
6755     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6756         return -TARGET_EFAULT;
6757     }
6758 
6759     __get_user(l_type, &target_fl->l_type);
6760     l_type = target_to_host_flock(l_type);
6761     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
6762         return l_type;
6763     }
6764     fl->l_type = l_type;
6765     __get_user(fl->l_whence, &target_fl->l_whence);
6766     __get_user(fl->l_start, &target_fl->l_start);
6767     __get_user(fl->l_len, &target_fl->l_len);
6768     __get_user(fl->l_pid, &target_fl->l_pid);
6769     unlock_user_struct(target_fl, target_flock_addr, 0);
6770     return 0;
6771 }
6772 
6773 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6774                                                  const struct flock64 *fl)
6775 {
6776     struct target_oabi_flock64 *target_fl;
6777     short l_type;
6778 
6779     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6780         return -TARGET_EFAULT;
6781     }
6782 
6783     l_type = host_to_target_flock(fl->l_type);
6784     __put_user(l_type, &target_fl->l_type);
6785     __put_user(fl->l_whence, &target_fl->l_whence);
6786     __put_user(fl->l_start, &target_fl->l_start);
6787     __put_user(fl->l_len, &target_fl->l_len);
6788     __put_user(fl->l_pid, &target_fl->l_pid);
6789     unlock_user_struct(target_fl, target_flock_addr, 1);
6790     return 0;
6791 }
6792 #endif
6793 
6794 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6795                                               abi_ulong target_flock_addr)
6796 {
6797     struct target_flock64 *target_fl;
6798     int l_type;
6799 
6800     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6801         return -TARGET_EFAULT;
6802     }
6803 
6804     __get_user(l_type, &target_fl->l_type);
6805     l_type = target_to_host_flock(l_type);
6806     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
6807         return l_type;
6808     }
6809     fl->l_type = l_type;
6810     __get_user(fl->l_whence, &target_fl->l_whence);
6811     __get_user(fl->l_start, &target_fl->l_start);
6812     __get_user(fl->l_len, &target_fl->l_len);
6813     __get_user(fl->l_pid, &target_fl->l_pid);
6814     unlock_user_struct(target_fl, target_flock_addr, 0);
6815     return 0;
6816 }
6817 
6818 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6819                                             const struct flock64 *fl)
6820 {
6821     struct target_flock64 *target_fl;
6822     short l_type;
6823 
6824     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6825         return -TARGET_EFAULT;
6826     }
6827 
6828     l_type = host_to_target_flock(fl->l_type);
6829     __put_user(l_type, &target_fl->l_type);
6830     __put_user(fl->l_whence, &target_fl->l_whence);
6831     __put_user(fl->l_start, &target_fl->l_start);
6832     __put_user(fl->l_len, &target_fl->l_len);
6833     __put_user(fl->l_pid, &target_fl->l_pid);
6834     unlock_user_struct(target_fl, target_flock_addr, 1);
6835     return 0;
6836 }
6837 
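     /*
      * do_fcntl(): translate the command, marshal flock/f_owner_ex structures
      * between guest and host layouts, and convert flag and signal values via
      * the usual translation tables.  Unknown commands are passed through to
      * the host unchanged.
      */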
6838 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6839 {
6840     struct flock64 fl64;
6841 #ifdef F_GETOWN_EX
6842     struct f_owner_ex fox;
6843     struct target_f_owner_ex *target_fox;
6844 #endif
6845     abi_long ret;
6846     int host_cmd = target_to_host_fcntl_cmd(cmd);
6847 
6848     if (host_cmd == -TARGET_EINVAL)
6849         return host_cmd;
6850 
6851     switch (cmd) {
6852     case TARGET_F_GETLK:
6853         ret = copy_from_user_flock(&fl64, arg);
6854         if (ret) {
6855             return ret;
6856         }
6857         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6858         if (ret == 0) {
6859             ret = copy_to_user_flock(arg, &fl64);
6860         }
6861         break;
6862 
6863     case TARGET_F_SETLK:
6864     case TARGET_F_SETLKW:
6865         ret = copy_from_user_flock(&fl64, arg);
6866         if (ret) {
6867             return ret;
6868         }
6869         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6870         break;
6871 
6872     case TARGET_F_GETLK64:
6873     case TARGET_F_OFD_GETLK:
6874         ret = copy_from_user_flock64(&fl64, arg);
6875         if (ret) {
6876             return ret;
6877         }
6878         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6879         if (ret == 0) {
6880             ret = copy_to_user_flock64(arg, &fl64);
6881         }
6882         break;
6883     case TARGET_F_SETLK64:
6884     case TARGET_F_SETLKW64:
6885     case TARGET_F_OFD_SETLK:
6886     case TARGET_F_OFD_SETLKW:
6887         ret = copy_from_user_flock64(&fl64, arg);
6888         if (ret) {
6889             return ret;
6890         }
6891         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6892         break;
6893 
6894     case TARGET_F_GETFL:
6895         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6896         if (ret >= 0) {
6897             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6898         }
6899         break;
6900 
6901     case TARGET_F_SETFL:
6902         ret = get_errno(safe_fcntl(fd, host_cmd,
6903                                    target_to_host_bitmask(arg,
6904                                                           fcntl_flags_tbl)));
6905         break;
6906 
6907 #ifdef F_GETOWN_EX
6908     case TARGET_F_GETOWN_EX:
6909         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6910         if (ret >= 0) {
6911             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6912                 return -TARGET_EFAULT;
6913             target_fox->type = tswap32(fox.type);
6914             target_fox->pid = tswap32(fox.pid);
6915             unlock_user_struct(target_fox, arg, 1);
6916         }
6917         break;
6918 #endif
6919 
6920 #ifdef F_SETOWN_EX
6921     case TARGET_F_SETOWN_EX:
6922         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6923             return -TARGET_EFAULT;
6924         fox.type = tswap32(target_fox->type);
6925         fox.pid = tswap32(target_fox->pid);
6926         unlock_user_struct(target_fox, arg, 0);
6927         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6928         break;
6929 #endif
6930 
6931     case TARGET_F_SETSIG:
6932         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6933         break;
6934 
6935     case TARGET_F_GETSIG:
6936         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6937         break;
6938 
6939     case TARGET_F_SETOWN:
6940     case TARGET_F_GETOWN:
6941     case TARGET_F_SETLEASE:
6942     case TARGET_F_GETLEASE:
6943     case TARGET_F_SETPIPE_SZ:
6944     case TARGET_F_GETPIPE_SZ:
6945     case TARGET_F_ADD_SEALS:
6946     case TARGET_F_GET_SEALS:
6947         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6948         break;
6949 
6950     default:
6951         ret = get_errno(safe_fcntl(fd, cmd, arg));
6952         break;
6953     }
6954     return ret;
6955 }
6956 
6957 #ifdef USE_UID16
6958 
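     /* Legacy 16-bit UID/GID handling: IDs above 65535 are squashed to the
        overflow ID 65534 when narrowing, and the special value -1 ("leave
        unchanged") must be preserved when widening.  For example,
        high2lowuid(100000) yields 65534 while low2highuid(0xffff) stays -1. */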
6959 static inline int high2lowuid(int uid)
6960 {
6961     if (uid > 65535)
6962         return 65534;
6963     else
6964         return uid;
6965 }
6966 
6967 static inline int high2lowgid(int gid)
6968 {
6969     if (gid > 65535)
6970         return 65534;
6971     else
6972         return gid;
6973 }
6974 
6975 static inline int low2highuid(int uid)
6976 {
6977     if ((int16_t)uid == -1)
6978         return -1;
6979     else
6980         return uid;
6981 }
6982 
6983 static inline int low2highgid(int gid)
6984 {
6985     if ((int16_t)gid == -1)
6986         return -1;
6987     else
6988         return gid;
6989 }
6990 static inline int tswapid(int id)
6991 {
6992     return tswap16(id);
6993 }
6994 
6995 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6996 
6997 #else /* !USE_UID16 */
6998 static inline int high2lowuid(int uid)
6999 {
7000     return uid;
7001 }
7002 static inline int high2lowgid(int gid)
7003 {
7004     return gid;
7005 }
7006 static inline int low2highuid(int uid)
7007 {
7008     return uid;
7009 }
7010 static inline int low2highgid(int gid)
7011 {
7012     return gid;
7013 }
7014 static inline int tswapid(int id)
7015 {
7016     return tswap32(id);
7017 }
7018 
7019 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7020 
7021 #endif /* USE_UID16 */
7022 
7023 /* We must do direct syscalls for setting UID/GID, because we want to
7024  * implement the Linux system call semantics of "change only for this thread",
7025  * not the libc/POSIX semantics of "change for all threads in process".
7026  * (See http://ewontfix.com/17/ for more details.)
7027  * We use the 32-bit version of the syscalls if present; if it is not
7028  * then either the host architecture supports 32-bit UIDs natively with
7029  * the standard syscall, or the 16-bit UID is the best we can do.
7030  */
7031 #ifdef __NR_setuid32
7032 #define __NR_sys_setuid __NR_setuid32
7033 #else
7034 #define __NR_sys_setuid __NR_setuid
7035 #endif
7036 #ifdef __NR_setgid32
7037 #define __NR_sys_setgid __NR_setgid32
7038 #else
7039 #define __NR_sys_setgid __NR_setgid
7040 #endif
7041 #ifdef __NR_setresuid32
7042 #define __NR_sys_setresuid __NR_setresuid32
7043 #else
7044 #define __NR_sys_setresuid __NR_setresuid
7045 #endif
7046 #ifdef __NR_setresgid32
7047 #define __NR_sys_setresgid __NR_setresgid32
7048 #else
7049 #define __NR_sys_setresgid __NR_setresgid
7050 #endif
7051 
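     /* Raw syscall wrappers: invoking the kernel directly (rather than going
        through the libc wrappers) keeps the per-thread semantics described
        above. */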
7052 _syscall1(int, sys_setuid, uid_t, uid)
7053 _syscall1(int, sys_setgid, gid_t, gid)
7054 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7055 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7056 
7057 void syscall_init(void)
7058 {
7059     IOCTLEntry *ie;
7060     const argtype *arg_type;
7061     int size;
7062     int i;
7063 
7064     thunk_init(STRUCT_MAX);
7065 
7066 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7067 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7068 #include "syscall_types.h"
7069 #undef STRUCT
7070 #undef STRUCT_SPECIAL
7071 
7072     /* Build target_to_host_errno_table[] table from
7073      * host_to_target_errno_table[]. */
7074     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7075         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7076     }
7077 
7078     /* We patch the ioctl size if necessary. We rely on the fact that
7079        no ioctl has all bits set to '1' in the size field. */
7080     ie = ioctl_entries;
7081     while (ie->target_cmd != 0) {
7082         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7083             TARGET_IOC_SIZEMASK) {
7084             arg_type = ie->arg_type;
7085             if (arg_type[0] != TYPE_PTR) {
7086                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7087                         ie->target_cmd);
7088                 exit(1);
7089             }
7090             arg_type++;
7091             size = thunk_type_size(arg_type, 0);
7092             ie->target_cmd = (ie->target_cmd &
7093                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7094                 (size << TARGET_IOC_SIZESHIFT);
7095         }
7096 
7097         /* automatic consistency check if same arch */
7098 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7099     (defined(__x86_64__) && defined(TARGET_X86_64))
7100         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7101             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7102                     ie->name, ie->target_cmd, ie->host_cmd);
7103         }
7104 #endif
7105         ie++;
7106     }
7107 }
7108 
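     /*
      * On ABIs where 64-bit syscall arguments must live in aligned register
      * pairs, regpairs_aligned() reports that a padding slot was inserted, so
      * the two halves of the offset arrive one argument later; target_offset64()
      * then reassembles them into a host 64-bit offset.
      */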
7109 #ifdef TARGET_NR_truncate64
7110 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7111                                          abi_long arg2,
7112                                          abi_long arg3,
7113                                          abi_long arg4)
7114 {
7115     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7116         arg2 = arg3;
7117         arg3 = arg4;
7118     }
7119     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7120 }
7121 #endif
7122 
7123 #ifdef TARGET_NR_ftruncate64
7124 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7125                                           abi_long arg2,
7126                                           abi_long arg3,
7127                                           abi_long arg4)
7128 {
7129     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7130         arg2 = arg3;
7131         arg3 = arg4;
7132     }
7133     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7134 }
7135 #endif
7136 
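     /*
      * Conversion helpers for struct itimerspec used by the timer_* and
      * timerfd_* syscalls: one pair handles the traditional layout, the other
      * the 64-bit time_t layout of struct __kernel_itimerspec.
      */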
7137 #if defined(TARGET_NR_timer_settime) || \
7138     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7139 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7140                                                  abi_ulong target_addr)
7141 {
7142     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7143                                 offsetof(struct target_itimerspec,
7144                                          it_interval)) ||
7145         target_to_host_timespec(&host_its->it_value, target_addr +
7146                                 offsetof(struct target_itimerspec,
7147                                          it_value))) {
7148         return -TARGET_EFAULT;
7149     }
7150 
7151     return 0;
7152 }
7153 #endif
7154 
7155 #if defined(TARGET_NR_timer_settime64) || \
7156     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7157 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7158                                                    abi_ulong target_addr)
7159 {
7160     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7161                                   offsetof(struct target__kernel_itimerspec,
7162                                            it_interval)) ||
7163         target_to_host_timespec64(&host_its->it_value, target_addr +
7164                                   offsetof(struct target__kernel_itimerspec,
7165                                            it_value))) {
7166         return -TARGET_EFAULT;
7167     }
7168 
7169     return 0;
7170 }
7171 #endif
7172 
7173 #if ((defined(TARGET_NR_timerfd_gettime) || \
7174       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7175       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7176 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7177                                                  struct itimerspec *host_its)
7178 {
7179     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7180                                                        it_interval),
7181                                 &host_its->it_interval) ||
7182         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7183                                                        it_value),
7184                                 &host_its->it_value)) {
7185         return -TARGET_EFAULT;
7186     }
7187     return 0;
7188 }
7189 #endif
7190 
7191 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7192       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7193       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7194 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7195                                                    struct itimerspec *host_its)
7196 {
7197     if (host_to_target_timespec64(target_addr +
7198                                   offsetof(struct target__kernel_itimerspec,
7199                                            it_interval),
7200                                   &host_its->it_interval) ||
7201         host_to_target_timespec64(target_addr +
7202                                   offsetof(struct target__kernel_itimerspec,
7203                                            it_value),
7204                                   &host_its->it_value)) {
7205         return -TARGET_EFAULT;
7206     }
7207     return 0;
7208 }
7209 #endif
7210 
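     /*
      * struct timex conversion for adjtimex()/clock_adjtime().  Every field is
      * copied explicitly; the 64-bit time_t variants further below additionally
      * route the embedded time value through copy_from_user_timeval64() and
      * copy_to_user_timeval64().
      */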
7211 #if defined(TARGET_NR_adjtimex) || \
7212     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7213 static inline abi_long target_to_host_timex(struct timex *host_tx,
7214                                             abi_long target_addr)
7215 {
7216     struct target_timex *target_tx;
7217 
7218     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7219         return -TARGET_EFAULT;
7220     }
7221 
7222     __get_user(host_tx->modes, &target_tx->modes);
7223     __get_user(host_tx->offset, &target_tx->offset);
7224     __get_user(host_tx->freq, &target_tx->freq);
7225     __get_user(host_tx->maxerror, &target_tx->maxerror);
7226     __get_user(host_tx->esterror, &target_tx->esterror);
7227     __get_user(host_tx->status, &target_tx->status);
7228     __get_user(host_tx->constant, &target_tx->constant);
7229     __get_user(host_tx->precision, &target_tx->precision);
7230     __get_user(host_tx->tolerance, &target_tx->tolerance);
7231     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7232     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7233     __get_user(host_tx->tick, &target_tx->tick);
7234     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7235     __get_user(host_tx->jitter, &target_tx->jitter);
7236     __get_user(host_tx->shift, &target_tx->shift);
7237     __get_user(host_tx->stabil, &target_tx->stabil);
7238     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7239     __get_user(host_tx->calcnt, &target_tx->calcnt);
7240     __get_user(host_tx->errcnt, &target_tx->errcnt);
7241     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7242     __get_user(host_tx->tai, &target_tx->tai);
7243 
7244     unlock_user_struct(target_tx, target_addr, 0);
7245     return 0;
7246 }
7247 
7248 static inline abi_long host_to_target_timex(abi_long target_addr,
7249                                             struct timex *host_tx)
7250 {
7251     struct target_timex *target_tx;
7252 
7253     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7254         return -TARGET_EFAULT;
7255     }
7256 
7257     __put_user(host_tx->modes, &target_tx->modes);
7258     __put_user(host_tx->offset, &target_tx->offset);
7259     __put_user(host_tx->freq, &target_tx->freq);
7260     __put_user(host_tx->maxerror, &target_tx->maxerror);
7261     __put_user(host_tx->esterror, &target_tx->esterror);
7262     __put_user(host_tx->status, &target_tx->status);
7263     __put_user(host_tx->constant, &target_tx->constant);
7264     __put_user(host_tx->precision, &target_tx->precision);
7265     __put_user(host_tx->tolerance, &target_tx->tolerance);
7266     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7267     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7268     __put_user(host_tx->tick, &target_tx->tick);
7269     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7270     __put_user(host_tx->jitter, &target_tx->jitter);
7271     __put_user(host_tx->shift, &target_tx->shift);
7272     __put_user(host_tx->stabil, &target_tx->stabil);
7273     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7274     __put_user(host_tx->calcnt, &target_tx->calcnt);
7275     __put_user(host_tx->errcnt, &target_tx->errcnt);
7276     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7277     __put_user(host_tx->tai, &target_tx->tai);
7278 
7279     unlock_user_struct(target_tx, target_addr, 1);
7280     return 0;
7281 }
7282 #endif
7283 
7284 
7285 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7286 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7287                                               abi_long target_addr)
7288 {
7289     struct target__kernel_timex *target_tx;
7290 
7291     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7292                                  offsetof(struct target__kernel_timex,
7293                                           time))) {
7294         return -TARGET_EFAULT;
7295     }
7296 
7297     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7298         return -TARGET_EFAULT;
7299     }
7300 
7301     __get_user(host_tx->modes, &target_tx->modes);
7302     __get_user(host_tx->offset, &target_tx->offset);
7303     __get_user(host_tx->freq, &target_tx->freq);
7304     __get_user(host_tx->maxerror, &target_tx->maxerror);
7305     __get_user(host_tx->esterror, &target_tx->esterror);
7306     __get_user(host_tx->status, &target_tx->status);
7307     __get_user(host_tx->constant, &target_tx->constant);
7308     __get_user(host_tx->precision, &target_tx->precision);
7309     __get_user(host_tx->tolerance, &target_tx->tolerance);
7310     __get_user(host_tx->tick, &target_tx->tick);
7311     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7312     __get_user(host_tx->jitter, &target_tx->jitter);
7313     __get_user(host_tx->shift, &target_tx->shift);
7314     __get_user(host_tx->stabil, &target_tx->stabil);
7315     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7316     __get_user(host_tx->calcnt, &target_tx->calcnt);
7317     __get_user(host_tx->errcnt, &target_tx->errcnt);
7318     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7319     __get_user(host_tx->tai, &target_tx->tai);
7320 
7321     unlock_user_struct(target_tx, target_addr, 0);
7322     return 0;
7323 }
7324 
7325 static inline abi_long host_to_target_timex64(abi_long target_addr,
7326                                               struct timex *host_tx)
7327 {
7328     struct target__kernel_timex *target_tx;
7329 
7330     if (copy_to_user_timeval64(target_addr +
7331                                offsetof(struct target__kernel_timex, time),
7332                                &host_tx->time)) {
7333         return -TARGET_EFAULT;
7334     }
7335 
7336     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7337         return -TARGET_EFAULT;
7338     }
7339 
7340     __put_user(host_tx->modes, &target_tx->modes);
7341     __put_user(host_tx->offset, &target_tx->offset);
7342     __put_user(host_tx->freq, &target_tx->freq);
7343     __put_user(host_tx->maxerror, &target_tx->maxerror);
7344     __put_user(host_tx->esterror, &target_tx->esterror);
7345     __put_user(host_tx->status, &target_tx->status);
7346     __put_user(host_tx->constant, &target_tx->constant);
7347     __put_user(host_tx->precision, &target_tx->precision);
7348     __put_user(host_tx->tolerance, &target_tx->tolerance);
7349     __put_user(host_tx->tick, &target_tx->tick);
7350     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7351     __put_user(host_tx->jitter, &target_tx->jitter);
7352     __put_user(host_tx->shift, &target_tx->shift);
7353     __put_user(host_tx->stabil, &target_tx->stabil);
7354     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7355     __put_user(host_tx->calcnt, &target_tx->calcnt);
7356     __put_user(host_tx->errcnt, &target_tx->errcnt);
7357     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7358     __put_user(host_tx->tai, &target_tx->tai);
7359 
7360     unlock_user_struct(target_tx, target_addr, 1);
7361     return 0;
7362 }
7363 #endif
7364 
7365 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7366                                                abi_ulong target_addr)
7367 {
7368     struct target_sigevent *target_sevp;
7369 
7370     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7371         return -TARGET_EFAULT;
7372     }
7373 
7374     /* This union is awkward on 64 bit systems because it has a 32 bit
7375      * integer and a pointer in it; we follow the conversion approach
7376      * used for handling sigval types in signal.c so the guest should get
7377      * the correct value back even if we did a 64 bit byteswap and it's
7378      * using the 32 bit integer.
7379      */
7380     host_sevp->sigev_value.sival_ptr =
7381         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7382     host_sevp->sigev_signo =
7383         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7384     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7385     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7386 
7387     unlock_user_struct(target_sevp, target_addr, 1);
7388     return 0;
7389 }
7390 
7391 #if defined(TARGET_NR_mlockall)
7392 static inline int target_to_host_mlockall_arg(int arg)
7393 {
7394     int result = 0;
7395 
7396     if (arg & TARGET_MCL_CURRENT) {
7397         result |= MCL_CURRENT;
7398     }
7399     if (arg & TARGET_MCL_FUTURE) {
7400         result |= MCL_FUTURE;
7401     }
7402 #ifdef MCL_ONFAULT
7403     if (arg & TARGET_MCL_ONFAULT) {
7404         result |= MCL_ONFAULT;
7405     }
7406 #endif
7407 
7408     return result;
7409 }
7410 #endif
7411 
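     /*
      * host_to_target_stat64() picks the appropriate guest layout (old-ABI ARM
      * target_eabi_stat64, target_stat64, or plain target_stat), zeroes it and
      * copies the host stat fields across with the required byte swapping.
      */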
7412 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7413      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7414      defined(TARGET_NR_newfstatat))
7415 static inline abi_long host_to_target_stat64(void *cpu_env,
7416                                              abi_ulong target_addr,
7417                                              struct stat *host_st)
7418 {
7419 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7420     if (((CPUARMState *)cpu_env)->eabi) {
7421         struct target_eabi_stat64 *target_st;
7422 
7423         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7424             return -TARGET_EFAULT;
7425         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7426         __put_user(host_st->st_dev, &target_st->st_dev);
7427         __put_user(host_st->st_ino, &target_st->st_ino);
7428 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7429         __put_user(host_st->st_ino, &target_st->__st_ino);
7430 #endif
7431         __put_user(host_st->st_mode, &target_st->st_mode);
7432         __put_user(host_st->st_nlink, &target_st->st_nlink);
7433         __put_user(host_st->st_uid, &target_st->st_uid);
7434         __put_user(host_st->st_gid, &target_st->st_gid);
7435         __put_user(host_st->st_rdev, &target_st->st_rdev);
7436         __put_user(host_st->st_size, &target_st->st_size);
7437         __put_user(host_st->st_blksize, &target_st->st_blksize);
7438         __put_user(host_st->st_blocks, &target_st->st_blocks);
7439         __put_user(host_st->st_atime, &target_st->target_st_atime);
7440         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7441         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7442 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7443         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7444         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7445         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7446 #endif
7447         unlock_user_struct(target_st, target_addr, 1);
7448     } else
7449 #endif
7450     {
7451 #if defined(TARGET_HAS_STRUCT_STAT64)
7452         struct target_stat64 *target_st;
7453 #else
7454         struct target_stat *target_st;
7455 #endif
7456 
7457         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7458             return -TARGET_EFAULT;
7459         memset(target_st, 0, sizeof(*target_st));
7460         __put_user(host_st->st_dev, &target_st->st_dev);
7461         __put_user(host_st->st_ino, &target_st->st_ino);
7462 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7463         __put_user(host_st->st_ino, &target_st->__st_ino);
7464 #endif
7465         __put_user(host_st->st_mode, &target_st->st_mode);
7466         __put_user(host_st->st_nlink, &target_st->st_nlink);
7467         __put_user(host_st->st_uid, &target_st->st_uid);
7468         __put_user(host_st->st_gid, &target_st->st_gid);
7469         __put_user(host_st->st_rdev, &target_st->st_rdev);
7470         /* XXX: better use of kernel struct */
7471         __put_user(host_st->st_size, &target_st->st_size);
7472         __put_user(host_st->st_blksize, &target_st->st_blksize);
7473         __put_user(host_st->st_blocks, &target_st->st_blocks);
7474         __put_user(host_st->st_atime, &target_st->target_st_atime);
7475         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7476         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7477 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7478         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7479         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7480         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7481 #endif
7482         unlock_user_struct(target_st, target_addr, 1);
7483     }
7484 
7485     return 0;
7486 }
7487 #endif
7488 
7489 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7490 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7491                                             abi_ulong target_addr)
7492 {
7493     struct target_statx *target_stx;
7494 
7495     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7496         return -TARGET_EFAULT;
7497     }
7498     memset(target_stx, 0, sizeof(*target_stx));
7499 
7500     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7501     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7502     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7503     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7504     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7505     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7506     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7507     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7508     __put_user(host_stx->stx_size, &target_stx->stx_size);
7509     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7510     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7511     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7512     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7513     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7514     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7515     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7516     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7517     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7518     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7519     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7520     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7521     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7522     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7523 
7524     unlock_user_struct(target_stx, target_addr, 1);
7525 
7526     return 0;
7527 }
7528 #endif
7529 
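     /*
      * Pick the host futex syscall: a 64-bit host always uses __NR_futex (its
      * struct timespec already carries a 64-bit time_t); a 32-bit host prefers
      * __NR_futex_time64 when its timespec is 64-bit and otherwise falls back
      * to the classic __NR_futex.
      */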
7530 static int do_sys_futex(int *uaddr, int op, int val,
7531                          const struct timespec *timeout, int *uaddr2,
7532                          int val3)
7533 {
7534 #if HOST_LONG_BITS == 64
7535 #if defined(__NR_futex)
7536     /* time_t is always 64 bits on the host here; no _time64 variant is defined */
7537     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7538 
7539 #endif
7540 #else /* HOST_LONG_BITS == 64 */
7541 #if defined(__NR_futex_time64)
7542     if (sizeof(timeout->tv_sec) == 8) {
7543         /* _time64 function on 32bit arch */
7544         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7545     }
7546 #endif
7547 #if defined(__NR_futex)
7548     /* old function on 32bit arch */
7549     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7550 #endif
7551 #endif /* HOST_LONG_BITS == 64 */
7552     g_assert_not_reached();
7553 }
7554 
7555 static int do_safe_futex(int *uaddr, int op, int val,
7556                          const struct timespec *timeout, int *uaddr2,
7557                          int val3)
7558 {
7559 #if HOST_LONG_BITS == 64
7560 #if defined(__NR_futex)
7561     /* time_t is always 64 bits on the host here; no _time64 variant is defined */
7562     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7563 #endif
7564 #else /* HOST_LONG_BITS == 64 */
7565 #if defined(__NR_futex_time64)
7566     if (sizeof(timeout->tv_sec) == 8) {
7567         /* _time64 function on 32bit arch */
7568         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7569                                            val3));
7570     }
7571 #endif
7572 #if defined(__NR_futex)
7573     /* old function on 32bit arch */
7574     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7575 #endif
7576 #endif /* HOST_LONG_BITS == 64 */
7577     return -TARGET_ENOSYS;
7578 }
7579 
7580 /* ??? Using host futex calls even when target atomic operations
7581    are not really atomic probably breaks things.  However, implementing
7582    futexes locally would make futexes shared between multiple processes
7583    tricky.  Then again, they're probably useless in that case because guest
7584    atomic operations won't work either.  */
7585 #if defined(TARGET_NR_futex)
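     /* do_futex() and do_futex_time64() below differ only in how the guest
        timespec is read (32-bit vs 64-bit time_t layout); both funnel into
        do_safe_futex(). */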
7586 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7587                     target_ulong uaddr2, int val3)
7588 {
7589     struct timespec ts, *pts;
7590     int base_op;
7591 
7592     /* ??? We assume FUTEX_* constants are the same on both host
7593        and target.  */
7594 #ifdef FUTEX_CMD_MASK
7595     base_op = op & FUTEX_CMD_MASK;
7596 #else
7597     base_op = op;
7598 #endif
7599     switch (base_op) {
7600     case FUTEX_WAIT:
7601     case FUTEX_WAIT_BITSET:
7602         if (timeout) {
7603             pts = &ts;
7604             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7605         } else {
7606             pts = NULL;
7607         }
7608         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7609     case FUTEX_WAKE:
7610         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7611     case FUTEX_FD:
7612         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7613     case FUTEX_REQUEUE:
7614     case FUTEX_CMP_REQUEUE:
7615     case FUTEX_WAKE_OP:
7616         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7617            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7618            But the prototype takes a `struct timespec *'; insert casts
7619            to satisfy the compiler.  We do not need to tswap TIMEOUT
7620            since it's not compared to guest memory.  */
7621         pts = (struct timespec *)(uintptr_t) timeout;
7622         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7623                              (base_op == FUTEX_CMP_REQUEUE
7624                                       ? tswap32(val3)
7625                                       : val3));
7626     default:
7627         return -TARGET_ENOSYS;
7628     }
7629 }
7630 #endif
7631 
7632 #if defined(TARGET_NR_futex_time64)
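/*
 * Same as do_futex() above, except that the guest supplies a 64-bit
 * struct timespec, converted with target_to_host_timespec64().
 */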
7633 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7634                            target_ulong uaddr2, int val3)
7635 {
7636     struct timespec ts, *pts;
7637     int base_op;
7638 
7639     /* ??? We assume FUTEX_* constants are the same on both host
7640        and target.  */
7641 #ifdef FUTEX_CMD_MASK
7642     base_op = op & FUTEX_CMD_MASK;
7643 #else
7644     base_op = op;
7645 #endif
7646     switch (base_op) {
7647     case FUTEX_WAIT:
7648     case FUTEX_WAIT_BITSET:
7649         if (timeout) {
7650             pts = &ts;
7651             if (target_to_host_timespec64(pts, timeout)) {
7652                 return -TARGET_EFAULT;
7653             }
7654         } else {
7655             pts = NULL;
7656         }
7657         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7658     case FUTEX_WAKE:
7659         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7660     case FUTEX_FD:
7661         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7662     case FUTEX_REQUEUE:
7663     case FUTEX_CMP_REQUEUE:
7664     case FUTEX_WAKE_OP:
7665         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7666            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7667            But the prototype takes a `struct timespec *'; insert casts
7668            to satisfy the compiler.  We do not need to tswap TIMEOUT
7669            since it's not compared to guest memory.  */
7670         pts = (struct timespec *)(uintptr_t) timeout;
7671         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7672                              (base_op == FUTEX_CMP_REQUEUE
7673                                       ? tswap32(val3)
7674                                       : val3));
7675     default:
7676         return -TARGET_ENOSYS;
7677     }
7678 }
7679 #endif
7680 
7681 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
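/*
 * name_to_handle_at() fills a struct file_handle of which only the
 * handle_bytes and handle_type fields have a defined meaning, so just
 * those two are byte-swapped for the guest; the opaque f_handle payload
 * is copied through unchanged.
 */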
7682 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7683                                      abi_long handle, abi_long mount_id,
7684                                      abi_long flags)
7685 {
7686     struct file_handle *target_fh;
7687     struct file_handle *fh;
7688     int mid = 0;
7689     abi_long ret;
7690     char *name;
7691     unsigned int size, total_size;
7692 
7693     if (get_user_s32(size, handle)) {
7694         return -TARGET_EFAULT;
7695     }
7696 
7697     name = lock_user_string(pathname);
7698     if (!name) {
7699         return -TARGET_EFAULT;
7700     }
7701 
7702     total_size = sizeof(struct file_handle) + size;
7703     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7704     if (!target_fh) {
7705         unlock_user(name, pathname, 0);
7706         return -TARGET_EFAULT;
7707     }
7708 
7709     fh = g_malloc0(total_size);
7710     fh->handle_bytes = size;
7711 
7712     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7713     unlock_user(name, pathname, 0);
7714 
7715     /* man name_to_handle_at(2):
7716      * Other than the use of the handle_bytes field, the caller should treat
7717      * the file_handle structure as an opaque data type
7718      */
7719 
7720     memcpy(target_fh, fh, total_size);
7721     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7722     target_fh->handle_type = tswap32(fh->handle_type);
7723     g_free(fh);
7724     unlock_user(target_fh, handle, total_size);
7725 
7726     if (put_user_s32(mid, mount_id)) {
7727         return -TARGET_EFAULT;
7728     }
7729 
7730     return ret;
7731 
7732 }
7733 #endif
7734 
7735 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7736 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7737                                      abi_long flags)
7738 {
7739     struct file_handle *target_fh;
7740     struct file_handle *fh;
7741     unsigned int size, total_size;
7742     abi_long ret;
7743 
7744     if (get_user_s32(size, handle)) {
7745         return -TARGET_EFAULT;
7746     }
7747 
7748     total_size = sizeof(struct file_handle) + size;
7749     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7750     if (!target_fh) {
7751         return -TARGET_EFAULT;
7752     }
7753 
7754     fh = g_memdup(target_fh, total_size);
7755     fh->handle_bytes = size;
7756     fh->handle_type = tswap32(target_fh->handle_type);
7757 
7758     ret = get_errno(open_by_handle_at(mount_fd, fh,
7759                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7760 
7761     g_free(fh);
7762 
7763     unlock_user(target_fh, handle, total_size);
7764 
7765     return ret;
7766 }
7767 #endif
7768 
7769 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7770 
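/*
 * Backend for both signalfd() and signalfd4().  The new descriptor is
 * registered with an fd translator so that signalfd_siginfo records read
 * from it are converted to the guest layout.
 */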
7771 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7772 {
7773     int host_flags;
7774     target_sigset_t *target_mask;
7775     sigset_t host_mask;
7776     abi_long ret;
7777 
7778     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7779         return -TARGET_EINVAL;
7780     }
7781     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7782         return -TARGET_EFAULT;
7783     }
7784 
7785     target_to_host_sigset(&host_mask, target_mask);
7786 
7787     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7788 
7789     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7790     if (ret >= 0) {
7791         fd_trans_register(ret, &target_signalfd_trans);
7792     }
7793 
7794     unlock_user_struct(target_mask, mask, 0);
7795 
7796     return ret;
7797 }
7798 #endif
7799 
7800 /* Map host to target signal numbers for the wait family of syscalls.
7801    Assume all other status bits are the same.  */
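/*
 * Wait status layout: when WIFSIGNALED, the terminating signal occupies the
 * low 7 bits (bit 7 is the core-dump flag); when WIFSTOPPED, the low byte is
 * 0x7f and the stopping signal sits in bits 8-15.  Only the signal numbers
 * need translating; everything else is copied through.
 */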
7802 int host_to_target_waitstatus(int status)
7803 {
7804     if (WIFSIGNALED(status)) {
7805         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7806     }
7807     if (WIFSTOPPED(status)) {
7808         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7809                | (status & 0xff);
7810     }
7811     return status;
7812 }
7813 
7814 static int open_self_cmdline(void *cpu_env, int fd)
7815 {
7816     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7817     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7818     int i;
7819 
7820     for (i = 0; i < bprm->argc; i++) {
7821         size_t len = strlen(bprm->argv[i]) + 1;
7822 
7823         if (write(fd, bprm->argv[i], len) != len) {
7824             return -1;
7825         }
7826     }
7827 
7828     return 0;
7829 }
7830 
7831 static int open_self_maps(void *cpu_env, int fd)
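/*
 * Synthesize /proc/self/maps: walk the host's own mappings, keep only the
 * ranges that correspond to valid guest memory, rewrite the addresses with
 * h2g(), and label the guest stack as "[stack]".
 */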
7832 {
7833     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7834     TaskState *ts = cpu->opaque;
7835     GSList *map_info = read_self_maps();
7836     GSList *s;
7837     int count;
7838 
7839     for (s = map_info; s; s = g_slist_next(s)) {
7840         MapInfo *e = (MapInfo *) s->data;
7841 
7842         if (h2g_valid(e->start)) {
7843             unsigned long min = e->start;
7844             unsigned long max = e->end;
7845             int flags = page_get_flags(h2g(min));
7846             const char *path;
7847 
7848             max = h2g_valid(max - 1) ?
7849                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7850 
7851             if (page_check_range(h2g(min), max - min, flags) == -1) {
7852                 continue;
7853             }
7854 
7855             if (h2g(min) == ts->info->stack_limit) {
7856                 path = "[stack]";
7857             } else {
7858                 path = e->path;
7859             }
7860 
7861             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7862                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7863                             h2g(min), h2g(max - 1) + 1,
7864                             e->is_read ? 'r' : '-',
7865                             e->is_write ? 'w' : '-',
7866                             e->is_exec ? 'x' : '-',
7867                             e->is_priv ? 'p' : '-',
7868                             (uint64_t) e->offset, e->dev, e->inode);
7869             if (path) {
7870                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7871             } else {
7872                 dprintf(fd, "\n");
7873             }
7874         }
7875     }
7876 
7877     free_self_maps(map_info);
7878 
7879 #ifdef TARGET_VSYSCALL_PAGE
7880     /*
7881      * We only support execution from the vsyscall page.
7882      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7883      */
7884     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7885                     " --xp 00000000 00:00 0",
7886                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7887     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7888 #endif
7889 
7890     return 0;
7891 }
7892 
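/*
 * Synthesize /proc/self/stat: emit 44 space-separated fields, filling in
 * only the pid, the command name and the stack start address; every other
 * field reads as 0.
 */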
7893 static int open_self_stat(void *cpu_env, int fd)
7894 {
7895     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7896     TaskState *ts = cpu->opaque;
7897     g_autoptr(GString) buf = g_string_new(NULL);
7898     int i;
7899 
7900     for (i = 0; i < 44; i++) {
7901         if (i == 0) {
7902             /* pid */
7903             g_string_printf(buf, FMT_pid " ", getpid());
7904         } else if (i == 1) {
7905             /* app name */
7906             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7907             bin = bin ? bin + 1 : ts->bprm->argv[0];
7908             g_string_printf(buf, "(%.15s) ", bin);
7909         } else if (i == 27) {
7910             /* stack bottom */
7911             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7912         } else {
7913             /* for the rest, there is MasterCard */
7914             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7915         }
7916 
7917         if (write(fd, buf->str, buf->len) != buf->len) {
7918             return -1;
7919         }
7920     }
7921 
7922     return 0;
7923 }
7924 
7925 static int open_self_auxv(void *cpu_env, int fd)
7926 {
7927     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7928     TaskState *ts = cpu->opaque;
7929     abi_ulong auxv = ts->info->saved_auxv;
7930     abi_ulong len = ts->info->auxv_len;
7931     char *ptr;
7932 
7933     /*
7934      * The auxiliary vector is stored on the target process's stack;
7935      * read the whole auxv vector and copy it to the file.
7936      */
7937     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7938     if (ptr != NULL) {
7939         while (len > 0) {
7940             ssize_t r;
7941             r = write(fd, ptr, len);
7942             if (r <= 0) {
7943                 break;
7944             }
7945             len -= r;
7946             ptr += r;
7947         }
7948         lseek(fd, 0, SEEK_SET);
7949         unlock_user(ptr, auxv, len);
7950     }
7951 
7952     return 0;
7953 }
7954 
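/*
 * Return 1 if filename names the given entry of this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<own pid>/<entry>",
 * and 0 otherwise.
 */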
7955 static int is_proc_myself(const char *filename, const char *entry)
7956 {
7957     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7958         filename += strlen("/proc/");
7959         if (!strncmp(filename, "self/", strlen("self/"))) {
7960             filename += strlen("self/");
7961         } else if (*filename >= '1' && *filename <= '9') {
7962             char myself[80];
7963             snprintf(myself, sizeof(myself), "%d/", getpid());
7964             if (!strncmp(filename, myself, strlen(myself))) {
7965                 filename += strlen(myself);
7966             } else {
7967                 return 0;
7968             }
7969         } else {
7970             return 0;
7971         }
7972         if (!strcmp(filename, entry)) {
7973             return 1;
7974         }
7975     }
7976     return 0;
7977 }
7978 
7979 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7980     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7981 static int is_proc(const char *filename, const char *entry)
7982 {
7983     return strcmp(filename, entry) == 0;
7984 }
7985 #endif
7986 
7987 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
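/*
 * /proc/net/route reports addresses in host byte order, so when host and
 * guest endianness differ the destination, gateway and mask columns must be
 * byte-swapped before being handed to the guest.
 */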
7988 static int open_net_route(void *cpu_env, int fd)
7989 {
7990     FILE *fp;
7991     char *line = NULL;
7992     size_t len = 0;
7993     ssize_t read;
7994 
7995     fp = fopen("/proc/net/route", "r");
7996     if (fp == NULL) {
7997         return -1;
7998     }
7999 
8000     /* read header */
8001 
8002     read = getline(&line, &len, fp);
8003     dprintf(fd, "%s", line);
8004 
8005     /* read routes */
8006 
8007     while ((read = getline(&line, &len, fp)) != -1) {
8008         char iface[16];
8009         uint32_t dest, gw, mask;
8010         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8011         int fields;
8012 
8013         fields = sscanf(line,
8014                         "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8015                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8016                         &mask, &mtu, &window, &irtt);
8017         if (fields != 11) {
8018             continue;
8019         }
8020         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8021                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8022                 metric, tswap32(mask), mtu, window, irtt);
8023     }
8024 
8025     free(line);
8026     fclose(fp);
8027 
8028     return 0;
8029 }
8030 #endif
8031 
8032 #if defined(TARGET_SPARC)
8033 static int open_cpuinfo(void *cpu_env, int fd)
8034 {
8035     dprintf(fd, "type\t\t: sun4u\n");
8036     return 0;
8037 }
8038 #endif
8039 
8040 #if defined(TARGET_HPPA)
8041 static int open_cpuinfo(void *cpu_env, int fd)
8042 {
8043     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8044     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8045     dprintf(fd, "capabilities\t: os32\n");
8046     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8047     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8048     return 0;
8049 }
8050 #endif
8051 
8052 #if defined(TARGET_M68K)
8053 static int open_hardware(void *cpu_env, int fd)
8054 {
8055     dprintf(fd, "Model:\t\tqemu-m68k\n");
8056     return 0;
8057 }
8058 #endif
8059 
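/*
 * openat() with emulation of selected /proc files: a request for
 * /proc/self/exe is answered with the guest executable itself, entries in
 * the fakes[] table are synthesized into an unlinked temporary file by
 * their fill() callback, and everything else goes straight to
 * safe_openat().
 */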
8060 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8061 {
8062     struct fake_open {
8063         const char *filename;
8064         int (*fill)(void *cpu_env, int fd);
8065         int (*cmp)(const char *s1, const char *s2);
8066     };
8067     const struct fake_open *fake_open;
8068     static const struct fake_open fakes[] = {
8069         { "maps", open_self_maps, is_proc_myself },
8070         { "stat", open_self_stat, is_proc_myself },
8071         { "auxv", open_self_auxv, is_proc_myself },
8072         { "cmdline", open_self_cmdline, is_proc_myself },
8073 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8074         { "/proc/net/route", open_net_route, is_proc },
8075 #endif
8076 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8077         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8078 #endif
8079 #if defined(TARGET_M68K)
8080         { "/proc/hardware", open_hardware, is_proc },
8081 #endif
8082         { NULL, NULL, NULL }
8083     };
8084 
8085     if (is_proc_myself(pathname, "exe")) {
8086         int execfd = qemu_getauxval(AT_EXECFD);
8087         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8088     }
8089 
8090     for (fake_open = fakes; fake_open->filename; fake_open++) {
8091         if (fake_open->cmp(pathname, fake_open->filename)) {
8092             break;
8093         }
8094     }
8095 
8096     if (fake_open->filename) {
8097         const char *tmpdir;
8098         char filename[PATH_MAX];
8099         int fd, r;
8100 
8101         /* create a temporary file to hold the synthesized contents */
8102         tmpdir = getenv("TMPDIR");
8103         if (!tmpdir)
8104             tmpdir = "/tmp";
8105         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8106         fd = mkstemp(filename);
8107         if (fd < 0) {
8108             return fd;
8109         }
8110         unlink(filename);
8111 
8112         if ((r = fake_open->fill(cpu_env, fd))) {
8113             int e = errno;
8114             close(fd);
8115             errno = e;
8116             return r;
8117         }
8118         lseek(fd, 0, SEEK_SET);
8119 
8120         return fd;
8121     }
8122 
8123     return safe_openat(dirfd, path(pathname), flags, mode);
8124 }
8125 
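/*
 * Timer IDs handed out to the guest are TIMER_MAGIC | index; for example,
 * the timer at table index 3 is presented to the guest as 0x0caf0003.
 */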
8126 #define TIMER_MAGIC 0x0caf0000
8127 #define TIMER_MAGIC_MASK 0xffff0000
8128 
8129 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8130 static target_timer_t get_timer_id(abi_long arg)
8131 {
8132     target_timer_t timerid = arg;
8133 
8134     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8135         return -TARGET_EINVAL;
8136     }
8137 
8138     timerid &= 0xffff;
8139 
8140     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8141         return -TARGET_EINVAL;
8142     }
8143 
8144     return timerid;
8145 }
8146 
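/*
 * Convert a CPU affinity mask between the guest's array of abi_ulong words
 * and the host's array of unsigned long words: bit j of target word i
 * stands for CPU number i * target_bits + j, which is set at the matching
 * position of the host mask (host_to_target_cpu_mask below performs the
 * reverse mapping).
 */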
8147 static int target_to_host_cpu_mask(unsigned long *host_mask,
8148                                    size_t host_size,
8149                                    abi_ulong target_addr,
8150                                    size_t target_size)
8151 {
8152     unsigned target_bits = sizeof(abi_ulong) * 8;
8153     unsigned host_bits = sizeof(*host_mask) * 8;
8154     abi_ulong *target_mask;
8155     unsigned i, j;
8156 
8157     assert(host_size >= target_size);
8158 
8159     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8160     if (!target_mask) {
8161         return -TARGET_EFAULT;
8162     }
8163     memset(host_mask, 0, host_size);
8164 
8165     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8166         unsigned bit = i * target_bits;
8167         abi_ulong val;
8168 
8169         __get_user(val, &target_mask[i]);
8170         for (j = 0; j < target_bits; j++, bit++) {
8171             if (val & (1UL << j)) {
8172                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8173             }
8174         }
8175     }
8176 
8177     unlock_user(target_mask, target_addr, 0);
8178     return 0;
8179 }
8180 
8181 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8182                                    size_t host_size,
8183                                    abi_ulong target_addr,
8184                                    size_t target_size)
8185 {
8186     unsigned target_bits = sizeof(abi_ulong) * 8;
8187     unsigned host_bits = sizeof(*host_mask) * 8;
8188     abi_ulong *target_mask;
8189     unsigned i, j;
8190 
8191     assert(host_size >= target_size);
8192 
8193     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8194     if (!target_mask) {
8195         return -TARGET_EFAULT;
8196     }
8197 
8198     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8199         unsigned bit = i * target_bits;
8200         abi_ulong val = 0;
8201 
8202         for (j = 0; j < target_bits; j++, bit++) {
8203             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8204                 val |= 1UL << j;
8205             }
8206         }
8207         __put_user(val, &target_mask[i]);
8208     }
8209 
8210     unlock_user(target_mask, target_addr, target_size);
8211     return 0;
8212 }
8213 
8214 /* This is an internal helper for do_syscall so that it is easier
8215  * to have a single return point, so that actions, such as logging
8216  * of syscall results, can be performed.
8217  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8218  */
8219 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8220                             abi_long arg2, abi_long arg3, abi_long arg4,
8221                             abi_long arg5, abi_long arg6, abi_long arg7,
8222                             abi_long arg8)
8223 {
8224     CPUState *cpu = env_cpu(cpu_env);
8225     abi_long ret;
8226 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8227     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8228     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8229     || defined(TARGET_NR_statx)
8230     struct stat st;
8231 #endif
8232 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8233     || defined(TARGET_NR_fstatfs)
8234     struct statfs stfs;
8235 #endif
8236     void *p;
8237 
8238     switch(num) {
8239     case TARGET_NR_exit:
8240         /* In old applications this may be used to implement _exit(2).
8241            However in threaded applications it is used for thread termination,
8242            and _exit_group is used for application termination.
8243            Do thread termination if we have more than one thread.  */
8244 
8245         if (block_signals()) {
8246             return -TARGET_ERESTARTSYS;
8247         }
8248 
8249         pthread_mutex_lock(&clone_lock);
8250 
8251         if (CPU_NEXT(first_cpu)) {
8252             TaskState *ts = cpu->opaque;
8253 
8254             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8255             object_unref(OBJECT(cpu));
8256             /*
8257              * At this point the CPU should be unrealized and removed
8258              * from cpu lists. We can clean-up the rest of the thread
8259              * data without the lock held.
8260              */
8261 
8262             pthread_mutex_unlock(&clone_lock);
8263 
8264             if (ts->child_tidptr) {
8265                 put_user_u32(0, ts->child_tidptr);
8266                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8267                           NULL, NULL, 0);
8268             }
8269             thread_cpu = NULL;
8270             g_free(ts);
8271             rcu_unregister_thread();
8272             pthread_exit(NULL);
8273         }
8274 
8275         pthread_mutex_unlock(&clone_lock);
8276         preexit_cleanup(cpu_env, arg1);
8277         _exit(arg1);
8278         return 0; /* avoid warning */
8279     case TARGET_NR_read:
8280         if (arg2 == 0 && arg3 == 0) {
8281             return get_errno(safe_read(arg1, 0, 0));
8282         } else {
8283             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8284                 return -TARGET_EFAULT;
8285             ret = get_errno(safe_read(arg1, p, arg3));
8286             if (ret >= 0 &&
8287                 fd_trans_host_to_target_data(arg1)) {
8288                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8289             }
8290             unlock_user(p, arg2, ret);
8291         }
8292         return ret;
8293     case TARGET_NR_write:
8294         if (arg2 == 0 && arg3 == 0) {
8295             return get_errno(safe_write(arg1, 0, 0));
8296         }
8297         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8298             return -TARGET_EFAULT;
8299         if (fd_trans_target_to_host_data(arg1)) {
8300             void *copy = g_malloc(arg3);
8301             memcpy(copy, p, arg3);
8302             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8303             if (ret >= 0) {
8304                 ret = get_errno(safe_write(arg1, copy, ret));
8305             }
8306             g_free(copy);
8307         } else {
8308             ret = get_errno(safe_write(arg1, p, arg3));
8309         }
8310         unlock_user(p, arg2, 0);
8311         return ret;
8312 
8313 #ifdef TARGET_NR_open
8314     case TARGET_NR_open:
8315         if (!(p = lock_user_string(arg1)))
8316             return -TARGET_EFAULT;
8317         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8318                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8319                                   arg3));
8320         fd_trans_unregister(ret);
8321         unlock_user(p, arg1, 0);
8322         return ret;
8323 #endif
8324     case TARGET_NR_openat:
8325         if (!(p = lock_user_string(arg2)))
8326             return -TARGET_EFAULT;
8327         ret = get_errno(do_openat(cpu_env, arg1, p,
8328                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8329                                   arg4));
8330         fd_trans_unregister(ret);
8331         unlock_user(p, arg2, 0);
8332         return ret;
8333 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8334     case TARGET_NR_name_to_handle_at:
8335         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8336         return ret;
8337 #endif
8338 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8339     case TARGET_NR_open_by_handle_at:
8340         ret = do_open_by_handle_at(arg1, arg2, arg3);
8341         fd_trans_unregister(ret);
8342         return ret;
8343 #endif
8344     case TARGET_NR_close:
8345         fd_trans_unregister(arg1);
8346         return get_errno(close(arg1));
8347 
8348     case TARGET_NR_brk:
8349         return do_brk(arg1);
8350 #ifdef TARGET_NR_fork
8351     case TARGET_NR_fork:
8352         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8353 #endif
8354 #ifdef TARGET_NR_waitpid
8355     case TARGET_NR_waitpid:
8356         {
8357             int status;
8358             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8359             if (!is_error(ret) && arg2 && ret
8360                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8361                 return -TARGET_EFAULT;
8362         }
8363         return ret;
8364 #endif
8365 #ifdef TARGET_NR_waitid
8366     case TARGET_NR_waitid:
8367         {
8368             siginfo_t info;
8369             info.si_pid = 0;
8370             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8371             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8372                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8373                     return -TARGET_EFAULT;
8374                 host_to_target_siginfo(p, &info);
8375                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8376             }
8377         }
8378         return ret;
8379 #endif
8380 #ifdef TARGET_NR_creat /* not on alpha */
8381     case TARGET_NR_creat:
8382         if (!(p = lock_user_string(arg1)))
8383             return -TARGET_EFAULT;
8384         ret = get_errno(creat(p, arg2));
8385         fd_trans_unregister(ret);
8386         unlock_user(p, arg1, 0);
8387         return ret;
8388 #endif
8389 #ifdef TARGET_NR_link
8390     case TARGET_NR_link:
8391         {
8392             void * p2;
8393             p = lock_user_string(arg1);
8394             p2 = lock_user_string(arg2);
8395             if (!p || !p2)
8396                 ret = -TARGET_EFAULT;
8397             else
8398                 ret = get_errno(link(p, p2));
8399             unlock_user(p2, arg2, 0);
8400             unlock_user(p, arg1, 0);
8401         }
8402         return ret;
8403 #endif
8404 #if defined(TARGET_NR_linkat)
8405     case TARGET_NR_linkat:
8406         {
8407             void * p2 = NULL;
8408             if (!arg2 || !arg4)
8409                 return -TARGET_EFAULT;
8410             p  = lock_user_string(arg2);
8411             p2 = lock_user_string(arg4);
8412             if (!p || !p2)
8413                 ret = -TARGET_EFAULT;
8414             else
8415                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8416             unlock_user(p, arg2, 0);
8417             unlock_user(p2, arg4, 0);
8418         }
8419         return ret;
8420 #endif
8421 #ifdef TARGET_NR_unlink
8422     case TARGET_NR_unlink:
8423         if (!(p = lock_user_string(arg1)))
8424             return -TARGET_EFAULT;
8425         ret = get_errno(unlink(p));
8426         unlock_user(p, arg1, 0);
8427         return ret;
8428 #endif
8429 #if defined(TARGET_NR_unlinkat)
8430     case TARGET_NR_unlinkat:
8431         if (!(p = lock_user_string(arg2)))
8432             return -TARGET_EFAULT;
8433         ret = get_errno(unlinkat(arg1, p, arg3));
8434         unlock_user(p, arg2, 0);
8435         return ret;
8436 #endif
8437     case TARGET_NR_execve:
8438         {
8439             char **argp, **envp;
8440             int argc, envc;
8441             abi_ulong gp;
8442             abi_ulong guest_argp;
8443             abi_ulong guest_envp;
8444             abi_ulong addr;
8445             char **q;
8446             int total_size = 0;
8447 
8448             argc = 0;
8449             guest_argp = arg2;
8450             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8451                 if (get_user_ual(addr, gp))
8452                     return -TARGET_EFAULT;
8453                 if (!addr)
8454                     break;
8455                 argc++;
8456             }
8457             envc = 0;
8458             guest_envp = arg3;
8459             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8460                 if (get_user_ual(addr, gp))
8461                     return -TARGET_EFAULT;
8462                 if (!addr)
8463                     break;
8464                 envc++;
8465             }
8466 
8467             argp = g_new0(char *, argc + 1);
8468             envp = g_new0(char *, envc + 1);
8469 
8470             for (gp = guest_argp, q = argp; gp;
8471                   gp += sizeof(abi_ulong), q++) {
8472                 if (get_user_ual(addr, gp))
8473                     goto execve_efault;
8474                 if (!addr)
8475                     break;
8476                 if (!(*q = lock_user_string(addr)))
8477                     goto execve_efault;
8478                 total_size += strlen(*q) + 1;
8479             }
8480             *q = NULL;
8481 
8482             for (gp = guest_envp, q = envp; gp;
8483                   gp += sizeof(abi_ulong), q++) {
8484                 if (get_user_ual(addr, gp))
8485                     goto execve_efault;
8486                 if (!addr)
8487                     break;
8488                 if (!(*q = lock_user_string(addr)))
8489                     goto execve_efault;
8490                 total_size += strlen(*q) + 1;
8491             }
8492             *q = NULL;
8493 
8494             if (!(p = lock_user_string(arg1)))
8495                 goto execve_efault;
8496             /* Although execve() is not an interruptible syscall it is
8497              * a special case where we must use the safe_syscall wrapper:
8498              * if we allow a signal to happen before we make the host
8499              * syscall then we will 'lose' it, because at the point of
8500              * execve the process leaves QEMU's control. So we use the
8501              * safe syscall wrapper to ensure that we either take the
8502              * signal as a guest signal, or else it does not happen
8503              * before the execve completes and makes it the other
8504              * program's problem.
8505              */
8506             ret = get_errno(safe_execve(p, argp, envp));
8507             unlock_user(p, arg1, 0);
8508 
8509             goto execve_end;
8510 
8511         execve_efault:
8512             ret = -TARGET_EFAULT;
8513 
8514         execve_end:
8515             for (gp = guest_argp, q = argp; *q;
8516                   gp += sizeof(abi_ulong), q++) {
8517                 if (get_user_ual(addr, gp)
8518                     || !addr)
8519                     break;
8520                 unlock_user(*q, addr, 0);
8521             }
8522             for (gp = guest_envp, q = envp; *q;
8523                   gp += sizeof(abi_ulong), q++) {
8524                 if (get_user_ual(addr, gp)
8525                     || !addr)
8526                     break;
8527                 unlock_user(*q, addr, 0);
8528             }
8529 
8530             g_free(argp);
8531             g_free(envp);
8532         }
8533         return ret;
8534     case TARGET_NR_chdir:
8535         if (!(p = lock_user_string(arg1)))
8536             return -TARGET_EFAULT;
8537         ret = get_errno(chdir(p));
8538         unlock_user(p, arg1, 0);
8539         return ret;
8540 #ifdef TARGET_NR_time
8541     case TARGET_NR_time:
8542         {
8543             time_t host_time;
8544             ret = get_errno(time(&host_time));
8545             if (!is_error(ret)
8546                 && arg1
8547                 && put_user_sal(host_time, arg1))
8548                 return -TARGET_EFAULT;
8549         }
8550         return ret;
8551 #endif
8552 #ifdef TARGET_NR_mknod
8553     case TARGET_NR_mknod:
8554         if (!(p = lock_user_string(arg1)))
8555             return -TARGET_EFAULT;
8556         ret = get_errno(mknod(p, arg2, arg3));
8557         unlock_user(p, arg1, 0);
8558         return ret;
8559 #endif
8560 #if defined(TARGET_NR_mknodat)
8561     case TARGET_NR_mknodat:
8562         if (!(p = lock_user_string(arg2)))
8563             return -TARGET_EFAULT;
8564         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8565         unlock_user(p, arg2, 0);
8566         return ret;
8567 #endif
8568 #ifdef TARGET_NR_chmod
8569     case TARGET_NR_chmod:
8570         if (!(p = lock_user_string(arg1)))
8571             return -TARGET_EFAULT;
8572         ret = get_errno(chmod(p, arg2));
8573         unlock_user(p, arg1, 0);
8574         return ret;
8575 #endif
8576 #ifdef TARGET_NR_lseek
8577     case TARGET_NR_lseek:
8578         return get_errno(lseek(arg1, arg2, arg3));
8579 #endif
8580 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8581     /* Alpha specific */
8582     case TARGET_NR_getxpid:
8583         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8584         return get_errno(getpid());
8585 #endif
8586 #ifdef TARGET_NR_getpid
8587     case TARGET_NR_getpid:
8588         return get_errno(getpid());
8589 #endif
8590     case TARGET_NR_mount:
8591         {
8592             /* need to look at the data field */
8593             void *p2, *p3;
8594 
8595             if (arg1) {
8596                 p = lock_user_string(arg1);
8597                 if (!p) {
8598                     return -TARGET_EFAULT;
8599                 }
8600             } else {
8601                 p = NULL;
8602             }
8603 
8604             p2 = lock_user_string(arg2);
8605             if (!p2) {
8606                 if (arg1) {
8607                     unlock_user(p, arg1, 0);
8608                 }
8609                 return -TARGET_EFAULT;
8610             }
8611 
8612             if (arg3) {
8613                 p3 = lock_user_string(arg3);
8614                 if (!p3) {
8615                     if (arg1) {
8616                         unlock_user(p, arg1, 0);
8617                     }
8618                     unlock_user(p2, arg2, 0);
8619                     return -TARGET_EFAULT;
8620                 }
8621             } else {
8622                 p3 = NULL;
8623             }
8624 
8625             /* FIXME - arg5 should be locked, but it isn't clear how to
8626              * do that since it's not guaranteed to be a NULL-terminated
8627              * string.
8628              */
8629             if (!arg5) {
8630                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8631             } else {
8632                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8633             }
8634             ret = get_errno(ret);
8635 
8636             if (arg1) {
8637                 unlock_user(p, arg1, 0);
8638             }
8639             unlock_user(p2, arg2, 0);
8640             if (arg3) {
8641                 unlock_user(p3, arg3, 0);
8642             }
8643         }
8644         return ret;
8645 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8646 #if defined(TARGET_NR_umount)
8647     case TARGET_NR_umount:
8648 #endif
8649 #if defined(TARGET_NR_oldumount)
8650     case TARGET_NR_oldumount:
8651 #endif
8652         if (!(p = lock_user_string(arg1)))
8653             return -TARGET_EFAULT;
8654         ret = get_errno(umount(p));
8655         unlock_user(p, arg1, 0);
8656         return ret;
8657 #endif
8658 #ifdef TARGET_NR_stime /* not on alpha */
8659     case TARGET_NR_stime:
8660         {
8661             struct timespec ts;
8662             ts.tv_nsec = 0;
8663             if (get_user_sal(ts.tv_sec, arg1)) {
8664                 return -TARGET_EFAULT;
8665             }
8666             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8667         }
8668 #endif
8669 #ifdef TARGET_NR_alarm /* not on alpha */
8670     case TARGET_NR_alarm:
8671         return alarm(arg1);
8672 #endif
8673 #ifdef TARGET_NR_pause /* not on alpha */
8674     case TARGET_NR_pause:
8675         if (!block_signals()) {
8676             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8677         }
8678         return -TARGET_EINTR;
8679 #endif
8680 #ifdef TARGET_NR_utime
8681     case TARGET_NR_utime:
8682         {
8683             struct utimbuf tbuf, *host_tbuf;
8684             struct target_utimbuf *target_tbuf;
8685             if (arg2) {
8686                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8687                     return -TARGET_EFAULT;
8688                 tbuf.actime = tswapal(target_tbuf->actime);
8689                 tbuf.modtime = tswapal(target_tbuf->modtime);
8690                 unlock_user_struct(target_tbuf, arg2, 0);
8691                 host_tbuf = &tbuf;
8692             } else {
8693                 host_tbuf = NULL;
8694             }
8695             if (!(p = lock_user_string(arg1)))
8696                 return -TARGET_EFAULT;
8697             ret = get_errno(utime(p, host_tbuf));
8698             unlock_user(p, arg1, 0);
8699         }
8700         return ret;
8701 #endif
8702 #ifdef TARGET_NR_utimes
8703     case TARGET_NR_utimes:
8704         {
8705             struct timeval *tvp, tv[2];
8706             if (arg2) {
8707                 if (copy_from_user_timeval(&tv[0], arg2)
8708                     || copy_from_user_timeval(&tv[1],
8709                                               arg2 + sizeof(struct target_timeval)))
8710                     return -TARGET_EFAULT;
8711                 tvp = tv;
8712             } else {
8713                 tvp = NULL;
8714             }
8715             if (!(p = lock_user_string(arg1)))
8716                 return -TARGET_EFAULT;
8717             ret = get_errno(utimes(p, tvp));
8718             unlock_user(p, arg1, 0);
8719         }
8720         return ret;
8721 #endif
8722 #if defined(TARGET_NR_futimesat)
8723     case TARGET_NR_futimesat:
8724         {
8725             struct timeval *tvp, tv[2];
8726             if (arg3) {
8727                 if (copy_from_user_timeval(&tv[0], arg3)
8728                     || copy_from_user_timeval(&tv[1],
8729                                               arg3 + sizeof(struct target_timeval)))
8730                     return -TARGET_EFAULT;
8731                 tvp = tv;
8732             } else {
8733                 tvp = NULL;
8734             }
8735             if (!(p = lock_user_string(arg2))) {
8736                 return -TARGET_EFAULT;
8737             }
8738             ret = get_errno(futimesat(arg1, path(p), tvp));
8739             unlock_user(p, arg2, 0);
8740         }
8741         return ret;
8742 #endif
8743 #ifdef TARGET_NR_access
8744     case TARGET_NR_access:
8745         if (!(p = lock_user_string(arg1))) {
8746             return -TARGET_EFAULT;
8747         }
8748         ret = get_errno(access(path(p), arg2));
8749         unlock_user(p, arg1, 0);
8750         return ret;
8751 #endif
8752 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8753     case TARGET_NR_faccessat:
8754         if (!(p = lock_user_string(arg2))) {
8755             return -TARGET_EFAULT;
8756         }
8757         ret = get_errno(faccessat(arg1, p, arg3, 0));
8758         unlock_user(p, arg2, 0);
8759         return ret;
8760 #endif
8761 #ifdef TARGET_NR_nice /* not on alpha */
8762     case TARGET_NR_nice:
8763         return get_errno(nice(arg1));
8764 #endif
8765     case TARGET_NR_sync:
8766         sync();
8767         return 0;
8768 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8769     case TARGET_NR_syncfs:
8770         return get_errno(syncfs(arg1));
8771 #endif
8772     case TARGET_NR_kill:
8773         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8774 #ifdef TARGET_NR_rename
8775     case TARGET_NR_rename:
8776         {
8777             void *p2;
8778             p = lock_user_string(arg1);
8779             p2 = lock_user_string(arg2);
8780             if (!p || !p2)
8781                 ret = -TARGET_EFAULT;
8782             else
8783                 ret = get_errno(rename(p, p2));
8784             unlock_user(p2, arg2, 0);
8785             unlock_user(p, arg1, 0);
8786         }
8787         return ret;
8788 #endif
8789 #if defined(TARGET_NR_renameat)
8790     case TARGET_NR_renameat:
8791         {
8792             void *p2;
8793             p  = lock_user_string(arg2);
8794             p2 = lock_user_string(arg4);
8795             if (!p || !p2)
8796                 ret = -TARGET_EFAULT;
8797             else
8798                 ret = get_errno(renameat(arg1, p, arg3, p2));
8799             unlock_user(p2, arg4, 0);
8800             unlock_user(p, arg2, 0);
8801         }
8802         return ret;
8803 #endif
8804 #if defined(TARGET_NR_renameat2)
8805     case TARGET_NR_renameat2:
8806         {
8807             void *p2;
8808             p  = lock_user_string(arg2);
8809             p2 = lock_user_string(arg4);
8810             if (!p || !p2) {
8811                 ret = -TARGET_EFAULT;
8812             } else {
8813                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8814             }
8815             unlock_user(p2, arg4, 0);
8816             unlock_user(p, arg2, 0);
8817         }
8818         return ret;
8819 #endif
8820 #ifdef TARGET_NR_mkdir
8821     case TARGET_NR_mkdir:
8822         if (!(p = lock_user_string(arg1)))
8823             return -TARGET_EFAULT;
8824         ret = get_errno(mkdir(p, arg2));
8825         unlock_user(p, arg1, 0);
8826         return ret;
8827 #endif
8828 #if defined(TARGET_NR_mkdirat)
8829     case TARGET_NR_mkdirat:
8830         if (!(p = lock_user_string(arg2)))
8831             return -TARGET_EFAULT;
8832         ret = get_errno(mkdirat(arg1, p, arg3));
8833         unlock_user(p, arg2, 0);
8834         return ret;
8835 #endif
8836 #ifdef TARGET_NR_rmdir
8837     case TARGET_NR_rmdir:
8838         if (!(p = lock_user_string(arg1)))
8839             return -TARGET_EFAULT;
8840         ret = get_errno(rmdir(p));
8841         unlock_user(p, arg1, 0);
8842         return ret;
8843 #endif
8844     case TARGET_NR_dup:
8845         ret = get_errno(dup(arg1));
8846         if (ret >= 0) {
8847             fd_trans_dup(arg1, ret);
8848         }
8849         return ret;
8850 #ifdef TARGET_NR_pipe
8851     case TARGET_NR_pipe:
8852         return do_pipe(cpu_env, arg1, 0, 0);
8853 #endif
8854 #ifdef TARGET_NR_pipe2
8855     case TARGET_NR_pipe2:
8856         return do_pipe(cpu_env, arg1,
8857                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8858 #endif
8859     case TARGET_NR_times:
8860         {
8861             struct target_tms *tmsp;
8862             struct tms tms;
8863             ret = get_errno(times(&tms));
8864             if (arg1) {
8865                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8866                 if (!tmsp)
8867                     return -TARGET_EFAULT;
8868                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8869                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8870                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8871                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8872             }
8873             if (!is_error(ret))
8874                 ret = host_to_target_clock_t(ret);
8875         }
8876         return ret;
8877     case TARGET_NR_acct:
8878         if (arg1 == 0) {
8879             ret = get_errno(acct(NULL));
8880         } else {
8881             if (!(p = lock_user_string(arg1))) {
8882                 return -TARGET_EFAULT;
8883             }
8884             ret = get_errno(acct(path(p)));
8885             unlock_user(p, arg1, 0);
8886         }
8887         return ret;
8888 #ifdef TARGET_NR_umount2
8889     case TARGET_NR_umount2:
8890         if (!(p = lock_user_string(arg1)))
8891             return -TARGET_EFAULT;
8892         ret = get_errno(umount2(p, arg2));
8893         unlock_user(p, arg1, 0);
8894         return ret;
8895 #endif
8896     case TARGET_NR_ioctl:
8897         return do_ioctl(arg1, arg2, arg3);
8898 #ifdef TARGET_NR_fcntl
8899     case TARGET_NR_fcntl:
8900         return do_fcntl(arg1, arg2, arg3);
8901 #endif
8902     case TARGET_NR_setpgid:
8903         return get_errno(setpgid(arg1, arg2));
8904     case TARGET_NR_umask:
8905         return get_errno(umask(arg1));
8906     case TARGET_NR_chroot:
8907         if (!(p = lock_user_string(arg1)))
8908             return -TARGET_EFAULT;
8909         ret = get_errno(chroot(p));
8910         unlock_user(p, arg1, 0);
8911         return ret;
8912 #ifdef TARGET_NR_dup2
8913     case TARGET_NR_dup2:
8914         ret = get_errno(dup2(arg1, arg2));
8915         if (ret >= 0) {
8916             fd_trans_dup(arg1, arg2);
8917         }
8918         return ret;
8919 #endif
8920 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8921     case TARGET_NR_dup3:
8922     {
8923         int host_flags;
8924 
8925         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8926             return -TARGET_EINVAL;
8927         }
8928         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8929         ret = get_errno(dup3(arg1, arg2, host_flags));
8930         if (ret >= 0) {
8931             fd_trans_dup(arg1, arg2);
8932         }
8933         return ret;
8934     }
8935 #endif
8936 #ifdef TARGET_NR_getppid /* not on alpha */
8937     case TARGET_NR_getppid:
8938         return get_errno(getppid());
8939 #endif
8940 #ifdef TARGET_NR_getpgrp
8941     case TARGET_NR_getpgrp:
8942         return get_errno(getpgrp());
8943 #endif
8944     case TARGET_NR_setsid:
8945         return get_errno(setsid());
8946 #ifdef TARGET_NR_sigaction
8947     case TARGET_NR_sigaction:
8948         {
8949 #if defined(TARGET_ALPHA)
8950             struct target_sigaction act, oact, *pact = 0;
8951             struct target_old_sigaction *old_act;
8952             if (arg2) {
8953                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8954                     return -TARGET_EFAULT;
8955                 act._sa_handler = old_act->_sa_handler;
8956                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8957                 act.sa_flags = old_act->sa_flags;
8958                 act.sa_restorer = 0;
8959                 unlock_user_struct(old_act, arg2, 0);
8960                 pact = &act;
8961             }
8962             ret = get_errno(do_sigaction(arg1, pact, &oact));
8963             if (!is_error(ret) && arg3) {
8964                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8965                     return -TARGET_EFAULT;
8966                 old_act->_sa_handler = oact._sa_handler;
8967                 old_act->sa_mask = oact.sa_mask.sig[0];
8968                 old_act->sa_flags = oact.sa_flags;
8969                 unlock_user_struct(old_act, arg3, 1);
8970             }
8971 #elif defined(TARGET_MIPS)
8972             struct target_sigaction act, oact, *pact, *old_act;
8973 
8974             if (arg2) {
8975                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8976                     return -TARGET_EFAULT;
8977                 act._sa_handler = old_act->_sa_handler;
8978                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8979                 act.sa_flags = old_act->sa_flags;
8980                 unlock_user_struct(old_act, arg2, 0);
8981                 pact = &act;
8982             } else {
8983                 pact = NULL;
8984             }
8985 
8986             ret = get_errno(do_sigaction(arg1, pact, &oact));
8987 
8988             if (!is_error(ret) && arg3) {
8989                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8990                     return -TARGET_EFAULT;
8991                 old_act->_sa_handler = oact._sa_handler;
8992                 old_act->sa_flags = oact.sa_flags;
8993                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8994                 old_act->sa_mask.sig[1] = 0;
8995                 old_act->sa_mask.sig[2] = 0;
8996                 old_act->sa_mask.sig[3] = 0;
8997                 unlock_user_struct(old_act, arg3, 1);
8998             }
8999 #else
9000             struct target_old_sigaction *old_act;
9001             struct target_sigaction act, oact, *pact;
9002             if (arg2) {
9003                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9004                     return -TARGET_EFAULT;
9005                 act._sa_handler = old_act->_sa_handler;
9006                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9007                 act.sa_flags = old_act->sa_flags;
9008                 act.sa_restorer = old_act->sa_restorer;
9009 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9010                 act.ka_restorer = 0;
9011 #endif
9012                 unlock_user_struct(old_act, arg2, 0);
9013                 pact = &act;
9014             } else {
9015                 pact = NULL;
9016             }
9017             ret = get_errno(do_sigaction(arg1, pact, &oact));
9018             if (!is_error(ret) && arg3) {
9019                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9020                     return -TARGET_EFAULT;
9021                 old_act->_sa_handler = oact._sa_handler;
9022                 old_act->sa_mask = oact.sa_mask.sig[0];
9023                 old_act->sa_flags = oact.sa_flags;
9024                 old_act->sa_restorer = oact.sa_restorer;
9025                 unlock_user_struct(old_act, arg3, 1);
9026             }
9027 #endif
9028         }
9029         return ret;
9030 #endif
9031     case TARGET_NR_rt_sigaction:
9032         {
9033 #if defined(TARGET_ALPHA)
9034             /* For Alpha and SPARC this is a 5 argument syscall, with
9035              * a 'restorer' parameter which must be copied into the
9036              * sa_restorer field of the sigaction struct.
9037              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9038              * and arg5 is the sigsetsize.
9039              * Alpha also has a separate rt_sigaction struct that it uses
9040              * here; SPARC uses the usual sigaction struct.
9041              */
9042             struct target_rt_sigaction *rt_act;
9043             struct target_sigaction act, oact, *pact = 0;
9044 
9045             if (arg4 != sizeof(target_sigset_t)) {
9046                 return -TARGET_EINVAL;
9047             }
9048             if (arg2) {
9049                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9050                     return -TARGET_EFAULT;
9051                 act._sa_handler = rt_act->_sa_handler;
9052                 act.sa_mask = rt_act->sa_mask;
9053                 act.sa_flags = rt_act->sa_flags;
9054                 act.sa_restorer = arg5;
9055                 unlock_user_struct(rt_act, arg2, 0);
9056                 pact = &act;
9057             }
9058             ret = get_errno(do_sigaction(arg1, pact, &oact));
9059             if (!is_error(ret) && arg3) {
9060                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9061                     return -TARGET_EFAULT;
9062                 rt_act->_sa_handler = oact._sa_handler;
9063                 rt_act->sa_mask = oact.sa_mask;
9064                 rt_act->sa_flags = oact.sa_flags;
9065                 unlock_user_struct(rt_act, arg3, 1);
9066             }
9067 #else
9068 #ifdef TARGET_SPARC
9069             target_ulong restorer = arg4;
9070             target_ulong sigsetsize = arg5;
9071 #else
9072             target_ulong sigsetsize = arg4;
9073 #endif
9074             struct target_sigaction *act;
9075             struct target_sigaction *oact;
9076 
9077             if (sigsetsize != sizeof(target_sigset_t)) {
9078                 return -TARGET_EINVAL;
9079             }
9080             if (arg2) {
9081                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9082                     return -TARGET_EFAULT;
9083                 }
9084 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9085                 act->ka_restorer = restorer;
9086 #endif
9087             } else {
9088                 act = NULL;
9089             }
9090             if (arg3) {
9091                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9092                     ret = -TARGET_EFAULT;
9093                     goto rt_sigaction_fail;
9094                 }
9095             } else
9096                 oact = NULL;
9097             ret = get_errno(do_sigaction(arg1, act, oact));
9098         rt_sigaction_fail:
9099             if (act)
9100                 unlock_user_struct(act, arg2, 0);
9101             if (oact)
9102                 unlock_user_struct(oact, arg3, 1);
9103 #endif
9104         }
9105         return ret;
9106 #ifdef TARGET_NR_sgetmask /* not on alpha */
9107     case TARGET_NR_sgetmask:
9108         {
9109             sigset_t cur_set;
9110             abi_ulong target_set;
9111             ret = do_sigprocmask(0, NULL, &cur_set);
9112             if (!ret) {
9113                 host_to_target_old_sigset(&target_set, &cur_set);
9114                 ret = target_set;
9115             }
9116         }
9117         return ret;
9118 #endif
9119 #ifdef TARGET_NR_ssetmask /* not on alpha */
9120     case TARGET_NR_ssetmask:
9121         {
9122             sigset_t set, oset;
9123             abi_ulong target_set = arg1;
9124             target_to_host_old_sigset(&set, &target_set);
9125             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9126             if (!ret) {
9127                 host_to_target_old_sigset(&target_set, &oset);
9128                 ret = target_set;
9129             }
9130         }
9131         return ret;
9132 #endif
9133 #ifdef TARGET_NR_sigprocmask
9134     case TARGET_NR_sigprocmask:
9135         {
9136 #if defined(TARGET_ALPHA)
9137             sigset_t set, oldset;
9138             abi_ulong mask;
9139             int how;
9140 
9141             switch (arg1) {
9142             case TARGET_SIG_BLOCK:
9143                 how = SIG_BLOCK;
9144                 break;
9145             case TARGET_SIG_UNBLOCK:
9146                 how = SIG_UNBLOCK;
9147                 break;
9148             case TARGET_SIG_SETMASK:
9149                 how = SIG_SETMASK;
9150                 break;
9151             default:
9152                 return -TARGET_EINVAL;
9153             }
9154             mask = arg2;
9155             target_to_host_old_sigset(&set, &mask);
9156 
9157             ret = do_sigprocmask(how, &set, &oldset);
9158             if (!is_error(ret)) {
9159                 host_to_target_old_sigset(&mask, &oldset);
9160                 ret = mask;
9161                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9162             }
9163 #else
9164             sigset_t set, oldset, *set_ptr;
9165             int how;
9166 
9167             if (arg2) {
9168                 switch (arg1) {
9169                 case TARGET_SIG_BLOCK:
9170                     how = SIG_BLOCK;
9171                     break;
9172                 case TARGET_SIG_UNBLOCK:
9173                     how = SIG_UNBLOCK;
9174                     break;
9175                 case TARGET_SIG_SETMASK:
9176                     how = SIG_SETMASK;
9177                     break;
9178                 default:
9179                     return -TARGET_EINVAL;
9180                 }
9181                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9182                     return -TARGET_EFAULT;
9183                 target_to_host_old_sigset(&set, p);
9184                 unlock_user(p, arg2, 0);
9185                 set_ptr = &set;
9186             } else {
9187                 how = 0;
9188                 set_ptr = NULL;
9189             }
9190             ret = do_sigprocmask(how, set_ptr, &oldset);
9191             if (!is_error(ret) && arg3) {
9192                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9193                     return -TARGET_EFAULT;
9194                 host_to_target_old_sigset(p, &oldset);
9195                 unlock_user(p, arg3, sizeof(target_sigset_t));
9196             }
9197 #endif
9198         }
9199         return ret;
9200 #endif
9201     case TARGET_NR_rt_sigprocmask:
9202         {
9203             int how = arg1;
9204             sigset_t set, oldset, *set_ptr;
9205 
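                 /* Unlike the ">" check used for rt_sigpending below, the
                  * kernel insists on an exact sigsetsize match here. */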
9206             if (arg4 != sizeof(target_sigset_t)) {
9207                 return -TARGET_EINVAL;
9208             }
9209 
9210             if (arg2) {
9211                 switch(how) {
9212                 case TARGET_SIG_BLOCK:
9213                     how = SIG_BLOCK;
9214                     break;
9215                 case TARGET_SIG_UNBLOCK:
9216                     how = SIG_UNBLOCK;
9217                     break;
9218                 case TARGET_SIG_SETMASK:
9219                     how = SIG_SETMASK;
9220                     break;
9221                 default:
9222                     return -TARGET_EINVAL;
9223                 }
9224                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9225                     return -TARGET_EFAULT;
9226                 target_to_host_sigset(&set, p);
9227                 unlock_user(p, arg2, 0);
9228                 set_ptr = &set;
9229             } else {
9230                 how = 0;
9231                 set_ptr = NULL;
9232             }
9233             ret = do_sigprocmask(how, set_ptr, &oldset);
9234             if (!is_error(ret) && arg3) {
9235                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9236                     return -TARGET_EFAULT;
9237                 host_to_target_sigset(p, &oldset);
9238                 unlock_user(p, arg3, sizeof(target_sigset_t));
9239             }
9240         }
9241         return ret;
9242 #ifdef TARGET_NR_sigpending
9243     case TARGET_NR_sigpending:
9244         {
9245             sigset_t set;
9246             ret = get_errno(sigpending(&set));
9247             if (!is_error(ret)) {
9248                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9249                     return -TARGET_EFAULT;
9250                 host_to_target_old_sigset(p, &set);
9251                 unlock_user(p, arg1, sizeof(target_sigset_t));
9252             }
9253         }
9254         return ret;
9255 #endif
9256     case TARGET_NR_rt_sigpending:
9257         {
9258             sigset_t set;
9259 
9260             /* Yes, this check is >, not != like most. We follow the
9261              * kernel's logic here: it implements NR_sigpending through
9262              * the same code path, and in that case the old_sigset_t is
9263              * smaller in size.
9264              */
9265             if (arg2 > sizeof(target_sigset_t)) {
9266                 return -TARGET_EINVAL;
9267             }
9268 
9269             ret = get_errno(sigpending(&set));
9270             if (!is_error(ret)) {
9271                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9272                     return -TARGET_EFAULT;
9273                 host_to_target_sigset(p, &set);
9274                 unlock_user(p, arg1, sizeof(target_sigset_t));
9275             }
9276         }
9277         return ret;
9278 #ifdef TARGET_NR_sigsuspend
9279     case TARGET_NR_sigsuspend:
9280         {
9281             TaskState *ts = cpu->opaque;
9282 #if defined(TARGET_ALPHA)
9283             abi_ulong mask = arg1;
9284             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9285 #else
9286             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9287                 return -TARGET_EFAULT;
9288             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9289             unlock_user(p, arg1, 0);
9290 #endif
9291             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9292                                                SIGSET_T_SIZE));
9293             if (ret != -TARGET_ERESTARTSYS) {
9294                 ts->in_sigsuspend = 1;
9295             }
9296         }
9297         return ret;
9298 #endif
9299     case TARGET_NR_rt_sigsuspend:
9300         {
9301             TaskState *ts = cpu->opaque;
9302 
9303             if (arg2 != sizeof(target_sigset_t)) {
9304                 return -TARGET_EINVAL;
9305             }
9306             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9307                 return -TARGET_EFAULT;
9308             target_to_host_sigset(&ts->sigsuspend_mask, p);
9309             unlock_user(p, arg1, 0);
9310             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9311                                                SIGSET_T_SIZE));
9312             if (ret != -TARGET_ERESTARTSYS) {
9313                 ts->in_sigsuspend = 1;
9314             }
9315         }
9316         return ret;
9317 #ifdef TARGET_NR_rt_sigtimedwait
9318     case TARGET_NR_rt_sigtimedwait:
9319         {
9320             sigset_t set;
9321             struct timespec uts, *puts;
9322             siginfo_t uinfo;
9323 
9324             if (arg4 != sizeof(target_sigset_t)) {
9325                 return -TARGET_EINVAL;
9326             }
9327 
9328             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9329                 return -TARGET_EFAULT;
9330             target_to_host_sigset(&set, p);
9331             unlock_user(p, arg1, 0);
9332             if (arg3) {
9333                 puts = &uts;
9334                 if (target_to_host_timespec(puts, arg3)) {
9335                     return -TARGET_EFAULT;
9336                 }
9337             } else {
9338                 puts = NULL;
9339             }
9340             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9341                                                  SIGSET_T_SIZE));
9342             if (!is_error(ret)) {
9343                 if (arg2) {
9344                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9345                                   0);
9346                     if (!p) {
9347                         return -TARGET_EFAULT;
9348                     }
9349                     host_to_target_siginfo(p, &uinfo);
9350                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9351                 }
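                     /* On success sigtimedwait() returns a host signal number;
                      * translate it into the guest's numbering. */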
9352                 ret = host_to_target_signal(ret);
9353             }
9354         }
9355         return ret;
9356 #endif
9357 #ifdef TARGET_NR_rt_sigtimedwait_time64
9358     case TARGET_NR_rt_sigtimedwait_time64:
9359         {
9360             sigset_t set;
9361             struct timespec uts, *puts;
9362             siginfo_t uinfo;
9363 
9364             if (arg4 != sizeof(target_sigset_t)) {
9365                 return -TARGET_EINVAL;
9366             }
9367 
9368             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9369             if (!p) {
9370                 return -TARGET_EFAULT;
9371             }
9372             target_to_host_sigset(&set, p);
9373             unlock_user(p, arg1, 0);
9374             if (arg3) {
9375                 puts = &uts;
9376                 if (target_to_host_timespec64(puts, arg3)) {
9377                     return -TARGET_EFAULT;
9378                 }
9379             } else {
9380                 puts = NULL;
9381             }
9382             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9383                                                  SIGSET_T_SIZE));
9384             if (!is_error(ret)) {
9385                 if (arg2) {
9386                     p = lock_user(VERIFY_WRITE, arg2,
9387                                   sizeof(target_siginfo_t), 0);
9388                     if (!p) {
9389                         return -TARGET_EFAULT;
9390                     }
9391                     host_to_target_siginfo(p, &uinfo);
9392                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9393                 }
9394                 ret = host_to_target_signal(ret);
9395             }
9396         }
9397         return ret;
9398 #endif
9399     case TARGET_NR_rt_sigqueueinfo:
9400         {
9401             siginfo_t uinfo;
9402 
9403             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9404             if (!p) {
9405                 return -TARGET_EFAULT;
9406             }
9407             target_to_host_siginfo(&uinfo, p);
9408             unlock_user(p, arg3, 0);
9409             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9410         }
9411         return ret;
9412     case TARGET_NR_rt_tgsigqueueinfo:
9413         {
9414             siginfo_t uinfo;
9415 
9416             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9417             if (!p) {
9418                 return -TARGET_EFAULT;
9419             }
9420             target_to_host_siginfo(&uinfo, p);
9421             unlock_user(p, arg4, 0);
9422             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9423         }
9424         return ret;
9425 #ifdef TARGET_NR_sigreturn
9426     case TARGET_NR_sigreturn:
9427         if (block_signals()) {
9428             return -TARGET_ERESTARTSYS;
9429         }
9430         return do_sigreturn(cpu_env);
9431 #endif
9432     case TARGET_NR_rt_sigreturn:
9433         if (block_signals()) {
9434             return -TARGET_ERESTARTSYS;
9435         }
9436         return do_rt_sigreturn(cpu_env);
9437     case TARGET_NR_sethostname:
9438         if (!(p = lock_user_string(arg1)))
9439             return -TARGET_EFAULT;
9440         ret = get_errno(sethostname(p, arg2));
9441         unlock_user(p, arg1, 0);
9442         return ret;
9443 #ifdef TARGET_NR_setrlimit
9444     case TARGET_NR_setrlimit:
9445         {
9446             int resource = target_to_host_resource(arg1);
9447             struct target_rlimit *target_rlim;
9448             struct rlimit rlim;
9449             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9450                 return -TARGET_EFAULT;
9451             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9452             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9453             unlock_user_struct(target_rlim, arg2, 0);
9454             /*
9455              * If we just passed through resource limit settings for memory then
9456              * they would also apply to QEMU's own allocations, and QEMU will
9457              * crash or hang or die if its allocations fail. Ideally we would
9458              * track the guest allocations in QEMU and apply the limits ourselves.
9459              * For now, just tell the guest the call succeeded but don't actually
9460              * limit anything.
9461              */
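                 /*
                  * For example, a guest "ulimit -v" (RLIMIT_AS) request is
                  * accepted here but not enforced, while something like
                  * RLIMIT_NOFILE still reaches the host setrlimit() below.
                  */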
9462             if (resource != RLIMIT_AS &&
9463                 resource != RLIMIT_DATA &&
9464                 resource != RLIMIT_STACK) {
9465                 return get_errno(setrlimit(resource, &rlim));
9466             } else {
9467                 return 0;
9468             }
9469         }
9470 #endif
9471 #ifdef TARGET_NR_getrlimit
9472     case TARGET_NR_getrlimit:
9473         {
9474             int resource = target_to_host_resource(arg1);
9475             struct target_rlimit *target_rlim;
9476             struct rlimit rlim;
9477 
9478             ret = get_errno(getrlimit(resource, &rlim));
9479             if (!is_error(ret)) {
9480                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9481                     return -TARGET_EFAULT;
9482                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9483                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9484                 unlock_user_struct(target_rlim, arg2, 1);
9485             }
9486         }
9487         return ret;
9488 #endif
9489     case TARGET_NR_getrusage:
9490         {
9491             struct rusage rusage;
9492             ret = get_errno(getrusage(arg1, &rusage));
9493             if (!is_error(ret)) {
9494                 ret = host_to_target_rusage(arg2, &rusage);
9495             }
9496         }
9497         return ret;
9498 #if defined(TARGET_NR_gettimeofday)
9499     case TARGET_NR_gettimeofday:
9500         {
9501             struct timeval tv;
9502             struct timezone tz;
9503 
9504             ret = get_errno(gettimeofday(&tv, &tz));
9505             if (!is_error(ret)) {
9506                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9507                     return -TARGET_EFAULT;
9508                 }
9509                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9510                     return -TARGET_EFAULT;
9511                 }
9512             }
9513         }
9514         return ret;
9515 #endif
9516 #if defined(TARGET_NR_settimeofday)
9517     case TARGET_NR_settimeofday:
9518         {
9519             struct timeval tv, *ptv = NULL;
9520             struct timezone tz, *ptz = NULL;
9521 
9522             if (arg1) {
9523                 if (copy_from_user_timeval(&tv, arg1)) {
9524                     return -TARGET_EFAULT;
9525                 }
9526                 ptv = &tv;
9527             }
9528 
9529             if (arg2) {
9530                 if (copy_from_user_timezone(&tz, arg2)) {
9531                     return -TARGET_EFAULT;
9532                 }
9533                 ptz = &tz;
9534             }
9535 
9536             return get_errno(settimeofday(ptv, ptz));
9537         }
9538 #endif
9539 #if defined(TARGET_NR_select)
9540     case TARGET_NR_select:
9541 #if defined(TARGET_WANT_NI_OLD_SELECT)
9542         /* some architectures used to have old_select here
9543          * but now return ENOSYS for it.
9544          */
9545         ret = -TARGET_ENOSYS;
9546 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9547         ret = do_old_select(arg1);
9548 #else
9549         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9550 #endif
9551         return ret;
9552 #endif
9553 #ifdef TARGET_NR_pselect6
9554     case TARGET_NR_pselect6:
9555         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9556 #endif
9557 #ifdef TARGET_NR_pselect6_time64
9558     case TARGET_NR_pselect6_time64:
9559         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9560 #endif
9561 #ifdef TARGET_NR_symlink
9562     case TARGET_NR_symlink:
9563         {
9564             void *p2;
9565             p = lock_user_string(arg1);
9566             p2 = lock_user_string(arg2);
9567             if (!p || !p2)
9568                 ret = -TARGET_EFAULT;
9569             else
9570                 ret = get_errno(symlink(p, p2));
9571             unlock_user(p2, arg2, 0);
9572             unlock_user(p, arg1, 0);
9573         }
9574         return ret;
9575 #endif
9576 #if defined(TARGET_NR_symlinkat)
9577     case TARGET_NR_symlinkat:
9578         {
9579             void *p2;
9580             p  = lock_user_string(arg1);
9581             p2 = lock_user_string(arg3);
9582             if (!p || !p2)
9583                 ret = -TARGET_EFAULT;
9584             else
9585                 ret = get_errno(symlinkat(p, arg2, p2));
9586             unlock_user(p2, arg3, 0);
9587             unlock_user(p, arg1, 0);
9588         }
9589         return ret;
9590 #endif
9591 #ifdef TARGET_NR_readlink
9592     case TARGET_NR_readlink:
9593         {
9594             void *p2;
9595             p = lock_user_string(arg1);
9596             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9597             if (!p || !p2) {
9598                 ret = -TARGET_EFAULT;
9599             } else if (!arg3) {
9600                 /* Short circuit this for the magic exe check. */
9601                 ret = -TARGET_EINVAL;
9602             } else if (is_proc_myself((const char *)p, "exe")) {
9603                 char real[PATH_MAX], *temp;
9604                 temp = realpath(exec_path, real);
9605                 /* Return value is # of bytes that we wrote to the buffer. */
9606                 if (temp == NULL) {
9607                     ret = get_errno(-1);
9608                 } else {
9609                     /* Don't worry about sign mismatch as earlier mapping
9610                      * logic would have thrown a bad address error. */
9611                     ret = MIN(strlen(real), arg3);
9612                     /* We cannot NUL terminate the string. */
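                         /* (Neither does readlink(2) itself, so truncating to
                          * arg3 bytes with no NUL matches what the guest would
                          * see from a real kernel.) */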
9613                     memcpy(p2, real, ret);
9614                 }
9615             } else {
9616                 ret = get_errno(readlink(path(p), p2, arg3));
9617             }
9618             unlock_user(p2, arg2, ret);
9619             unlock_user(p, arg1, 0);
9620         }
9621         return ret;
9622 #endif
9623 #if defined(TARGET_NR_readlinkat)
9624     case TARGET_NR_readlinkat:
9625         {
9626             void *p2;
9627             p  = lock_user_string(arg2);
9628             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9629             if (!p || !p2) {
9630                 ret = -TARGET_EFAULT;
9631             } else if (is_proc_myself((const char *)p, "exe")) {
9632                 char real[PATH_MAX], *temp;
9633                 temp = realpath(exec_path, real);
                     if (temp == NULL) {
                         ret = get_errno(-1);
                     } else {
                         /* Like readlink(): truncate to the buffer size and
                          * do not NUL-terminate. */
                         ret = MIN(strlen(real), arg4);
                         memcpy(p2, real, ret);
                     }
9636             } else {
9637                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9638             }
9639             unlock_user(p2, arg3, ret);
9640             unlock_user(p, arg2, 0);
9641         }
9642         return ret;
9643 #endif
9644 #ifdef TARGET_NR_swapon
9645     case TARGET_NR_swapon:
9646         if (!(p = lock_user_string(arg1)))
9647             return -TARGET_EFAULT;
9648         ret = get_errno(swapon(p, arg2));
9649         unlock_user(p, arg1, 0);
9650         return ret;
9651 #endif
9652     case TARGET_NR_reboot:
9653         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9654            /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 */
9655            p = lock_user_string(arg4);
9656            if (!p) {
9657                return -TARGET_EFAULT;
9658            }
9659            ret = get_errno(reboot(arg1, arg2, arg3, p));
9660            unlock_user(p, arg4, 0);
9661         } else {
9662            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9663         }
9664         return ret;
9665 #ifdef TARGET_NR_mmap
9666     case TARGET_NR_mmap:
9667 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9668     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9669     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9670     || defined(TARGET_S390X)
9671         {
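             /*
              * On these targets the old-style mmap() passes a pointer to a
              * block of six arguments in guest memory instead of passing
              * them in registers, so fetch them from arg1 first.
              */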
9672             abi_ulong *v;
9673             abi_ulong v1, v2, v3, v4, v5, v6;
9674             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9675                 return -TARGET_EFAULT;
9676             v1 = tswapal(v[0]);
9677             v2 = tswapal(v[1]);
9678             v3 = tswapal(v[2]);
9679             v4 = tswapal(v[3]);
9680             v5 = tswapal(v[4]);
9681             v6 = tswapal(v[5]);
9682             unlock_user(v, arg1, 0);
9683             ret = get_errno(target_mmap(v1, v2, v3,
9684                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9685                                         v5, v6));
9686         }
9687 #else
9688         ret = get_errno(target_mmap(arg1, arg2, arg3,
9689                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9690                                     arg5,
9691                                     arg6));
9692 #endif
9693         return ret;
9694 #endif
9695 #ifdef TARGET_NR_mmap2
9696     case TARGET_NR_mmap2:
9697 #ifndef MMAP_SHIFT
9698 #define MMAP_SHIFT 12
9699 #endif
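             /*
              * The mmap2 offset (arg6) is in units of 1 << MMAP_SHIFT
              * (4096-byte pages unless the target overrides it), so e.g.
              * arg6 == 3 means a byte offset of 12288.
              */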
9700         ret = target_mmap(arg1, arg2, arg3,
9701                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9702                           arg5, arg6 << MMAP_SHIFT);
9703         return get_errno(ret);
9704 #endif
9705     case TARGET_NR_munmap:
9706         return get_errno(target_munmap(arg1, arg2));
9707     case TARGET_NR_mprotect:
9708         {
9709             TaskState *ts = cpu->opaque;
9710             /* Special hack to detect libc making the stack executable.  */
9711             if ((arg3 & PROT_GROWSDOWN)
9712                 && arg1 >= ts->info->stack_limit
9713                 && arg1 <= ts->info->start_stack) {
9714                 arg3 &= ~PROT_GROWSDOWN;
9715                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9716                 arg1 = ts->info->stack_limit;
9717             }
9718         }
9719         return get_errno(target_mprotect(arg1, arg2, arg3));
9720 #ifdef TARGET_NR_mremap
9721     case TARGET_NR_mremap:
9722         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9723 #endif
9724         /* ??? msync/mlock/munlock are broken for softmmu.  */
9725 #ifdef TARGET_NR_msync
9726     case TARGET_NR_msync:
9727         return get_errno(msync(g2h(arg1), arg2, arg3));
9728 #endif
9729 #ifdef TARGET_NR_mlock
9730     case TARGET_NR_mlock:
9731         return get_errno(mlock(g2h(arg1), arg2));
9732 #endif
9733 #ifdef TARGET_NR_munlock
9734     case TARGET_NR_munlock:
9735         return get_errno(munlock(g2h(arg1), arg2));
9736 #endif
9737 #ifdef TARGET_NR_mlockall
9738     case TARGET_NR_mlockall:
9739         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9740 #endif
9741 #ifdef TARGET_NR_munlockall
9742     case TARGET_NR_munlockall:
9743         return get_errno(munlockall());
9744 #endif
9745 #ifdef TARGET_NR_truncate
9746     case TARGET_NR_truncate:
9747         if (!(p = lock_user_string(arg1)))
9748             return -TARGET_EFAULT;
9749         ret = get_errno(truncate(p, arg2));
9750         unlock_user(p, arg1, 0);
9751         return ret;
9752 #endif
9753 #ifdef TARGET_NR_ftruncate
9754     case TARGET_NR_ftruncate:
9755         return get_errno(ftruncate(arg1, arg2));
9756 #endif
9757     case TARGET_NR_fchmod:
9758         return get_errno(fchmod(arg1, arg2));
9759 #if defined(TARGET_NR_fchmodat)
9760     case TARGET_NR_fchmodat:
9761         if (!(p = lock_user_string(arg2)))
9762             return -TARGET_EFAULT;
9763         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9764         unlock_user(p, arg2, 0);
9765         return ret;
9766 #endif
9767     case TARGET_NR_getpriority:
9768         /* Note that negative values are valid for getpriority, so we must
9769            differentiate based on errno settings.  */
9770         errno = 0;
9771         ret = getpriority(arg1, arg2);
9772         if (ret == -1 && errno != 0) {
9773             return -host_to_target_errno(errno);
9774         }
9775 #ifdef TARGET_ALPHA
9776         /* Return value is the unbiased priority.  Signal no error.  */
9777         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9778 #else
9779         /* Return value is a biased priority to avoid negative numbers.  */
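             /* e.g. a host nice value of -5 is returned to the guest as 25,
              * the 1..40 biased range the raw syscall uses; the guest's libc
              * is expected to convert it back. */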
9780         ret = 20 - ret;
9781 #endif
9782         return ret;
9783     case TARGET_NR_setpriority:
9784         return get_errno(setpriority(arg1, arg2, arg3));
9785 #ifdef TARGET_NR_statfs
9786     case TARGET_NR_statfs:
9787         if (!(p = lock_user_string(arg1))) {
9788             return -TARGET_EFAULT;
9789         }
9790         ret = get_errno(statfs(path(p), &stfs));
9791         unlock_user(p, arg1, 0);
9792     convert_statfs:
9793         if (!is_error(ret)) {
9794             struct target_statfs *target_stfs;
9795 
9796             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9797                 return -TARGET_EFAULT;
9798             __put_user(stfs.f_type, &target_stfs->f_type);
9799             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9800             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9801             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9802             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9803             __put_user(stfs.f_files, &target_stfs->f_files);
9804             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9805             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9806             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9807             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9808             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9809 #ifdef _STATFS_F_FLAGS
9810             __put_user(stfs.f_flags, &target_stfs->f_flags);
9811 #else
9812             __put_user(0, &target_stfs->f_flags);
9813 #endif
9814             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9815             unlock_user_struct(target_stfs, arg2, 1);
9816         }
9817         return ret;
9818 #endif
9819 #ifdef TARGET_NR_fstatfs
9820     case TARGET_NR_fstatfs:
9821         ret = get_errno(fstatfs(arg1, &stfs));
9822         goto convert_statfs;
9823 #endif
9824 #ifdef TARGET_NR_statfs64
9825     case TARGET_NR_statfs64:
9826         if (!(p = lock_user_string(arg1))) {
9827             return -TARGET_EFAULT;
9828         }
9829         ret = get_errno(statfs(path(p), &stfs));
9830         unlock_user(p, arg1, 0);
9831     convert_statfs64:
9832         if (!is_error(ret)) {
9833             struct target_statfs64 *target_stfs;
9834 
9835             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9836                 return -TARGET_EFAULT;
9837             __put_user(stfs.f_type, &target_stfs->f_type);
9838             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9839             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9840             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9841             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9842             __put_user(stfs.f_files, &target_stfs->f_files);
9843             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9844             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9845             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9846             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9847             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9848 #ifdef _STATFS_F_FLAGS
9849             __put_user(stfs.f_flags, &target_stfs->f_flags);
9850 #else
9851             __put_user(0, &target_stfs->f_flags);
9852 #endif
9853             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9854             unlock_user_struct(target_stfs, arg3, 1);
9855         }
9856         return ret;
9857     case TARGET_NR_fstatfs64:
9858         ret = get_errno(fstatfs(arg1, &stfs));
9859         goto convert_statfs64;
9860 #endif
9861 #ifdef TARGET_NR_socketcall
9862     case TARGET_NR_socketcall:
9863         return do_socketcall(arg1, arg2);
9864 #endif
9865 #ifdef TARGET_NR_accept
9866     case TARGET_NR_accept:
9867         return do_accept4(arg1, arg2, arg3, 0);
9868 #endif
9869 #ifdef TARGET_NR_accept4
9870     case TARGET_NR_accept4:
9871         return do_accept4(arg1, arg2, arg3, arg4);
9872 #endif
9873 #ifdef TARGET_NR_bind
9874     case TARGET_NR_bind:
9875         return do_bind(arg1, arg2, arg3);
9876 #endif
9877 #ifdef TARGET_NR_connect
9878     case TARGET_NR_connect:
9879         return do_connect(arg1, arg2, arg3);
9880 #endif
9881 #ifdef TARGET_NR_getpeername
9882     case TARGET_NR_getpeername:
9883         return do_getpeername(arg1, arg2, arg3);
9884 #endif
9885 #ifdef TARGET_NR_getsockname
9886     case TARGET_NR_getsockname:
9887         return do_getsockname(arg1, arg2, arg3);
9888 #endif
9889 #ifdef TARGET_NR_getsockopt
9890     case TARGET_NR_getsockopt:
9891         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9892 #endif
9893 #ifdef TARGET_NR_listen
9894     case TARGET_NR_listen:
9895         return get_errno(listen(arg1, arg2));
9896 #endif
9897 #ifdef TARGET_NR_recv
9898     case TARGET_NR_recv:
9899         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9900 #endif
9901 #ifdef TARGET_NR_recvfrom
9902     case TARGET_NR_recvfrom:
9903         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9904 #endif
9905 #ifdef TARGET_NR_recvmsg
9906     case TARGET_NR_recvmsg:
9907         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9908 #endif
9909 #ifdef TARGET_NR_send
9910     case TARGET_NR_send:
9911         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9912 #endif
9913 #ifdef TARGET_NR_sendmsg
9914     case TARGET_NR_sendmsg:
9915         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9916 #endif
9917 #ifdef TARGET_NR_sendmmsg
9918     case TARGET_NR_sendmmsg:
9919         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9920 #endif
9921 #ifdef TARGET_NR_recvmmsg
9922     case TARGET_NR_recvmmsg:
9923         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9924 #endif
9925 #ifdef TARGET_NR_sendto
9926     case TARGET_NR_sendto:
9927         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9928 #endif
9929 #ifdef TARGET_NR_shutdown
9930     case TARGET_NR_shutdown:
9931         return get_errno(shutdown(arg1, arg2));
9932 #endif
9933 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9934     case TARGET_NR_getrandom:
9935         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9936         if (!p) {
9937             return -TARGET_EFAULT;
9938         }
9939         ret = get_errno(getrandom(p, arg2, arg3));
9940         unlock_user(p, arg1, ret);
9941         return ret;
9942 #endif
9943 #ifdef TARGET_NR_socket
9944     case TARGET_NR_socket:
9945         return do_socket(arg1, arg2, arg3);
9946 #endif
9947 #ifdef TARGET_NR_socketpair
9948     case TARGET_NR_socketpair:
9949         return do_socketpair(arg1, arg2, arg3, arg4);
9950 #endif
9951 #ifdef TARGET_NR_setsockopt
9952     case TARGET_NR_setsockopt:
9953         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9954 #endif
9955 #if defined(TARGET_NR_syslog)
9956     case TARGET_NR_syslog:
9957         {
9958             int len = arg3;    /* length argument: syslog(type, bufp, len) */
9959 
9960             switch (arg1) {
9961             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9962             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9963             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9964             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9965             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9966             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9967             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9968             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9969                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9970             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9971             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9972             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9973                 {
9974                     if (len < 0) {
9975                         return -TARGET_EINVAL;
9976                     }
9977                     if (len == 0) {
9978                         return 0;
9979                     }
9980                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9981                     if (!p) {
9982                         return -TARGET_EFAULT;
9983                     }
9984                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9985                     unlock_user(p, arg2, arg3);
9986                 }
9987                 return ret;
9988             default:
9989                 return -TARGET_EINVAL;
9990             }
9991         }
9992         break;
9993 #endif
9994     case TARGET_NR_setitimer:
9995         {
9996             struct itimerval value, ovalue, *pvalue;
9997 
9998             if (arg2) {
9999                 pvalue = &value;
10000                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10001                     || copy_from_user_timeval(&pvalue->it_value,
10002                                               arg2 + sizeof(struct target_timeval)))
10003                     return -TARGET_EFAULT;
10004             } else {
10005                 pvalue = NULL;
10006             }
10007             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10008             if (!is_error(ret) && arg3) {
10009                 if (copy_to_user_timeval(arg3,
10010                                          &ovalue.it_interval)
10011                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10012                                             &ovalue.it_value))
10013                     return -TARGET_EFAULT;
10014             }
10015         }
10016         return ret;
10017     case TARGET_NR_getitimer:
10018         {
10019             struct itimerval value;
10020 
10021             ret = get_errno(getitimer(arg1, &value));
10022             if (!is_error(ret) && arg2) {
10023                 if (copy_to_user_timeval(arg2,
10024                                          &value.it_interval)
10025                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10026                                             &value.it_value))
10027                     return -TARGET_EFAULT;
10028             }
10029         }
10030         return ret;
10031 #ifdef TARGET_NR_stat
10032     case TARGET_NR_stat:
10033         if (!(p = lock_user_string(arg1))) {
10034             return -TARGET_EFAULT;
10035         }
10036         ret = get_errno(stat(path(p), &st));
10037         unlock_user(p, arg1, 0);
10038         goto do_stat;
10039 #endif
10040 #ifdef TARGET_NR_lstat
10041     case TARGET_NR_lstat:
10042         if (!(p = lock_user_string(arg1))) {
10043             return -TARGET_EFAULT;
10044         }
10045         ret = get_errno(lstat(path(p), &st));
10046         unlock_user(p, arg1, 0);
10047         goto do_stat;
10048 #endif
10049 #ifdef TARGET_NR_fstat
10050     case TARGET_NR_fstat:
10051         {
10052             ret = get_errno(fstat(arg1, &st));
10053 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10054         do_stat:
10055 #endif
10056             if (!is_error(ret)) {
10057                 struct target_stat *target_st;
10058 
10059                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10060                     return -TARGET_EFAULT;
10061                 memset(target_st, 0, sizeof(*target_st));
10062                 __put_user(st.st_dev, &target_st->st_dev);
10063                 __put_user(st.st_ino, &target_st->st_ino);
10064                 __put_user(st.st_mode, &target_st->st_mode);
10065                 __put_user(st.st_uid, &target_st->st_uid);
10066                 __put_user(st.st_gid, &target_st->st_gid);
10067                 __put_user(st.st_nlink, &target_st->st_nlink);
10068                 __put_user(st.st_rdev, &target_st->st_rdev);
10069                 __put_user(st.st_size, &target_st->st_size);
10070                 __put_user(st.st_blksize, &target_st->st_blksize);
10071                 __put_user(st.st_blocks, &target_st->st_blocks);
10072                 __put_user(st.st_atime, &target_st->target_st_atime);
10073                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10074                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10075 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10076     defined(TARGET_STAT_HAVE_NSEC)
10077                 __put_user(st.st_atim.tv_nsec,
10078                            &target_st->target_st_atime_nsec);
10079                 __put_user(st.st_mtim.tv_nsec,
10080                            &target_st->target_st_mtime_nsec);
10081                 __put_user(st.st_ctim.tv_nsec,
10082                            &target_st->target_st_ctime_nsec);
10083 #endif
10084                 unlock_user_struct(target_st, arg2, 1);
10085             }
10086         }
10087         return ret;
10088 #endif
10089     case TARGET_NR_vhangup:
10090         return get_errno(vhangup());
10091 #ifdef TARGET_NR_syscall
10092     case TARGET_NR_syscall:
10093         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10094                           arg6, arg7, arg8, 0);
10095 #endif
10096 #if defined(TARGET_NR_wait4)
10097     case TARGET_NR_wait4:
10098         {
10099             int status;
10100             abi_long status_ptr = arg2;
10101             struct rusage rusage, *rusage_ptr;
10102             abi_ulong target_rusage = arg4;
10103             abi_long rusage_err;
10104             if (target_rusage)
10105                 rusage_ptr = &rusage;
10106             else
10107                 rusage_ptr = NULL;
10108             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10109             if (!is_error(ret)) {
10110                 if (status_ptr && ret) {
10111                     status = host_to_target_waitstatus(status);
10112                     if (put_user_s32(status, status_ptr))
10113                         return -TARGET_EFAULT;
10114                 }
10115                 if (target_rusage) {
10116                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10117                     if (rusage_err) {
10118                         ret = rusage_err;
10119                     }
10120                 }
10121             }
10122         }
10123         return ret;
10124 #endif
10125 #ifdef TARGET_NR_swapoff
10126     case TARGET_NR_swapoff:
10127         if (!(p = lock_user_string(arg1)))
10128             return -TARGET_EFAULT;
10129         ret = get_errno(swapoff(p));
10130         unlock_user(p, arg1, 0);
10131         return ret;
10132 #endif
10133     case TARGET_NR_sysinfo:
10134         {
10135             struct target_sysinfo *target_value;
10136             struct sysinfo value;
10137             ret = get_errno(sysinfo(&value));
10138             if (!is_error(ret) && arg1)
10139             {
10140                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10141                     return -TARGET_EFAULT;
10142                 __put_user(value.uptime, &target_value->uptime);
10143                 __put_user(value.loads[0], &target_value->loads[0]);
10144                 __put_user(value.loads[1], &target_value->loads[1]);
10145                 __put_user(value.loads[2], &target_value->loads[2]);
10146                 __put_user(value.totalram, &target_value->totalram);
10147                 __put_user(value.freeram, &target_value->freeram);
10148                 __put_user(value.sharedram, &target_value->sharedram);
10149                 __put_user(value.bufferram, &target_value->bufferram);
10150                 __put_user(value.totalswap, &target_value->totalswap);
10151                 __put_user(value.freeswap, &target_value->freeswap);
10152                 __put_user(value.procs, &target_value->procs);
10153                 __put_user(value.totalhigh, &target_value->totalhigh);
10154                 __put_user(value.freehigh, &target_value->freehigh);
10155                 __put_user(value.mem_unit, &target_value->mem_unit);
10156                 unlock_user_struct(target_value, arg1, 1);
10157             }
10158         }
10159         return ret;
10160 #ifdef TARGET_NR_ipc
10161     case TARGET_NR_ipc:
10162         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10163 #endif
10164 #ifdef TARGET_NR_semget
10165     case TARGET_NR_semget:
10166         return get_errno(semget(arg1, arg2, arg3));
10167 #endif
10168 #ifdef TARGET_NR_semop
10169     case TARGET_NR_semop:
10170         return do_semtimedop(arg1, arg2, arg3, 0, false);
10171 #endif
10172 #ifdef TARGET_NR_semtimedop
10173     case TARGET_NR_semtimedop:
10174         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10175 #endif
10176 #ifdef TARGET_NR_semtimedop_time64
10177     case TARGET_NR_semtimedop_time64:
10178         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10179 #endif
10180 #ifdef TARGET_NR_semctl
10181     case TARGET_NR_semctl:
10182         return do_semctl(arg1, arg2, arg3, arg4);
10183 #endif
10184 #ifdef TARGET_NR_msgctl
10185     case TARGET_NR_msgctl:
10186         return do_msgctl(arg1, arg2, arg3);
10187 #endif
10188 #ifdef TARGET_NR_msgget
10189     case TARGET_NR_msgget:
10190         return get_errno(msgget(arg1, arg2));
10191 #endif
10192 #ifdef TARGET_NR_msgrcv
10193     case TARGET_NR_msgrcv:
10194         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10195 #endif
10196 #ifdef TARGET_NR_msgsnd
10197     case TARGET_NR_msgsnd:
10198         return do_msgsnd(arg1, arg2, arg3, arg4);
10199 #endif
10200 #ifdef TARGET_NR_shmget
10201     case TARGET_NR_shmget:
10202         return get_errno(shmget(arg1, arg2, arg3));
10203 #endif
10204 #ifdef TARGET_NR_shmctl
10205     case TARGET_NR_shmctl:
10206         return do_shmctl(arg1, arg2, arg3);
10207 #endif
10208 #ifdef TARGET_NR_shmat
10209     case TARGET_NR_shmat:
10210         return do_shmat(cpu_env, arg1, arg2, arg3);
10211 #endif
10212 #ifdef TARGET_NR_shmdt
10213     case TARGET_NR_shmdt:
10214         return do_shmdt(arg1);
10215 #endif
10216     case TARGET_NR_fsync:
10217         return get_errno(fsync(arg1));
10218     case TARGET_NR_clone:
10219         /* Linux manages to have three different orderings for its
10220          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10221          * match the kernel's CONFIG_CLONE_* settings.
10222          * Microblaze is further special in that it uses a sixth
10223          * implicit argument to clone for the TLS pointer.
10224          */
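              /* The per-target calls below only reorder the guest's argument
               * registers into the single ordering that do_fork() expects;
               * note for instance how TARGET_CLONE_BACKWARDS2 swaps the first
               * two guest arguments relative to the default case. */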
10225 #if defined(TARGET_MICROBLAZE)
10226         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10227 #elif defined(TARGET_CLONE_BACKWARDS)
10228         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10229 #elif defined(TARGET_CLONE_BACKWARDS2)
10230         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10231 #else
10232         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10233 #endif
10234         return ret;
10235 #ifdef __NR_exit_group
10236         /* new thread calls */
10237     case TARGET_NR_exit_group:
10238         preexit_cleanup(cpu_env, arg1);
10239         return get_errno(exit_group(arg1));
10240 #endif
10241     case TARGET_NR_setdomainname:
10242         if (!(p = lock_user_string(arg1)))
10243             return -TARGET_EFAULT;
10244         ret = get_errno(setdomainname(p, arg2));
10245         unlock_user(p, arg1, 0);
10246         return ret;
10247     case TARGET_NR_uname:
10248         /* no need to transcode because we use the Linux syscall */
10249         {
10250             struct new_utsname * buf;
10251 
10252             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10253                 return -TARGET_EFAULT;
10254             ret = get_errno(sys_uname(buf));
10255             if (!is_error(ret)) {
10256                 /* Overwrite the native machine name with whatever is being
10257                    emulated. */
10258                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10259                           sizeof(buf->machine));
10260                 /* Allow the user to override the reported release.  */
10261                 if (qemu_uname_release && *qemu_uname_release) {
10262                     g_strlcpy(buf->release, qemu_uname_release,
10263                               sizeof(buf->release));
10264                 }
10265             }
10266             unlock_user_struct(buf, arg1, 1);
10267         }
10268         return ret;
10269 #ifdef TARGET_I386
10270     case TARGET_NR_modify_ldt:
10271         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10272 #if !defined(TARGET_X86_64)
10273     case TARGET_NR_vm86:
10274         return do_vm86(cpu_env, arg1, arg2);
10275 #endif
10276 #endif
10277 #if defined(TARGET_NR_adjtimex)
10278     case TARGET_NR_adjtimex:
10279         {
10280             struct timex host_buf;
10281 
10282             if (target_to_host_timex(&host_buf, arg1) != 0) {
10283                 return -TARGET_EFAULT;
10284             }
10285             ret = get_errno(adjtimex(&host_buf));
10286             if (!is_error(ret)) {
10287                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10288                     return -TARGET_EFAULT;
10289                 }
10290             }
10291         }
10292         return ret;
10293 #endif
10294 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10295     case TARGET_NR_clock_adjtime:
10296         {
10297             struct timex htx, *phtx = &htx;
10298 
10299             if (target_to_host_timex(phtx, arg2) != 0) {
10300                 return -TARGET_EFAULT;
10301             }
10302             ret = get_errno(clock_adjtime(arg1, phtx));
10303             if (!is_error(ret) && phtx) {
10304                 if (host_to_target_timex(arg2, phtx) != 0) {
10305                     return -TARGET_EFAULT;
10306                 }
10307             }
10308         }
10309         return ret;
10310 #endif
10311 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10312     case TARGET_NR_clock_adjtime64:
10313         {
10314             struct timex htx;
10315 
10316             if (target_to_host_timex64(&htx, arg2) != 0) {
10317                 return -TARGET_EFAULT;
10318             }
10319             ret = get_errno(clock_adjtime(arg1, &htx));
10320             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10321                     return -TARGET_EFAULT;
10322             }
10323         }
10324         return ret;
10325 #endif
10326     case TARGET_NR_getpgid:
10327         return get_errno(getpgid(arg1));
10328     case TARGET_NR_fchdir:
10329         return get_errno(fchdir(arg1));
10330     case TARGET_NR_personality:
10331         return get_errno(personality(arg1));
10332 #ifdef TARGET_NR__llseek /* Not on alpha */
10333     case TARGET_NR__llseek:
10334         {
10335             int64_t res;
10336 #if !defined(__NR_llseek)
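                  /*
                   * No host llseek(): assemble the 64-bit offset from the
                   * guest's high (arg2) and low (arg3) halves ourselves
                   * (e.g. arg2 == 1, arg3 == 0 gives offset 0x100000000)
                   * and report the result through the guest pointer in arg4
                   * below, as llseek would.
                   */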
10337             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10338             if (res == -1) {
10339                 ret = get_errno(res);
10340             } else {
10341                 ret = 0;
10342             }
10343 #else
10344             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10345 #endif
10346             if ((ret == 0) && put_user_s64(res, arg4)) {
10347                 return -TARGET_EFAULT;
10348             }
10349         }
10350         return ret;
10351 #endif
10352 #ifdef TARGET_NR_getdents
10353     case TARGET_NR_getdents:
10354 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10355 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10356         {
10357             struct target_dirent *target_dirp;
10358             struct linux_dirent *dirp;
10359             abi_long count = arg3;
10360 
10361             dirp = g_try_malloc(count);
10362             if (!dirp) {
10363                 return -TARGET_ENOMEM;
10364             }
10365 
10366             ret = get_errno(sys_getdents(arg1, dirp, count));
10367             if (!is_error(ret)) {
10368                 struct linux_dirent *de;
10369                 struct target_dirent *tde;
10370                 int len = ret;
10371                 int reclen, treclen;
10372                 int count1, tnamelen;
10373 
10374                 count1 = 0;
10375                 de = dirp;
10376                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                          /* Don't leak the scratch buffer on a bad guest address. */
                          g_free(dirp);
10377                     return -TARGET_EFAULT;
                      }
10378                 tde = target_dirp;
10379                 while (len > 0) {
10380                     reclen = de->d_reclen;
10381                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10382                     assert(tnamelen >= 0);
10383                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10384                     assert(count1 + treclen <= count);
10385                     tde->d_reclen = tswap16(treclen);
10386                     tde->d_ino = tswapal(de->d_ino);
10387                     tde->d_off = tswapal(de->d_off);
10388                     memcpy(tde->d_name, de->d_name, tnamelen);
10389                     de = (struct linux_dirent *)((char *)de + reclen);
10390                     len -= reclen;
10391                     tde = (struct target_dirent *)((char *)tde + treclen);
10392                     count1 += treclen;
10393                 }
10394                 ret = count1;
10395                 unlock_user(target_dirp, arg2, ret);
10396             }
10397             g_free(dirp);
10398         }
10399 #else
10400         {
10401             struct linux_dirent *dirp;
10402             abi_long count = arg3;
10403 
10404             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10405                 return -TARGET_EFAULT;
10406             ret = get_errno(sys_getdents(arg1, dirp, count));
10407             if (!is_error(ret)) {
10408                 struct linux_dirent *de;
10409                 int len = ret;
10410                 int reclen;
10411                 de = dirp;
10412                 while (len > 0) {
10413                     reclen = de->d_reclen;
10414                     if (reclen > len)
10415                         break;
10416                     de->d_reclen = tswap16(reclen);
10417                     tswapls(&de->d_ino);
10418                     tswapls(&de->d_off);
10419                     de = (struct linux_dirent *)((char *)de + reclen);
10420                     len -= reclen;
10421                 }
10422             }
10423             unlock_user(dirp, arg2, ret);
10424         }
10425 #endif
10426 #else
10427         /* Implement getdents in terms of getdents64 */
10428         {
10429             struct linux_dirent64 *dirp;
10430             abi_long count = arg3;
10431 
10432             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10433             if (!dirp) {
10434                 return -TARGET_EFAULT;
10435             }
10436             ret = get_errno(sys_getdents64(arg1, dirp, count));
10437             if (!is_error(ret)) {
10438                 /* Convert the dirent64 structs to target dirent.  We do this
10439                  * in-place, since we can guarantee that a target_dirent is no
10440                  * larger than a dirent64; however this means we have to be
10441                  * careful to read everything before writing in the new format.
10442                  */
10443                 struct linux_dirent64 *de;
10444                 struct target_dirent *tde;
10445                 int len = ret;
10446                 int tlen = 0;
10447 
10448                 de = dirp;
10449                 tde = (struct target_dirent *)dirp;
10450                 while (len > 0) {
10451                     int namelen, treclen;
10452                     int reclen = de->d_reclen;
10453                     uint64_t ino = de->d_ino;
10454                     int64_t off = de->d_off;
10455                     uint8_t type = de->d_type;
10456 
10457                     namelen = strlen(de->d_name);
10458                     treclen = offsetof(struct target_dirent, d_name)
10459                         + namelen + 2;
10460                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
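                          /* The "+ 2" above leaves room for the trailing NUL and
                           * for the d_type byte stored in the record's last byte
                           * (see below), before rounding up to an abi_long
                           * boundary. */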
10461 
10462                     memmove(tde->d_name, de->d_name, namelen + 1);
10463                     tde->d_ino = tswapal(ino);
10464                     tde->d_off = tswapal(off);
10465                     tde->d_reclen = tswap16(treclen);
10466                     /* The target_dirent type is in what was formerly a padding
10467                      * byte at the end of the structure:
10468                      */
10469                     *(((char *)tde) + treclen - 1) = type;
10470 
10471                     de = (struct linux_dirent64 *)((char *)de + reclen);
10472                     tde = (struct target_dirent *)((char *)tde + treclen);
10473                     len -= reclen;
10474                     tlen += treclen;
10475                 }
10476                 ret = tlen;
10477             }
10478             unlock_user(dirp, arg2, ret);
10479         }
10480 #endif
10481         return ret;
10482 #endif /* TARGET_NR_getdents */
10483 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10484     case TARGET_NR_getdents64:
10485         {
10486             struct linux_dirent64 *dirp;
10487             abi_long count = arg3;
10488             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10489                 return -TARGET_EFAULT;
10490             ret = get_errno(sys_getdents64(arg1, dirp, count));
10491             if (!is_error(ret)) {
10492                 struct linux_dirent64 *de;
10493                 int len = ret;
10494                 int reclen;
10495                 de = dirp;
10496                 while (len > 0) {
10497                     reclen = de->d_reclen;
10498                     if (reclen > len)
10499                         break;
10500                     de->d_reclen = tswap16(reclen);
10501                     tswap64s((uint64_t *)&de->d_ino);
10502                     tswap64s((uint64_t *)&de->d_off);
10503                     de = (struct linux_dirent64 *)((char *)de + reclen);
10504                     len -= reclen;
10505                 }
10506             }
10507             unlock_user(dirp, arg2, ret);
10508         }
10509         return ret;
10510 #endif /* TARGET_NR_getdents64 */
10511 #if defined(TARGET_NR__newselect)
10512     case TARGET_NR__newselect:
10513         return do_select(arg1, arg2, arg3, arg4, arg5);
10514 #endif
10515 #ifdef TARGET_NR_poll
10516     case TARGET_NR_poll:
10517         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10518 #endif
10519 #ifdef TARGET_NR_ppoll
10520     case TARGET_NR_ppoll:
10521         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10522 #endif
10523 #ifdef TARGET_NR_ppoll_time64
10524     case TARGET_NR_ppoll_time64:
10525         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10526 #endif
10527     case TARGET_NR_flock:
10528         /* NOTE: the flock constant seems to be the same for every
10529            Linux platform */
10530         return get_errno(safe_flock(arg1, arg2));
10531     case TARGET_NR_readv:
10532         {
10533             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10534             if (vec != NULL) {
10535                 ret = get_errno(safe_readv(arg1, vec, arg3));
10536                 unlock_iovec(vec, arg2, arg3, 1);
10537             } else {
10538                 ret = -host_to_target_errno(errno);
10539             }
10540         }
10541         return ret;
10542     case TARGET_NR_writev:
10543         {
10544             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10545             if (vec != NULL) {
10546                 ret = get_errno(safe_writev(arg1, vec, arg3));
10547                 unlock_iovec(vec, arg2, arg3, 0);
10548             } else {
10549                 ret = -host_to_target_errno(errno);
10550             }
10551         }
10552         return ret;
10553 #if defined(TARGET_NR_preadv)
10554     case TARGET_NR_preadv:
10555         {
10556             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10557             if (vec != NULL) {
10558                 unsigned long low, high;
10559 
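                      /* Rebuild the 64-bit file offset from the pair of values
                       * the guest supplies in arg4/arg5 and split it into the
                       * low/high halves the host preadv syscall expects. */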
10560                 target_to_host_low_high(arg4, arg5, &low, &high);
10561                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10562                 unlock_iovec(vec, arg2, arg3, 1);
10563             } else {
10564                 ret = -host_to_target_errno(errno);
10565             }
10566         }
10567         return ret;
10568 #endif
10569 #if defined(TARGET_NR_pwritev)
10570     case TARGET_NR_pwritev:
10571         {
10572             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10573             if (vec != NULL) {
10574                 unsigned long low, high;
10575 
10576                 target_to_host_low_high(arg4, arg5, &low, &high);
10577                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10578                 unlock_iovec(vec, arg2, arg3, 0);
10579             } else {
10580                 ret = -host_to_target_errno(errno);
10581             }
10582         }
10583         return ret;
10584 #endif
10585     case TARGET_NR_getsid:
10586         return get_errno(getsid(arg1));
10587 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10588     case TARGET_NR_fdatasync:
10589         return get_errno(fdatasync(arg1));
10590 #endif
10591     case TARGET_NR_sched_getaffinity:
10592         {
10593             unsigned int mask_size;
10594             unsigned long *mask;
10595 
10596             /*
10597              * sched_getaffinity needs multiples of ulong, so need to take
10598              * care of mismatches between target ulong and host ulong sizes.
10599              */
10600             if (arg2 & (sizeof(abi_ulong) - 1)) {
10601                 return -TARGET_EINVAL;
10602             }
10603             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10604 
10605             mask = alloca(mask_size);
10606             memset(mask, 0, mask_size);
10607             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10608 
10609             if (!is_error(ret)) {
10610                 if (ret > arg2) {
10611                     /* More data returned than will fit in the caller's buffer.
10612                      * This only happens if sizeof(abi_long) < sizeof(long)
10613                      * and the caller passed us a buffer holding an odd number
10614                      * of abi_longs. If the host kernel is actually using the
10615                      * extra 4 bytes then fail EINVAL; otherwise we can just
10616                      * ignore them and only copy the interesting part.
10617                      */
10618                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10619                     if (numcpus > arg2 * 8) {
10620                         return -TARGET_EINVAL;
10621                     }
10622                     ret = arg2;
10623                 }
10624 
10625                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10626                     return -TARGET_EFAULT;
10627                 }
10628             }
10629         }
10630         return ret;
10631     case TARGET_NR_sched_setaffinity:
10632         {
10633             unsigned int mask_size;
10634             unsigned long *mask;
10635 
10636             /*
10637              * sched_setaffinity needs multiples of ulong, so need to take
10638              * care of mismatches between target ulong and host ulong sizes.
10639              */
10640             if (arg2 & (sizeof(abi_ulong) - 1)) {
10641                 return -TARGET_EINVAL;
10642             }
10643             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10644             mask = alloca(mask_size);
10645 
10646             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10647             if (ret) {
10648                 return ret;
10649             }
10650 
10651             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10652         }
10653     case TARGET_NR_getcpu:
10654         {
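            /*
             * Only request the values the guest actually asked for; the
             * third (tcache) argument is obsolete and always NULL here.
             */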
10655             unsigned cpu, node;
10656             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10657                                        arg2 ? &node : NULL,
10658                                        NULL));
10659             if (is_error(ret)) {
10660                 return ret;
10661             }
10662             if (arg1 && put_user_u32(cpu, arg1)) {
10663                 return -TARGET_EFAULT;
10664             }
10665             if (arg2 && put_user_u32(node, arg2)) {
10666                 return -TARGET_EFAULT;
10667             }
10668         }
10669         return ret;
10670     case TARGET_NR_sched_setparam:
10671         {
10672             struct sched_param *target_schp;
10673             struct sched_param schp;
10674 
10675             if (arg2 == 0) {
10676                 return -TARGET_EINVAL;
10677             }
10678             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10679                 return -TARGET_EFAULT;
10680             schp.sched_priority = tswap32(target_schp->sched_priority);
10681             unlock_user_struct(target_schp, arg2, 0);
10682             return get_errno(sched_setparam(arg1, &schp));
10683         }
10684     case TARGET_NR_sched_getparam:
10685         {
10686             struct sched_param *target_schp;
10687             struct sched_param schp;
10688 
10689             if (arg2 == 0) {
10690                 return -TARGET_EINVAL;
10691             }
10692             ret = get_errno(sched_getparam(arg1, &schp));
10693             if (!is_error(ret)) {
10694                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10695                     return -TARGET_EFAULT;
10696                 target_schp->sched_priority = tswap32(schp.sched_priority);
10697                 unlock_user_struct(target_schp, arg2, 1);
10698             }
10699         }
10700         return ret;
10701     case TARGET_NR_sched_setscheduler:
10702         {
10703             struct sched_param *target_schp;
10704             struct sched_param schp;
10705             if (arg3 == 0) {
10706                 return -TARGET_EINVAL;
10707             }
10708             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10709                 return -TARGET_EFAULT;
10710             schp.sched_priority = tswap32(target_schp->sched_priority);
10711             unlock_user_struct(target_schp, arg3, 0);
10712             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10713         }
10714     case TARGET_NR_sched_getscheduler:
10715         return get_errno(sched_getscheduler(arg1));
10716     case TARGET_NR_sched_yield:
10717         return get_errno(sched_yield());
10718     case TARGET_NR_sched_get_priority_max:
10719         return get_errno(sched_get_priority_max(arg1));
10720     case TARGET_NR_sched_get_priority_min:
10721         return get_errno(sched_get_priority_min(arg1));
10722 #ifdef TARGET_NR_sched_rr_get_interval
10723     case TARGET_NR_sched_rr_get_interval:
10724         {
10725             struct timespec ts;
10726             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10727             if (!is_error(ret)) {
10728                 ret = host_to_target_timespec(arg2, &ts);
10729             }
10730         }
10731         return ret;
10732 #endif
10733 #ifdef TARGET_NR_sched_rr_get_interval_time64
10734     case TARGET_NR_sched_rr_get_interval_time64:
10735         {
10736             struct timespec ts;
10737             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10738             if (!is_error(ret)) {
10739                 ret = host_to_target_timespec64(arg2, &ts);
10740             }
10741         }
10742         return ret;
10743 #endif
10744 #if defined(TARGET_NR_nanosleep)
10745     case TARGET_NR_nanosleep:
10746         {
10747             struct timespec req, rem;
10748             target_to_host_timespec(&req, arg1);
10749             ret = get_errno(safe_nanosleep(&req, &rem));
10750             if (is_error(ret) && arg2) {
10751                 host_to_target_timespec(arg2, &rem);
10752             }
10753         }
10754         return ret;
10755 #endif
10756     case TARGET_NR_prctl:
10757         switch (arg1) {
10758         case PR_GET_PDEATHSIG:
10759         {
10760             int deathsig;
10761             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10762             if (!is_error(ret) && arg2
10763                 && put_user_s32(deathsig, arg2)) {
10764                 return -TARGET_EFAULT;
10765             }
10766             return ret;
10767         }
10768 #ifdef PR_GET_NAME
10769         case PR_GET_NAME:
10770         {
10771             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10772             if (!name) {
10773                 return -TARGET_EFAULT;
10774             }
10775             ret = get_errno(prctl(arg1, (unsigned long)name,
10776                                   arg3, arg4, arg5));
10777             unlock_user(name, arg2, 16);
10778             return ret;
10779         }
10780         case PR_SET_NAME:
10781         {
10782             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10783             if (!name) {
10784                 return -TARGET_EFAULT;
10785             }
10786             ret = get_errno(prctl(arg1, (unsigned long)name,
10787                                   arg3, arg4, arg5));
10788             unlock_user(name, arg2, 0);
10789             return ret;
10790         }
10791 #endif
10792 #ifdef TARGET_MIPS
10793         case TARGET_PR_GET_FP_MODE:
10794         {
10795             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10796             ret = 0;
10797             if (env->CP0_Status & (1 << CP0St_FR)) {
10798                 ret |= TARGET_PR_FP_MODE_FR;
10799             }
10800             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10801                 ret |= TARGET_PR_FP_MODE_FRE;
10802             }
10803             return ret;
10804         }
10805         case TARGET_PR_SET_FP_MODE:
10806         {
10807             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10808             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10809             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10810             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10811             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10812 
10813             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10814                                             TARGET_PR_FP_MODE_FRE;
10815 
10816             /* If nothing to change, return right away, successfully.  */
10817             if (old_fr == new_fr && old_fre == new_fre) {
10818                 return 0;
10819             }
10820             /* Check the value is valid */
10821             if (arg2 & ~known_bits) {
10822                 return -TARGET_EOPNOTSUPP;
10823             }
10824             /* Setting FRE without FR is not supported.  */
10825             if (new_fre && !new_fr) {
10826                 return -TARGET_EOPNOTSUPP;
10827             }
10828             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10829                 /* FR1 is not supported */
10830                 return -TARGET_EOPNOTSUPP;
10831             }
10832             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10833                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10834                 /* cannot set FR=0 */
10835                 return -TARGET_EOPNOTSUPP;
10836             }
10837             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10838                 /* Cannot set FRE=1 */
10839                 return -TARGET_EOPNOTSUPP;
10840             }
10841 
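            /*
             * Repack the FPU registers: with FR=0 the upper half of a 64-bit
             * value lives in the low word of the odd register of an even/odd
             * pair, while with FR=1 it lives in the upper word of the even
             * register, so switching modes means moving that word between
             * the two layouts.
             */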
10842             int i;
10843             fpr_t *fpr = env->active_fpu.fpr;
10844             for (i = 0; i < 32 ; i += 2) {
10845                 if (!old_fr && new_fr) {
10846                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10847                 } else if (old_fr && !new_fr) {
10848                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10849                 }
10850             }
10851 
10852             if (new_fr) {
10853                 env->CP0_Status |= (1 << CP0St_FR);
10854                 env->hflags |= MIPS_HFLAG_F64;
10855             } else {
10856                 env->CP0_Status &= ~(1 << CP0St_FR);
10857                 env->hflags &= ~MIPS_HFLAG_F64;
10858             }
10859             if (new_fre) {
10860                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10861                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10862                     env->hflags |= MIPS_HFLAG_FRE;
10863                 }
10864             } else {
10865                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10866                 env->hflags &= ~MIPS_HFLAG_FRE;
10867             }
10868 
10869             return 0;
10870         }
10871 #endif /* MIPS */
10872 #ifdef TARGET_AARCH64
10873         case TARGET_PR_SVE_SET_VL:
10874             /*
10875              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10876              * PR_SVE_VL_INHERIT.  Note the kernel definition
10877              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10878              * even though the current architectural maximum is VQ=16.
10879              */
10880             ret = -TARGET_EINVAL;
10881             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10882                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10883                 CPUARMState *env = cpu_env;
10884                 ARMCPU *cpu = env_archcpu(env);
10885                 uint32_t vq, old_vq;
10886 
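                /*
                 * ZCR_EL1.LEN holds the vector length in 16-byte quanta,
                 * minus one.  When the length shrinks, the register state
                 * beyond the new length is discarded by
                 * aarch64_sve_narrow_vq() before the new value takes effect.
                 */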
10887                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10888                 vq = MAX(arg2 / 16, 1);
10889                 vq = MIN(vq, cpu->sve_max_vq);
10890 
10891                 if (vq < old_vq) {
10892                     aarch64_sve_narrow_vq(env, vq);
10893                 }
10894                 env->vfp.zcr_el[1] = vq - 1;
10895                 arm_rebuild_hflags(env);
10896                 ret = vq * 16;
10897             }
10898             return ret;
10899         case TARGET_PR_SVE_GET_VL:
10900             ret = -TARGET_EINVAL;
10901             {
10902                 ARMCPU *cpu = env_archcpu(cpu_env);
10903                 if (cpu_isar_feature(aa64_sve, cpu)) {
10904                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10905                 }
10906             }
10907             return ret;
10908         case TARGET_PR_PAC_RESET_KEYS:
10909             {
10910                 CPUARMState *env = cpu_env;
10911                 ARMCPU *cpu = env_archcpu(env);
10912 
10913                 if (arg3 || arg4 || arg5) {
10914                     return -TARGET_EINVAL;
10915                 }
10916                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10917                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10918                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10919                                TARGET_PR_PAC_APGAKEY);
10920                     int ret = 0;
10921                     Error *err = NULL;
10922 
10923                     if (arg2 == 0) {
10924                         arg2 = all;
10925                     } else if (arg2 & ~all) {
10926                         return -TARGET_EINVAL;
10927                     }
10928                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10929                         ret |= qemu_guest_getrandom(&env->keys.apia,
10930                                                     sizeof(ARMPACKey), &err);
10931                     }
10932                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10933                         ret |= qemu_guest_getrandom(&env->keys.apib,
10934                                                     sizeof(ARMPACKey), &err);
10935                     }
10936                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10937                         ret |= qemu_guest_getrandom(&env->keys.apda,
10938                                                     sizeof(ARMPACKey), &err);
10939                     }
10940                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10941                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10942                                                     sizeof(ARMPACKey), &err);
10943                     }
10944                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10945                         ret |= qemu_guest_getrandom(&env->keys.apga,
10946                                                     sizeof(ARMPACKey), &err);
10947                     }
10948                     if (ret != 0) {
10949                         /*
10950                          * Some unknown failure in the crypto.  The best
10951                          * we can do is log it and fail the syscall.
10952                          * The real syscall cannot fail this way.
10953                          */
10954                         qemu_log_mask(LOG_UNIMP,
10955                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10956                                       error_get_pretty(err));
10957                         error_free(err);
10958                         return -TARGET_EIO;
10959                     }
10960                     return 0;
10961                 }
10962             }
10963             return -TARGET_EINVAL;
10964 #endif /* AARCH64 */
10965         case PR_GET_SECCOMP:
10966         case PR_SET_SECCOMP:
10967             /* Disable seccomp to prevent the target disabling syscalls we
10968              * need. */
10969             return -TARGET_EINVAL;
10970         default:
10971             /* Most prctl options have no pointer arguments */
10972             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10973         }
10974         break;
10975 #ifdef TARGET_NR_arch_prctl
10976     case TARGET_NR_arch_prctl:
10977         return do_arch_prctl(cpu_env, arg1, arg2);
10978 #endif
10979 #ifdef TARGET_NR_pread64
10980     case TARGET_NR_pread64:
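        /*
         * Some 32-bit ABIs pass 64-bit values in aligned register pairs,
         * which inserts a padding slot before the offset, so its two halves
         * arrive one argument later than usual.
         */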
10981         if (regpairs_aligned(cpu_env, num)) {
10982             arg4 = arg5;
10983             arg5 = arg6;
10984         }
10985         if (arg2 == 0 && arg3 == 0) {
10986             /* Special-case NULL buffer and zero length, which should succeed */
10987             p = 0;
10988         } else {
10989             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10990             if (!p) {
10991                 return -TARGET_EFAULT;
10992             }
10993         }
10994         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10995         unlock_user(p, arg2, ret);
10996         return ret;
10997     case TARGET_NR_pwrite64:
10998         if (regpairs_aligned(cpu_env, num)) {
10999             arg4 = arg5;
11000             arg5 = arg6;
11001         }
11002         if (arg2 == 0 && arg3 == 0) {
11003             /* Special-case NULL buffer and zero length, which should succeed */
11004             p = 0;
11005         } else {
11006             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11007             if (!p) {
11008                 return -TARGET_EFAULT;
11009             }
11010         }
11011         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11012         unlock_user(p, arg2, 0);
11013         return ret;
11014 #endif
11015     case TARGET_NR_getcwd:
11016         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11017             return -TARGET_EFAULT;
11018         ret = get_errno(sys_getcwd1(p, arg2));
11019         unlock_user(p, arg1, ret);
11020         return ret;
11021     case TARGET_NR_capget:
11022     case TARGET_NR_capset:
11023     {
11024         struct target_user_cap_header *target_header;
11025         struct target_user_cap_data *target_data = NULL;
11026         struct __user_cap_header_struct header;
11027         struct __user_cap_data_struct data[2];
11028         struct __user_cap_data_struct *dataptr = NULL;
11029         int i, target_datalen;
11030         int data_items = 1;
11031 
11032         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11033             return -TARGET_EFAULT;
11034         }
11035         header.version = tswap32(target_header->version);
11036         header.pid = tswap32(target_header->pid);
11037 
11038         if (header.version != _LINUX_CAPABILITY_VERSION) {
11039             /* Versions 2 and up take a pointer to two user_cap_data structs */
11040             data_items = 2;
11041         }
11042 
11043         target_datalen = sizeof(*target_data) * data_items;
11044 
11045         if (arg2) {
11046             if (num == TARGET_NR_capget) {
11047                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11048             } else {
11049                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11050             }
11051             if (!target_data) {
11052                 unlock_user_struct(target_header, arg1, 0);
11053                 return -TARGET_EFAULT;
11054             }
11055 
11056             if (num == TARGET_NR_capset) {
11057                 for (i = 0; i < data_items; i++) {
11058                     data[i].effective = tswap32(target_data[i].effective);
11059                     data[i].permitted = tswap32(target_data[i].permitted);
11060                     data[i].inheritable = tswap32(target_data[i].inheritable);
11061                 }
11062             }
11063 
11064             dataptr = data;
11065         }
11066 
11067         if (num == TARGET_NR_capget) {
11068             ret = get_errno(capget(&header, dataptr));
11069         } else {
11070             ret = get_errno(capset(&header, dataptr));
11071         }
11072 
11073         /* The kernel always updates version for both capget and capset */
11074         target_header->version = tswap32(header.version);
11075         unlock_user_struct(target_header, arg1, 1);
11076 
11077         if (arg2) {
11078             if (num == TARGET_NR_capget) {
11079                 for (i = 0; i < data_items; i++) {
11080                     target_data[i].effective = tswap32(data[i].effective);
11081                     target_data[i].permitted = tswap32(data[i].permitted);
11082                     target_data[i].inheritable = tswap32(data[i].inheritable);
11083                 }
11084                 unlock_user(target_data, arg2, target_datalen);
11085             } else {
11086                 unlock_user(target_data, arg2, 0);
11087             }
11088         }
11089         return ret;
11090     }
11091     case TARGET_NR_sigaltstack:
11092         return do_sigaltstack(arg1, arg2,
11093                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11094 
11095 #ifdef CONFIG_SENDFILE
11096 #ifdef TARGET_NR_sendfile
11097     case TARGET_NR_sendfile:
11098     {
11099         off_t *offp = NULL;
11100         off_t off;
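        /*
         * If the guest supplied an offset pointer, copy the abi_long value
         * in, let the host sendfile() update it, and write it back on
         * success.
         */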
11101         if (arg3) {
11102             ret = get_user_sal(off, arg3);
11103             if (is_error(ret)) {
11104                 return ret;
11105             }
11106             offp = &off;
11107         }
11108         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11109         if (!is_error(ret) && arg3) {
11110             abi_long ret2 = put_user_sal(off, arg3);
11111             if (is_error(ret2)) {
11112                 ret = ret2;
11113             }
11114         }
11115         return ret;
11116     }
11117 #endif
11118 #ifdef TARGET_NR_sendfile64
11119     case TARGET_NR_sendfile64:
11120     {
11121         off_t *offp = NULL;
11122         off_t off;
11123         if (arg3) {
11124             ret = get_user_s64(off, arg3);
11125             if (is_error(ret)) {
11126                 return ret;
11127             }
11128             offp = &off;
11129         }
11130         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11131         if (!is_error(ret) && arg3) {
11132             abi_long ret2 = put_user_s64(off, arg3);
11133             if (is_error(ret2)) {
11134                 ret = ret2;
11135             }
11136         }
11137         return ret;
11138     }
11139 #endif
11140 #endif
11141 #ifdef TARGET_NR_vfork
11142     case TARGET_NR_vfork:
11143         return get_errno(do_fork(cpu_env,
11144                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11145                          0, 0, 0, 0));
11146 #endif
11147 #ifdef TARGET_NR_ugetrlimit
11148     case TARGET_NR_ugetrlimit:
11149     {
11150         struct rlimit rlim;
11151         int resource = target_to_host_resource(arg1);
11152         ret = get_errno(getrlimit(resource, &rlim));
11153         if (!is_error(ret)) {
11154             struct target_rlimit *target_rlim;
11155             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11156                 return -TARGET_EFAULT;
11157             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11158             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11159             unlock_user_struct(target_rlim, arg2, 1);
11160         }
11161         return ret;
11162     }
11163 #endif
11164 #ifdef TARGET_NR_truncate64
11165     case TARGET_NR_truncate64:
11166         if (!(p = lock_user_string(arg1)))
11167             return -TARGET_EFAULT;
11168         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11169         unlock_user(p, arg1, 0);
11170         return ret;
11171 #endif
11172 #ifdef TARGET_NR_ftruncate64
11173     case TARGET_NR_ftruncate64:
11174         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11175 #endif
11176 #ifdef TARGET_NR_stat64
11177     case TARGET_NR_stat64:
11178         if (!(p = lock_user_string(arg1))) {
11179             return -TARGET_EFAULT;
11180         }
11181         ret = get_errno(stat(path(p), &st));
11182         unlock_user(p, arg1, 0);
11183         if (!is_error(ret))
11184             ret = host_to_target_stat64(cpu_env, arg2, &st);
11185         return ret;
11186 #endif
11187 #ifdef TARGET_NR_lstat64
11188     case TARGET_NR_lstat64:
11189         if (!(p = lock_user_string(arg1))) {
11190             return -TARGET_EFAULT;
11191         }
11192         ret = get_errno(lstat(path(p), &st));
11193         unlock_user(p, arg1, 0);
11194         if (!is_error(ret))
11195             ret = host_to_target_stat64(cpu_env, arg2, &st);
11196         return ret;
11197 #endif
11198 #ifdef TARGET_NR_fstat64
11199     case TARGET_NR_fstat64:
11200         ret = get_errno(fstat(arg1, &st));
11201         if (!is_error(ret))
11202             ret = host_to_target_stat64(cpu_env, arg2, &st);
11203         return ret;
11204 #endif
11205 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11206 #ifdef TARGET_NR_fstatat64
11207     case TARGET_NR_fstatat64:
11208 #endif
11209 #ifdef TARGET_NR_newfstatat
11210     case TARGET_NR_newfstatat:
11211 #endif
11212         if (!(p = lock_user_string(arg2))) {
11213             return -TARGET_EFAULT;
11214         }
11215         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11216         unlock_user(p, arg2, 0);
11217         if (!is_error(ret))
11218             ret = host_to_target_stat64(cpu_env, arg3, &st);
11219         return ret;
11220 #endif
11221 #if defined(TARGET_NR_statx)
11222     case TARGET_NR_statx:
11223         {
11224             struct target_statx *target_stx;
11225             int dirfd = arg1;
11226             int flags = arg3;
11227 
11228             p = lock_user_string(arg2);
11229             if (p == NULL) {
11230                 return -TARGET_EFAULT;
11231             }
11232 #if defined(__NR_statx)
11233             {
11234                 /*
11235                  * It is assumed that struct statx is architecture independent.
11236                  */
11237                 struct target_statx host_stx;
11238                 int mask = arg4;
11239 
11240                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11241                 if (!is_error(ret)) {
11242                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11243                         unlock_user(p, arg2, 0);
11244                         return -TARGET_EFAULT;
11245                     }
11246                 }
11247 
11248                 if (ret != -TARGET_ENOSYS) {
11249                     unlock_user(p, arg2, 0);
11250                     return ret;
11251                 }
11252             }
11253 #endif
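            /*
             * Fall back to fstatat() and fill in only those target_statx
             * fields that an ordinary struct stat can provide.
             */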
11254             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11255             unlock_user(p, arg2, 0);
11256 
11257             if (!is_error(ret)) {
11258                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11259                     return -TARGET_EFAULT;
11260                 }
11261                 memset(target_stx, 0, sizeof(*target_stx));
11262                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11263                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11264                 __put_user(st.st_ino, &target_stx->stx_ino);
11265                 __put_user(st.st_mode, &target_stx->stx_mode);
11266                 __put_user(st.st_uid, &target_stx->stx_uid);
11267                 __put_user(st.st_gid, &target_stx->stx_gid);
11268                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11269                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11270                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11271                 __put_user(st.st_size, &target_stx->stx_size);
11272                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11273                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11274                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11275                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11276                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11277                 unlock_user_struct(target_stx, arg5, 1);
11278             }
11279         }
11280         return ret;
11281 #endif
11282 #ifdef TARGET_NR_lchown
11283     case TARGET_NR_lchown:
11284         if (!(p = lock_user_string(arg1)))
11285             return -TARGET_EFAULT;
11286         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11287         unlock_user(p, arg1, 0);
11288         return ret;
11289 #endif
11290 #ifdef TARGET_NR_getuid
11291     case TARGET_NR_getuid:
11292         return get_errno(high2lowuid(getuid()));
11293 #endif
11294 #ifdef TARGET_NR_getgid
11295     case TARGET_NR_getgid:
11296         return get_errno(high2lowgid(getgid()));
11297 #endif
11298 #ifdef TARGET_NR_geteuid
11299     case TARGET_NR_geteuid:
11300         return get_errno(high2lowuid(geteuid()));
11301 #endif
11302 #ifdef TARGET_NR_getegid
11303     case TARGET_NR_getegid:
11304         return get_errno(high2lowgid(getegid()));
11305 #endif
11306     case TARGET_NR_setreuid:
11307         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11308     case TARGET_NR_setregid:
11309         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11310     case TARGET_NR_getgroups:
11311         {
11312             int gidsetsize = arg1;
11313             target_id *target_grouplist;
11314             gid_t *grouplist;
11315             int i;
11316 
11317             grouplist = alloca(gidsetsize * sizeof(gid_t));
11318             ret = get_errno(getgroups(gidsetsize, grouplist));
11319             if (gidsetsize == 0)
11320                 return ret;
11321             if (!is_error(ret)) {
11322                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11323                 if (!target_grouplist)
11324                     return -TARGET_EFAULT;
11325                 for (i = 0; i < ret; i++)
11326                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11327                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11328             }
11329         }
11330         return ret;
11331     case TARGET_NR_setgroups:
11332         {
11333             int gidsetsize = arg1;
11334             target_id *target_grouplist;
11335             gid_t *grouplist = NULL;
11336             int i;
11337             if (gidsetsize) {
11338                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11339                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11340                 if (!target_grouplist) {
11341                     return -TARGET_EFAULT;
11342                 }
11343                 for (i = 0; i < gidsetsize; i++) {
11344                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11345                 }
11346                 unlock_user(target_grouplist, arg2, 0);
11347             }
11348             return get_errno(setgroups(gidsetsize, grouplist));
11349         }
11350     case TARGET_NR_fchown:
11351         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11352 #if defined(TARGET_NR_fchownat)
11353     case TARGET_NR_fchownat:
11354         if (!(p = lock_user_string(arg2)))
11355             return -TARGET_EFAULT;
11356         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11357                                  low2highgid(arg4), arg5));
11358         unlock_user(p, arg2, 0);
11359         return ret;
11360 #endif
11361 #ifdef TARGET_NR_setresuid
11362     case TARGET_NR_setresuid:
11363         return get_errno(sys_setresuid(low2highuid(arg1),
11364                                        low2highuid(arg2),
11365                                        low2highuid(arg3)));
11366 #endif
11367 #ifdef TARGET_NR_getresuid
11368     case TARGET_NR_getresuid:
11369         {
11370             uid_t ruid, euid, suid;
11371             ret = get_errno(getresuid(&ruid, &euid, &suid));
11372             if (!is_error(ret)) {
11373                 if (put_user_id(high2lowuid(ruid), arg1)
11374                     || put_user_id(high2lowuid(euid), arg2)
11375                     || put_user_id(high2lowuid(suid), arg3))
11376                     return -TARGET_EFAULT;
11377             }
11378         }
11379         return ret;
11380 #endif
11381 #ifdef TARGET_NR_getresgid
11382     case TARGET_NR_setresgid:
11383         return get_errno(sys_setresgid(low2highgid(arg1),
11384                                        low2highgid(arg2),
11385                                        low2highgid(arg3)));
11386 #endif
11387 #ifdef TARGET_NR_getresgid
11388     case TARGET_NR_getresgid:
11389         {
11390             gid_t rgid, egid, sgid;
11391             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11392             if (!is_error(ret)) {
11393                 if (put_user_id(high2lowgid(rgid), arg1)
11394                     || put_user_id(high2lowgid(egid), arg2)
11395                     || put_user_id(high2lowgid(sgid), arg3))
11396                     return -TARGET_EFAULT;
11397             }
11398         }
11399         return ret;
11400 #endif
11401 #ifdef TARGET_NR_chown
11402     case TARGET_NR_chown:
11403         if (!(p = lock_user_string(arg1)))
11404             return -TARGET_EFAULT;
11405         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11406         unlock_user(p, arg1, 0);
11407         return ret;
11408 #endif
11409     case TARGET_NR_setuid:
11410         return get_errno(sys_setuid(low2highuid(arg1)));
11411     case TARGET_NR_setgid:
11412         return get_errno(sys_setgid(low2highgid(arg1)));
11413     case TARGET_NR_setfsuid:
11414         return get_errno(setfsuid(arg1));
11415     case TARGET_NR_setfsgid:
11416         return get_errno(setfsgid(arg1));
11417 
11418 #ifdef TARGET_NR_lchown32
11419     case TARGET_NR_lchown32:
11420         if (!(p = lock_user_string(arg1)))
11421             return -TARGET_EFAULT;
11422         ret = get_errno(lchown(p, arg2, arg3));
11423         unlock_user(p, arg1, 0);
11424         return ret;
11425 #endif
11426 #ifdef TARGET_NR_getuid32
11427     case TARGET_NR_getuid32:
11428         return get_errno(getuid());
11429 #endif
11430 
11431 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11432     /* Alpha specific */
11433     case TARGET_NR_getxuid:
11434         {
11435             uid_t euid;
11436             euid = geteuid();
11437             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11438         }
11439         return get_errno(getuid());
11440 #endif
11441 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11442     /* Alpha specific */
11443     case TARGET_NR_getxgid:
11444         {
11445             gid_t egid;
11446             egid = getegid();
11447             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11448         }
11449         return get_errno(getgid());
11450 #endif
11451 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11452     /* Alpha specific */
11453     case TARGET_NR_osf_getsysinfo:
11454         ret = -TARGET_EOPNOTSUPP;
11455         switch (arg1) {
11456           case TARGET_GSI_IEEE_FP_CONTROL:
11457             {
11458                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11459                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11460 
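                /*
                 * The exception status bits are kept only in the hardware
                 * FPCR (see the SSI_IEEE_FP_CONTROL handler below), so merge
                 * them into the software control word before returning it.
                 */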
11461                 swcr &= ~SWCR_STATUS_MASK;
11462                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11463 
11464                 if (put_user_u64(swcr, arg2))
11465                     return -TARGET_EFAULT;
11466                 ret = 0;
11467             }
11468             break;
11469 
11470           /* case GSI_IEEE_STATE_AT_SIGNAL:
11471              -- Not implemented in linux kernel.
11472              case GSI_UACPROC:
11473              -- Retrieves current unaligned access state; not much used.
11474              case GSI_PROC_TYPE:
11475              -- Retrieves implver information; surely not used.
11476              case GSI_GET_HWRPB:
11477              -- Grabs a copy of the HWRPB; surely not used.
11478           */
11479         }
11480         return ret;
11481 #endif
11482 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11483     /* Alpha specific */
11484     case TARGET_NR_osf_setsysinfo:
11485         ret = -TARGET_EOPNOTSUPP;
11486         switch (arg1) {
11487           case TARGET_SSI_IEEE_FP_CONTROL:
11488             {
11489                 uint64_t swcr, fpcr;
11490 
11491                 if (get_user_u64(swcr, arg2)) {
11492                     return -TARGET_EFAULT;
11493                 }
11494 
11495                 /*
11496                  * The kernel calls swcr_update_status to update the
11497                  * status bits from the fpcr at every point that it
11498                  * could be queried.  Therefore, we store the status
11499                  * bits only in FPCR.
11500                  */
11501                 ((CPUAlphaState *)cpu_env)->swcr
11502                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11503 
11504                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11505                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11506                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11507                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11508                 ret = 0;
11509             }
11510             break;
11511 
11512           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11513             {
11514                 uint64_t exc, fpcr, fex;
11515 
11516                 if (get_user_u64(exc, arg2)) {
11517                     return -TARGET_EFAULT;
11518                 }
11519                 exc &= SWCR_STATUS_MASK;
11520                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11521 
11522                 /* Old exceptions are not signaled.  */
11523                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11524                 fex = exc & ~fex;
11525                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11526                 fex &= ((CPUArchState *)cpu_env)->swcr;
11527 
11528                 /* Update the hardware fpcr.  */
11529                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11530                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11531 
11532                 if (fex) {
11533                     int si_code = TARGET_FPE_FLTUNK;
11534                     target_siginfo_t info;
11535 
11536                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11537                         si_code = TARGET_FPE_FLTUND;
11538                     }
11539                     if (fex & SWCR_TRAP_ENABLE_INE) {
11540                         si_code = TARGET_FPE_FLTRES;
11541                     }
11542                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11543                         si_code = TARGET_FPE_FLTUND;
11544                     }
11545                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11546                         si_code = TARGET_FPE_FLTOVF;
11547                     }
11548                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11549                         si_code = TARGET_FPE_FLTDIV;
11550                     }
11551                     if (fex & SWCR_TRAP_ENABLE_INV) {
11552                         si_code = TARGET_FPE_FLTINV;
11553                     }
11554 
11555                     info.si_signo = SIGFPE;
11556                     info.si_errno = 0;
11557                     info.si_code = si_code;
11558                     info._sifields._sigfault._addr
11559                         = ((CPUArchState *)cpu_env)->pc;
11560                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11561                                  QEMU_SI_FAULT, &info);
11562                 }
11563                 ret = 0;
11564             }
11565             break;
11566 
11567           /* case SSI_NVPAIRS:
11568              -- Used with SSIN_UACPROC to enable unaligned accesses.
11569              case SSI_IEEE_STATE_AT_SIGNAL:
11570              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11571              -- Not implemented in linux kernel
11572           */
11573         }
11574         return ret;
11575 #endif
11576 #ifdef TARGET_NR_osf_sigprocmask
11577     /* Alpha specific.  */
11578     case TARGET_NR_osf_sigprocmask:
11579         {
11580             abi_ulong mask;
11581             int how;
11582             sigset_t set, oldset;
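            /*
             * The OSF/1 variant returns the previous mask in the syscall
             * result rather than through a user-memory pointer.
             */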
11583 
11584             switch (arg1) {
11585             case TARGET_SIG_BLOCK:
11586                 how = SIG_BLOCK;
11587                 break;
11588             case TARGET_SIG_UNBLOCK:
11589                 how = SIG_UNBLOCK;
11590                 break;
11591             case TARGET_SIG_SETMASK:
11592                 how = SIG_SETMASK;
11593                 break;
11594             default:
11595                 return -TARGET_EINVAL;
11596             }
11597             mask = arg2;
11598             target_to_host_old_sigset(&set, &mask);
11599             ret = do_sigprocmask(how, &set, &oldset);
11600             if (!ret) {
11601                 host_to_target_old_sigset(&mask, &oldset);
11602                 ret = mask;
11603             }
11604         }
11605         return ret;
11606 #endif
11607 
11608 #ifdef TARGET_NR_getgid32
11609     case TARGET_NR_getgid32:
11610         return get_errno(getgid());
11611 #endif
11612 #ifdef TARGET_NR_geteuid32
11613     case TARGET_NR_geteuid32:
11614         return get_errno(geteuid());
11615 #endif
11616 #ifdef TARGET_NR_getegid32
11617     case TARGET_NR_getegid32:
11618         return get_errno(getegid());
11619 #endif
11620 #ifdef TARGET_NR_setreuid32
11621     case TARGET_NR_setreuid32:
11622         return get_errno(setreuid(arg1, arg2));
11623 #endif
11624 #ifdef TARGET_NR_setregid32
11625     case TARGET_NR_setregid32:
11626         return get_errno(setregid(arg1, arg2));
11627 #endif
11628 #ifdef TARGET_NR_getgroups32
11629     case TARGET_NR_getgroups32:
11630         {
11631             int gidsetsize = arg1;
11632             uint32_t *target_grouplist;
11633             gid_t *grouplist;
11634             int i;
11635 
11636             grouplist = alloca(gidsetsize * sizeof(gid_t));
11637             ret = get_errno(getgroups(gidsetsize, grouplist));
11638             if (gidsetsize == 0)
11639                 return ret;
11640             if (!is_error(ret)) {
11641                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11642                 if (!target_grouplist) {
11643                     return -TARGET_EFAULT;
11644                 }
11645                 for (i = 0; i < ret; i++)
11646                     target_grouplist[i] = tswap32(grouplist[i]);
11647                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11648             }
11649         }
11650         return ret;
11651 #endif
11652 #ifdef TARGET_NR_setgroups32
11653     case TARGET_NR_setgroups32:
11654         {
11655             int gidsetsize = arg1;
11656             uint32_t *target_grouplist;
11657             gid_t *grouplist;
11658             int i;
11659 
11660             grouplist = alloca(gidsetsize * sizeof(gid_t));
11661             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11662             if (!target_grouplist) {
11663                 return -TARGET_EFAULT;
11664             }
11665             for (i = 0; i < gidsetsize; i++)
11666                 grouplist[i] = tswap32(target_grouplist[i]);
11667             unlock_user(target_grouplist, arg2, 0);
11668             return get_errno(setgroups(gidsetsize, grouplist));
11669         }
11670 #endif
11671 #ifdef TARGET_NR_fchown32
11672     case TARGET_NR_fchown32:
11673         return get_errno(fchown(arg1, arg2, arg3));
11674 #endif
11675 #ifdef TARGET_NR_setresuid32
11676     case TARGET_NR_setresuid32:
11677         return get_errno(sys_setresuid(arg1, arg2, arg3));
11678 #endif
11679 #ifdef TARGET_NR_getresuid32
11680     case TARGET_NR_getresuid32:
11681         {
11682             uid_t ruid, euid, suid;
11683             ret = get_errno(getresuid(&ruid, &euid, &suid));
11684             if (!is_error(ret)) {
11685                 if (put_user_u32(ruid, arg1)
11686                     || put_user_u32(euid, arg2)
11687                     || put_user_u32(suid, arg3))
11688                     return -TARGET_EFAULT;
11689             }
11690         }
11691         return ret;
11692 #endif
11693 #ifdef TARGET_NR_setresgid32
11694     case TARGET_NR_setresgid32:
11695         return get_errno(sys_setresgid(arg1, arg2, arg3));
11696 #endif
11697 #ifdef TARGET_NR_getresgid32
11698     case TARGET_NR_getresgid32:
11699         {
11700             gid_t rgid, egid, sgid;
11701             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11702             if (!is_error(ret)) {
11703                 if (put_user_u32(rgid, arg1)
11704                     || put_user_u32(egid, arg2)
11705                     || put_user_u32(sgid, arg3))
11706                     return -TARGET_EFAULT;
11707             }
11708         }
11709         return ret;
11710 #endif
11711 #ifdef TARGET_NR_chown32
11712     case TARGET_NR_chown32:
11713         if (!(p = lock_user_string(arg1)))
11714             return -TARGET_EFAULT;
11715         ret = get_errno(chown(p, arg2, arg3));
11716         unlock_user(p, arg1, 0);
11717         return ret;
11718 #endif
11719 #ifdef TARGET_NR_setuid32
11720     case TARGET_NR_setuid32:
11721         return get_errno(sys_setuid(arg1));
11722 #endif
11723 #ifdef TARGET_NR_setgid32
11724     case TARGET_NR_setgid32:
11725         return get_errno(sys_setgid(arg1));
11726 #endif
11727 #ifdef TARGET_NR_setfsuid32
11728     case TARGET_NR_setfsuid32:
11729         return get_errno(setfsuid(arg1));
11730 #endif
11731 #ifdef TARGET_NR_setfsgid32
11732     case TARGET_NR_setfsgid32:
11733         return get_errno(setfsgid(arg1));
11734 #endif
11735 #ifdef TARGET_NR_mincore
11736     case TARGET_NR_mincore:
11737         {
11738             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11739             if (!a) {
11740                 return -TARGET_ENOMEM;
11741             }
11742             p = lock_user_string(arg3);
11743             if (!p) {
11744                 ret = -TARGET_EFAULT;
11745             } else {
11746                 ret = get_errno(mincore(a, arg2, p));
11747                 unlock_user(p, arg3, ret);
11748             }
11749             unlock_user(a, arg1, 0);
11750         }
11751         return ret;
11752 #endif
11753 #ifdef TARGET_NR_arm_fadvise64_64
11754     case TARGET_NR_arm_fadvise64_64:
11755         /* arm_fadvise64_64 looks like fadvise64_64 but
11756          * with different argument order: fd, advice, offset, len
11757          * rather than the usual fd, offset, len, advice.
11758          * Note that offset and len are both 64-bit so appear as
11759          * pairs of 32-bit registers.
11760          */
11761         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11762                             target_offset64(arg5, arg6), arg2);
11763         return -host_to_target_errno(ret);
11764 #endif
11765 
11766 #if TARGET_ABI_BITS == 32
11767 
11768 #ifdef TARGET_NR_fadvise64_64
11769     case TARGET_NR_fadvise64_64:
11770 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11771         /* 6 args: fd, advice, offset (high, low), len (high, low) */
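        /* Rotate advice from arg2 to arg6 so the common call below can use
         * the usual fd, offset, len, advice ordering. */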
11772         ret = arg2;
11773         arg2 = arg3;
11774         arg3 = arg4;
11775         arg4 = arg5;
11776         arg5 = arg6;
11777         arg6 = ret;
11778 #else
11779         /* 6 args: fd, offset (high, low), len (high, low), advice */
11780         if (regpairs_aligned(cpu_env, num)) {
11781             /* offset is in (3,4), len in (5,6) and advice in 7 */
11782             arg2 = arg3;
11783             arg3 = arg4;
11784             arg4 = arg5;
11785             arg5 = arg6;
11786             arg6 = arg7;
11787         }
11788 #endif
11789         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11790                             target_offset64(arg4, arg5), arg6);
11791         return -host_to_target_errno(ret);
11792 #endif
11793 
11794 #ifdef TARGET_NR_fadvise64
11795     case TARGET_NR_fadvise64:
11796         /* 5 args: fd, offset (high, low), len, advice */
11797         if (regpairs_aligned(cpu_env, num)) {
11798             /* offset is in (3,4), len in 5 and advice in 6 */
11799             arg2 = arg3;
11800             arg3 = arg4;
11801             arg4 = arg5;
11802             arg5 = arg6;
11803         }
11804         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11805         return -host_to_target_errno(ret);
11806 #endif
11807 
11808 #else /* not a 32-bit ABI */
11809 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11810 #ifdef TARGET_NR_fadvise64_64
11811     case TARGET_NR_fadvise64_64:
11812 #endif
11813 #ifdef TARGET_NR_fadvise64
11814     case TARGET_NR_fadvise64:
11815 #endif
11816 #ifdef TARGET_S390X
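        /*
         * s390x uses different numeric values for POSIX_FADV_DONTNEED and
         * POSIX_FADV_NOREUSE, so remap the guest advice before passing it
         * to the host.
         */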
11817         switch (arg4) {
11818         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11819         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11820         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11821         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11822         default: break;
11823         }
11824 #endif
11825         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11826 #endif
11827 #endif /* end of 64-bit ABI fadvise handling */
11828 
11829 #ifdef TARGET_NR_madvise
11830     case TARGET_NR_madvise:
11831         /* A straight passthrough may not be safe because qemu sometimes
11832            turns private file-backed mappings into anonymous mappings.
11833            This will break MADV_DONTNEED.
11834            This is a hint, so ignoring and returning success is ok.  */
11835         return 0;
11836 #endif
11837 #ifdef TARGET_NR_fcntl64
11838     case TARGET_NR_fcntl64:
11839     {
11840         int cmd;
11841         struct flock64 fl;
11842         from_flock64_fn *copyfrom = copy_from_user_flock64;
11843         to_flock64_fn *copyto = copy_to_user_flock64;
11844 
11845 #ifdef TARGET_ARM
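        /*
         * The old ARM OABI lays out struct flock64 without the 64-bit
         * alignment padding used by EABI, so it needs its own
         * copy-in/copy-out helpers.
         */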
11846         if (!((CPUARMState *)cpu_env)->eabi) {
11847             copyfrom = copy_from_user_oabi_flock64;
11848             copyto = copy_to_user_oabi_flock64;
11849         }
11850 #endif
11851 
11852         cmd = target_to_host_fcntl_cmd(arg2);
11853         if (cmd == -TARGET_EINVAL) {
11854             return cmd;
11855         }
11856 
11857         switch (arg2) {
11858         case TARGET_F_GETLK64:
11859             ret = copyfrom(&fl, arg3);
11860             if (ret) {
11861                 break;
11862             }
11863             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11864             if (ret == 0) {
11865                 ret = copyto(arg3, &fl);
11866             }
11867             break;
11868 
11869         case TARGET_F_SETLK64:
11870         case TARGET_F_SETLKW64:
11871             ret = copyfrom(&fl, arg3);
11872             if (ret) {
11873                 break;
11874             }
11875             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11876             break;
11877         default:
11878             ret = do_fcntl(arg1, arg2, arg3);
11879             break;
11880         }
11881         return ret;
11882     }
11883 #endif
11884 #ifdef TARGET_NR_cacheflush
11885     case TARGET_NR_cacheflush:
11886         /* self-modifying code is handled automatically, so nothing needed */
11887         return 0;
11888 #endif
11889 #ifdef TARGET_NR_getpagesize
11890     case TARGET_NR_getpagesize:
11891         return TARGET_PAGE_SIZE;
11892 #endif
11893     case TARGET_NR_gettid:
11894         return get_errno(sys_gettid());
11895 #ifdef TARGET_NR_readahead
11896     case TARGET_NR_readahead:
11897 #if TARGET_ABI_BITS == 32
11898         if (regpairs_aligned(cpu_env, num)) {
11899             arg2 = arg3;
11900             arg3 = arg4;
11901             arg4 = arg5;
11902         }
11903         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11904 #else
11905         ret = get_errno(readahead(arg1, arg2, arg3));
11906 #endif
11907         return ret;
11908 #endif
11909 #ifdef CONFIG_ATTR
11910 #ifdef TARGET_NR_setxattr
11911     case TARGET_NR_listxattr:
11912     case TARGET_NR_llistxattr:
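        /*
         * A zero guest buffer pointer is passed through unchanged, which
         * preserves the "query required size" form of the call.
         */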
11913     {
11914         void *p, *b = 0;
11915         if (arg2) {
11916             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11917             if (!b) {
11918                 return -TARGET_EFAULT;
11919             }
11920         }
11921         p = lock_user_string(arg1);
11922         if (p) {
11923             if (num == TARGET_NR_listxattr) {
11924                 ret = get_errno(listxattr(p, b, arg3));
11925             } else {
11926                 ret = get_errno(llistxattr(p, b, arg3));
11927             }
11928         } else {
11929             ret = -TARGET_EFAULT;
11930         }
11931         unlock_user(p, arg1, 0);
11932         unlock_user(b, arg2, arg3);
11933         return ret;
11934     }
11935     case TARGET_NR_flistxattr:
11936     {
11937         void *b = 0;
11938         if (arg2) {
11939             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11940             if (!b) {
11941                 return -TARGET_EFAULT;
11942             }
11943         }
11944         ret = get_errno(flistxattr(arg1, b, arg3));
11945         unlock_user(b, arg2, arg3);
11946         return ret;
11947     }
11948     case TARGET_NR_setxattr:
11949     case TARGET_NR_lsetxattr:
11950         {
11951             void *p, *n, *v = 0;
11952             if (arg3) {
11953                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11954                 if (!v) {
11955                     return -TARGET_EFAULT;
11956                 }
11957             }
11958             p = lock_user_string(arg1);
11959             n = lock_user_string(arg2);
11960             if (p && n) {
11961                 if (num == TARGET_NR_setxattr) {
11962                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11963                 } else {
11964                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11965                 }
11966             } else {
11967                 ret = -TARGET_EFAULT;
11968             }
11969             unlock_user(p, arg1, 0);
11970             unlock_user(n, arg2, 0);
11971             unlock_user(v, arg3, 0);
11972         }
11973         return ret;
11974     case TARGET_NR_fsetxattr:
11975         {
11976             void *n, *v = 0;
11977             if (arg3) {
11978                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11979                 if (!v) {
11980                     return -TARGET_EFAULT;
11981                 }
11982             }
11983             n = lock_user_string(arg2);
11984             if (n) {
11985                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11986             } else {
11987                 ret = -TARGET_EFAULT;
11988             }
11989             unlock_user(n, arg2, 0);
11990             unlock_user(v, arg3, 0);
11991         }
11992         return ret;
11993     case TARGET_NR_getxattr:
11994     case TARGET_NR_lgetxattr:
11995         {
11996             void *p, *n, *v = 0;
11997             if (arg3) {
11998                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11999                 if (!v) {
12000                     return -TARGET_EFAULT;
12001                 }
12002             }
12003             p = lock_user_string(arg1);
12004             n = lock_user_string(arg2);
12005             if (p && n) {
12006                 if (num == TARGET_NR_getxattr) {
12007                     ret = get_errno(getxattr(p, n, v, arg4));
12008                 } else {
12009                     ret = get_errno(lgetxattr(p, n, v, arg4));
12010                 }
12011             } else {
12012                 ret = -TARGET_EFAULT;
12013             }
12014             unlock_user(p, arg1, 0);
12015             unlock_user(n, arg2, 0);
12016             unlock_user(v, arg3, arg4);
12017         }
12018         return ret;
12019     case TARGET_NR_fgetxattr:
12020         {
12021             void *n, *v = 0;
12022             if (arg3) {
12023                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12024                 if (!v) {
12025                     return -TARGET_EFAULT;
12026                 }
12027             }
12028             n = lock_user_string(arg2);
12029             if (n) {
12030                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12031             } else {
12032                 ret = -TARGET_EFAULT;
12033             }
12034             unlock_user(n, arg2, 0);
12035             unlock_user(v, arg3, arg4);
12036         }
12037         return ret;
12038     case TARGET_NR_removexattr:
12039     case TARGET_NR_lremovexattr:
12040         {
12041             void *p, *n;
12042             p = lock_user_string(arg1);
12043             n = lock_user_string(arg2);
12044             if (p && n) {
12045                 if (num == TARGET_NR_removexattr) {
12046                     ret = get_errno(removexattr(p, n));
12047                 } else {
12048                     ret = get_errno(lremovexattr(p, n));
12049                 }
12050             } else {
12051                 ret = -TARGET_EFAULT;
12052             }
12053             unlock_user(p, arg1, 0);
12054             unlock_user(n, arg2, 0);
12055         }
12056         return ret;
12057     case TARGET_NR_fremovexattr:
12058         {
12059             void *n;
12060             n = lock_user_string(arg2);
12061             if (n) {
12062                 ret = get_errno(fremovexattr(arg1, n));
12063             } else {
12064                 ret = -TARGET_EFAULT;
12065             }
12066             unlock_user(n, arg2, 0);
12067         }
12068         return ret;
12069 #endif
12070 #endif /* CONFIG_ATTR */
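    /*
     * set_thread_area/get_thread_area have no generic implementation: the
     * cases below store or fetch the thread pointer in per-architecture CPU
     * state (MIPS CP0_UserLocal, the CRIS PR_PID register, the i386/ABI32
     * do_set_thread_area()/do_get_thread_area() helpers, or the m68k
     * TaskState tp_value) and return -TARGET_ENOSYS everywhere else.
     */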
12071 #ifdef TARGET_NR_set_thread_area
12072     case TARGET_NR_set_thread_area:
12073 #if defined(TARGET_MIPS)
12074       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12075       return 0;
12076 #elif defined(TARGET_CRIS)
12077       if (arg1 & 0xff)
12078           ret = -TARGET_EINVAL;
12079       else {
12080           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12081           ret = 0;
12082       }
12083       return ret;
12084 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12085       return do_set_thread_area(cpu_env, arg1);
12086 #elif defined(TARGET_M68K)
12087       {
12088           TaskState *ts = cpu->opaque;
12089           ts->tp_value = arg1;
12090           return 0;
12091       }
12092 #else
12093       return -TARGET_ENOSYS;
12094 #endif
12095 #endif
12096 #ifdef TARGET_NR_get_thread_area
12097     case TARGET_NR_get_thread_area:
12098 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12099         return do_get_thread_area(cpu_env, arg1);
12100 #elif defined(TARGET_M68K)
12101         {
12102             TaskState *ts = cpu->opaque;
12103             return ts->tp_value;
12104         }
12105 #else
12106         return -TARGET_ENOSYS;
12107 #endif
12108 #endif
12109 #ifdef TARGET_NR_getdomainname
12110     case TARGET_NR_getdomainname:
12111         return -TARGET_ENOSYS;
12112 #endif
12113 
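    /*
     * The clock_* cases convert between guest and host timespec layouts
     * with the target_to_host_timespec*()/host_to_target_timespec*()
     * helpers; the *_time64 variants use the 64-bit
     * struct __kernel_timespec layout so that 32-bit guests get a
     * 2038-safe interface.
     */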
12114 #ifdef TARGET_NR_clock_settime
12115     case TARGET_NR_clock_settime:
12116     {
12117         struct timespec ts;
12118 
12119         ret = target_to_host_timespec(&ts, arg2);
12120         if (!is_error(ret)) {
12121             ret = get_errno(clock_settime(arg1, &ts));
12122         }
12123         return ret;
12124     }
12125 #endif
12126 #ifdef TARGET_NR_clock_settime64
12127     case TARGET_NR_clock_settime64:
12128     {
12129         struct timespec ts;
12130 
12131         ret = target_to_host_timespec64(&ts, arg2);
12132         if (!is_error(ret)) {
12133             ret = get_errno(clock_settime(arg1, &ts));
12134         }
12135         return ret;
12136     }
12137 #endif
12138 #ifdef TARGET_NR_clock_gettime
12139     case TARGET_NR_clock_gettime:
12140     {
12141         struct timespec ts;
12142         ret = get_errno(clock_gettime(arg1, &ts));
12143         if (!is_error(ret)) {
12144             ret = host_to_target_timespec(arg2, &ts);
12145         }
12146         return ret;
12147     }
12148 #endif
12149 #ifdef TARGET_NR_clock_gettime64
12150     case TARGET_NR_clock_gettime64:
12151     {
12152         struct timespec ts;
12153         ret = get_errno(clock_gettime(arg1, &ts));
12154         if (!is_error(ret)) {
12155             ret = host_to_target_timespec64(arg2, &ts);
12156         }
12157         return ret;
12158     }
12159 #endif
12160 #ifdef TARGET_NR_clock_getres
12161     case TARGET_NR_clock_getres:
12162     {
12163         struct timespec ts;
12164         ret = get_errno(clock_getres(arg1, &ts));
12165         if (!is_error(ret) && host_to_target_timespec(arg2, &ts)) {
12166             return -TARGET_EFAULT;
12167         }
12168         return ret;
12169     }
12170 #endif
12171 #ifdef TARGET_NR_clock_getres_time64
12172     case TARGET_NR_clock_getres_time64:
12173     {
12174         struct timespec ts;
12175         ret = get_errno(clock_getres(arg1, &ts));
12176         if (!is_error(ret) && host_to_target_timespec64(arg2, &ts)) {
12177             return -TARGET_EFAULT;
12178         }
12179         return ret;
12180     }
12181 #endif
12182 #ifdef TARGET_NR_clock_nanosleep
12183     case TARGET_NR_clock_nanosleep:
12184     {
12185         struct timespec ts;
12186         if (target_to_host_timespec(&ts, arg3)) {
12187             return -TARGET_EFAULT;
12188         }
12189         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12190                                              &ts, arg4 ? &ts : NULL));
12191         /*
12192          * If the call is interrupted by a signal handler, it fails with
12193          * -TARGET_EINTR.  If arg4 is not NULL and arg2 is not
12194          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12195          */
12196         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12197             host_to_target_timespec(arg4, &ts)) {
12198               return -TARGET_EFAULT;
12199         }
12200 
12201         return ret;
12202     }
12203 #endif
12204 #ifdef TARGET_NR_clock_nanosleep_time64
12205     case TARGET_NR_clock_nanosleep_time64:
12206     {
12207         struct timespec ts;
12208 
12209         if (target_to_host_timespec64(&ts, arg3)) {
12210             return -TARGET_EFAULT;
12211         }
12212 
12213         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12214                                              &ts, arg4 ? &ts : NULL));
12215 
12216         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12217             host_to_target_timespec64(arg4, &ts)) {
12218             return -TARGET_EFAULT;
12219         }
12220         return ret;
12221     }
12222 #endif
12223 
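    /*
     * set_tid_address is passed straight through: g2h() turns the guest
     * address into a host pointer, so the host kernel's clear-child-tid
     * handling operates directly on guest memory.
     */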
12224 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12225     case TARGET_NR_set_tid_address:
12226         return get_errno(set_tid_address((int *)g2h(arg1)));
12227 #endif
12228 
12229     case TARGET_NR_tkill:
12230         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12231 
12232     case TARGET_NR_tgkill:
12233         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12234                          target_to_host_signal(arg3)));
12235 
12236 #ifdef TARGET_NR_set_robust_list
12237     case TARGET_NR_set_robust_list:
12238     case TARGET_NR_get_robust_list:
12239         /* The ABI for supporting robust futexes has userspace pass
12240          * the kernel a pointer to a linked list which is updated by
12241          * userspace after the syscall; the list is walked by the kernel
12242          * when the thread exits. Since the linked list in QEMU guest
12243          * memory isn't a valid linked list for the host and we have
12244          * no way to reliably intercept the thread-death event, we can't
12245          * support these. Silently return ENOSYS so that guest userspace
12246          * falls back to a non-robust futex implementation (which should
12247          * be OK except in the corner case of the guest crashing while
12248          * holding a mutex that is shared with another process via
12249          * shared memory).
12250          */
12251         return -TARGET_ENOSYS;
12252 #endif
12253 
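    /*
     * utimensat: the guest supplies an array of two timespecs (atime,
     * mtime); both entries are converted before the host call, and a NULL
     * times pointer (arg3 == 0) is passed through so the host sets both
     * timestamps to the current time.
     */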
12254 #if defined(TARGET_NR_utimensat)
12255     case TARGET_NR_utimensat:
12256         {
12257             struct timespec *tsp, ts[2];
12258             if (!arg3) {
12259                 tsp = NULL;
12260             } else {
12261                 if (target_to_host_timespec(ts, arg3)) {
12262                     return -TARGET_EFAULT;
12263                 }
12264                 if (target_to_host_timespec(ts + 1, arg3 +
12265                                             sizeof(struct target_timespec))) {
12266                     return -TARGET_EFAULT;
12267                 }
12268                 tsp = ts;
12269             }
12270             if (!arg2)
12271                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12272             else {
12273                 if (!(p = lock_user_string(arg2))) {
12274                     return -TARGET_EFAULT;
12275                 }
12276                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12277                 unlock_user(p, arg2, 0);
12278             }
12279         }
12280         return ret;
12281 #endif
12282 #ifdef TARGET_NR_utimensat_time64
12283     case TARGET_NR_utimensat_time64:
12284         {
12285             struct timespec *tsp, ts[2];
12286             if (!arg3) {
12287                 tsp = NULL;
12288             } else {
12289                 if (target_to_host_timespec64(ts, arg3)) {
12290                     return -TARGET_EFAULT;
12291                 }
12292                 if (target_to_host_timespec64(ts + 1, arg3 +
12293                                      sizeof(struct target__kernel_timespec))) {
12294                     return -TARGET_EFAULT;
12295                 }
12296                 tsp = ts;
12297             }
12298             if (!arg2)
12299                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12300             else {
12301                 p = lock_user_string(arg2);
12302                 if (!p) {
12303                     return -TARGET_EFAULT;
12304                 }
12305                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12306                 unlock_user(p, arg2, 0);
12307             }
12308         }
12309         return ret;
12310 #endif
12311 #ifdef TARGET_NR_futex
12312     case TARGET_NR_futex:
12313         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12314 #endif
12315 #ifdef TARGET_NR_futex_time64
12316     case TARGET_NR_futex_time64:
12317         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12318 #endif
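    /*
     * inotify descriptors are registered with fd_trans_register(),
     * presumably so that the struct inotify_event records the guest later
     * read()s from them can be translated to the target layout.
     */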
12319 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12320     case TARGET_NR_inotify_init:
12321         ret = get_errno(sys_inotify_init());
12322         if (ret >= 0) {
12323             fd_trans_register(ret, &target_inotify_trans);
12324         }
12325         return ret;
12326 #endif
12327 #ifdef CONFIG_INOTIFY1
12328 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12329     case TARGET_NR_inotify_init1:
12330         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12331                                           fcntl_flags_tbl)));
12332         if (ret >= 0) {
12333             fd_trans_register(ret, &target_inotify_trans);
12334         }
12335         return ret;
12336 #endif
12337 #endif
12338 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12339     case TARGET_NR_inotify_add_watch:
12340         p = lock_user_string(arg2);
12341         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12342         unlock_user(p, arg2, 0);
12343         return ret;
12344 #endif
12345 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12346     case TARGET_NR_inotify_rm_watch:
12347         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12348 #endif
12349 
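    /*
     * POSIX message queues: the guest mq_attr is converted with
     * copy_from_user_mq_attr()/copy_to_user_mq_attr(), the open flags go
     * through target_to_host_bitmask(), and the timed send/receive variants
     * convert the optional timeout with the timespec helpers used above.
     */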
12350 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12351     case TARGET_NR_mq_open:
12352         {
12353             struct mq_attr posix_mq_attr;
12354             struct mq_attr *pposix_mq_attr;
12355             int host_flags;
12356 
12357             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12358             pposix_mq_attr = NULL;
12359             if (arg4) {
12360                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12361                     return -TARGET_EFAULT;
12362                 }
12363                 pposix_mq_attr = &posix_mq_attr;
12364             }
12365             p = lock_user_string(arg1 - 1);
12366             if (!p) {
12367                 return -TARGET_EFAULT;
12368             }
12369             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12370             unlock_user(p, arg1, 0);
12371         }
12372         return ret;
12373 
12374     case TARGET_NR_mq_unlink:
12375         p = lock_user_string(arg1 - 1);
12376         if (!p) {
12377             return -TARGET_EFAULT;
12378         }
12379         ret = get_errno(mq_unlink(p));
12380         unlock_user(p, arg1, 0);
12381         return ret;
12382 
12383 #ifdef TARGET_NR_mq_timedsend
12384     case TARGET_NR_mq_timedsend:
12385         {
12386             struct timespec ts;
12387 
12388             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12389             if (arg5 != 0) {
12390                 if (target_to_host_timespec(&ts, arg5)) {
12391                     return -TARGET_EFAULT;
12392                 }
12393                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12394                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12395                     return -TARGET_EFAULT;
12396                 }
12397             } else {
12398                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12399             }
12400             unlock_user(p, arg2, arg3);
12401         }
12402         return ret;
12403 #endif
12404 #ifdef TARGET_NR_mq_timedsend_time64
12405     case TARGET_NR_mq_timedsend_time64:
12406         {
12407             struct timespec ts;
12408 
12409             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12410             if (arg5 != 0) {
12411                 if (target_to_host_timespec64(&ts, arg5)) {
12412                     return -TARGET_EFAULT;
12413                 }
12414                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12415                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12416                     return -TARGET_EFAULT;
12417                 }
12418             } else {
12419                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12420             }
12421             unlock_user(p, arg2, arg3);
12422         }
12423         return ret;
12424 #endif
12425 
12426 #ifdef TARGET_NR_mq_timedreceive
12427     case TARGET_NR_mq_timedreceive:
12428         {
12429             struct timespec ts;
12430             unsigned int prio;
12431 
12432             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12433             if (arg5 != 0) {
12434                 if (target_to_host_timespec(&ts, arg5)) {
12435                     return -TARGET_EFAULT;
12436                 }
12437                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12438                                                      &prio, &ts));
12439                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12440                     return -TARGET_EFAULT;
12441                 }
12442             } else {
12443                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12444                                                      &prio, NULL));
12445             }
12446             unlock_user(p, arg2, arg3);
12447             if (arg4 != 0)
12448                 put_user_u32(prio, arg4);
12449         }
12450         return ret;
12451 #endif
12452 #ifdef TARGET_NR_mq_timedreceive_time64
12453     case TARGET_NR_mq_timedreceive_time64:
12454         {
12455             struct timespec ts;
12456             unsigned int prio;
12457 
12458             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12459             if (arg5 != 0) {
12460                 if (target_to_host_timespec64(&ts, arg5)) {
12461                     return -TARGET_EFAULT;
12462                 }
12463                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12464                                                      &prio, &ts));
12465                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12466                     return -TARGET_EFAULT;
12467                 }
12468             } else {
12469                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12470                                                      &prio, NULL));
12471             }
12472             unlock_user(p, arg2, arg3);
12473             if (arg4 != 0) {
12474                 put_user_u32(prio, arg4);
12475             }
12476         }
12477         return ret;
12478 #endif
12479 
12480     /* Not implemented for now... */
12481 /*     case TARGET_NR_mq_notify: */
12482 /*         break; */
12483 
12484     case TARGET_NR_mq_getsetattr:
12485         {
12486             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12487             ret = 0;
12488             if (arg2 != 0) {
12489                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12490                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12491                                            &posix_mq_attr_out));
12492             } else if (arg3 != 0) {
12493                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12494             }
12495             if (ret == 0 && arg3 != 0) {
12496                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12497             }
12498         }
12499         return ret;
12500 #endif
12501 
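    /*
     * tee/splice/vmsplice: the optional 64-bit offsets for splice are read
     * from and written back to guest memory with get_user_u64()/
     * put_user_u64(), and the vmsplice iovec is translated with
     * lock_iovec()/unlock_iovec().
     */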
12502 #ifdef CONFIG_SPLICE
12503 #ifdef TARGET_NR_tee
12504     case TARGET_NR_tee:
12505         {
12506             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12507         }
12508         return ret;
12509 #endif
12510 #ifdef TARGET_NR_splice
12511     case TARGET_NR_splice:
12512         {
12513             loff_t loff_in, loff_out;
12514             loff_t *ploff_in = NULL, *ploff_out = NULL;
12515             if (arg2) {
12516                 if (get_user_u64(loff_in, arg2)) {
12517                     return -TARGET_EFAULT;
12518                 }
12519                 ploff_in = &loff_in;
12520             }
12521             if (arg4) {
12522                 if (get_user_u64(loff_out, arg4)) {
12523                     return -TARGET_EFAULT;
12524                 }
12525                 ploff_out = &loff_out;
12526             }
12527             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12528             if (arg2) {
12529                 if (put_user_u64(loff_in, arg2)) {
12530                     return -TARGET_EFAULT;
12531                 }
12532             }
12533             if (arg4) {
12534                 if (put_user_u64(loff_out, arg4)) {
12535                     return -TARGET_EFAULT;
12536                 }
12537             }
12538         }
12539         return ret;
12540 #endif
12541 #ifdef TARGET_NR_vmsplice
12542     case TARGET_NR_vmsplice:
12543         {
12544             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12545             if (vec != NULL) {
12546                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12547                 unlock_iovec(vec, arg2, arg3, 0);
12548             } else {
12549                 ret = -host_to_target_errno(errno);
12550             }
12551         }
12552         return ret;
12553 #endif
12554 #endif /* CONFIG_SPLICE */
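    /*
     * eventfd2 translates only the TARGET_O_NONBLOCK/TARGET_O_CLOEXEC bits
     * to their host values and passes any other flag bits through
     * unchanged; both eventfd variants register the new descriptor with
     * fd_trans_register(), presumably so the 8-byte counter can be
     * byte-swapped for cross-endian guests.
     */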
12555 #ifdef CONFIG_EVENTFD
12556 #if defined(TARGET_NR_eventfd)
12557     case TARGET_NR_eventfd:
12558         ret = get_errno(eventfd(arg1, 0));
12559         if (ret >= 0) {
12560             fd_trans_register(ret, &target_eventfd_trans);
12561         }
12562         return ret;
12563 #endif
12564 #if defined(TARGET_NR_eventfd2)
12565     case TARGET_NR_eventfd2:
12566     {
12567         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12568         if (arg2 & TARGET_O_NONBLOCK) {
12569             host_flags |= O_NONBLOCK;
12570         }
12571         if (arg2 & TARGET_O_CLOEXEC) {
12572             host_flags |= O_CLOEXEC;
12573         }
12574         ret = get_errno(eventfd(arg1, host_flags));
12575         if (ret >= 0) {
12576             fd_trans_register(ret, &target_eventfd_trans);
12577         }
12578         return ret;
12579     }
12580 #endif
12581 #endif /* CONFIG_EVENTFD  */
12582 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12583     case TARGET_NR_fallocate:
12584 #if TARGET_ABI_BITS == 32
12585         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12586                                   target_offset64(arg5, arg6)));
12587 #else
12588         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12589 #endif
12590         return ret;
12591 #endif
12592 #if defined(CONFIG_SYNC_FILE_RANGE)
12593 #if defined(TARGET_NR_sync_file_range)
12594     case TARGET_NR_sync_file_range:
12595 #if TARGET_ABI_BITS == 32
12596 #if defined(TARGET_MIPS)
12597         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12598                                         target_offset64(arg5, arg6), arg7));
12599 #else
12600         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12601                                         target_offset64(arg4, arg5), arg6));
12602 #endif /* !TARGET_MIPS */
12603 #else
12604         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12605 #endif
12606         return ret;
12607 #endif
12608 #if defined(TARGET_NR_sync_file_range2) || \
12609     defined(TARGET_NR_arm_sync_file_range)
12610 #if defined(TARGET_NR_sync_file_range2)
12611     case TARGET_NR_sync_file_range2:
12612 #endif
12613 #if defined(TARGET_NR_arm_sync_file_range)
12614     case TARGET_NR_arm_sync_file_range:
12615 #endif
12616         /* This is like sync_file_range but the arguments are reordered */
12617 #if TARGET_ABI_BITS == 32
12618         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12619                                         target_offset64(arg5, arg6), arg2));
12620 #else
12621         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12622 #endif
12623         return ret;
12624 #endif
12625 #endif
12626 #if defined(TARGET_NR_signalfd4)
12627     case TARGET_NR_signalfd4:
12628         return do_signalfd4(arg1, arg2, arg4);
12629 #endif
12630 #if defined(TARGET_NR_signalfd)
12631     case TARGET_NR_signalfd:
12632         return do_signalfd4(arg1, arg2, 0);
12633 #endif
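    /*
     * epoll: epoll_create/epoll_create1 need no translation beyond the
     * flag bitmask; epoll_ctl copies the guest's target_epoll_event into a
     * host struct epoll_event, byte-swapping the events mask and the
     * 64-bit data union.
     */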
12634 #if defined(CONFIG_EPOLL)
12635 #if defined(TARGET_NR_epoll_create)
12636     case TARGET_NR_epoll_create:
12637         return get_errno(epoll_create(arg1));
12638 #endif
12639 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12640     case TARGET_NR_epoll_create1:
12641         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12642 #endif
12643 #if defined(TARGET_NR_epoll_ctl)
12644     case TARGET_NR_epoll_ctl:
12645     {
12646         struct epoll_event ep;
12647         struct epoll_event *epp = 0;
12648         if (arg4) {
12649             if (arg2 != EPOLL_CTL_DEL) {
12650                 struct target_epoll_event *target_ep;
12651                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12652                     return -TARGET_EFAULT;
12653                 }
12654                 ep.events = tswap32(target_ep->events);
12655                 /*
12656                  * The epoll_data_t union is just opaque data to the kernel,
12657                  * so we transfer all 64 bits across and need not worry what
12658                  * actual data type it is.
12659                  */
12660                 ep.data.u64 = tswap64(target_ep->data.u64);
12661                 unlock_user_struct(target_ep, arg4, 0);
12662             }
12663             /*
12664              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
12665              * a non-null event pointer, even though that argument is
12666              * ignored.
12667              */
12668             epp = &ep;
12669         }
12670         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12671     }
12672 #endif
12673 
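    /*
     * epoll_wait/epoll_pwait: the guest's event array is locked for
     * writing, a bounded host array is allocated with g_try_new(), and on
     * success each returned event is swapped back into the guest buffer.
     * epoll_wait is implemented as epoll_pwait with a NULL signal mask.
     */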
12674 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12675 #if defined(TARGET_NR_epoll_wait)
12676     case TARGET_NR_epoll_wait:
12677 #endif
12678 #if defined(TARGET_NR_epoll_pwait)
12679     case TARGET_NR_epoll_pwait:
12680 #endif
12681     {
12682         struct target_epoll_event *target_ep;
12683         struct epoll_event *ep;
12684         int epfd = arg1;
12685         int maxevents = arg3;
12686         int timeout = arg4;
12687 
12688         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12689             return -TARGET_EINVAL;
12690         }
12691 
12692         target_ep = lock_user(VERIFY_WRITE, arg2,
12693                               maxevents * sizeof(struct target_epoll_event), 1);
12694         if (!target_ep) {
12695             return -TARGET_EFAULT;
12696         }
12697 
12698         ep = g_try_new(struct epoll_event, maxevents);
12699         if (!ep) {
12700             unlock_user(target_ep, arg2, 0);
12701             return -TARGET_ENOMEM;
12702         }
12703 
12704         switch (num) {
12705 #if defined(TARGET_NR_epoll_pwait)
12706         case TARGET_NR_epoll_pwait:
12707         {
12708             target_sigset_t *target_set;
12709             sigset_t _set, *set = &_set;
12710 
12711             if (arg5) {
12712                 if (arg6 != sizeof(target_sigset_t)) {
12713                     ret = -TARGET_EINVAL;
12714                     break;
12715                 }
12716 
12717                 target_set = lock_user(VERIFY_READ, arg5,
12718                                        sizeof(target_sigset_t), 1);
12719                 if (!target_set) {
12720                     ret = -TARGET_EFAULT;
12721                     break;
12722                 }
12723                 target_to_host_sigset(set, target_set);
12724                 unlock_user(target_set, arg5, 0);
12725             } else {
12726                 set = NULL;
12727             }
12728 
12729             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12730                                              set, SIGSET_T_SIZE));
12731             break;
12732         }
12733 #endif
12734 #if defined(TARGET_NR_epoll_wait)
12735         case TARGET_NR_epoll_wait:
12736             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12737                                              NULL, 0));
12738             break;
12739 #endif
12740         default:
12741             ret = -TARGET_ENOSYS;
12742         }
12743         if (!is_error(ret)) {
12744             int i;
12745             for (i = 0; i < ret; i++) {
12746                 target_ep[i].events = tswap32(ep[i].events);
12747                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12748             }
12749             unlock_user(target_ep, arg2,
12750                         ret * sizeof(struct target_epoll_event));
12751         } else {
12752             unlock_user(target_ep, arg2, 0);
12753         }
12754         g_free(ep);
12755         return ret;
12756     }
12757 #endif
12758 #endif
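    /*
     * prlimit64: the guest rlimit64 values are byte-swapped in and out of
     * struct host_rlimit64.  New limits for RLIMIT_AS/DATA/STACK are not
     * forwarded to the host, presumably so the guest cannot constrain the
     * memory QEMU itself needs to run.
     */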
12759 #ifdef TARGET_NR_prlimit64
12760     case TARGET_NR_prlimit64:
12761     {
12762         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12763         struct target_rlimit64 *target_rnew, *target_rold;
12764         struct host_rlimit64 rnew, rold, *rnewp = 0;
12765         int resource = target_to_host_resource(arg2);
12766 
12767         if (arg3 && (resource != RLIMIT_AS &&
12768                      resource != RLIMIT_DATA &&
12769                      resource != RLIMIT_STACK)) {
12770             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12771                 return -TARGET_EFAULT;
12772             }
12773             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12774             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12775             unlock_user_struct(target_rnew, arg3, 0);
12776             rnewp = &rnew;
12777         }
12778 
12779         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12780         if (!is_error(ret) && arg4) {
12781             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12782                 return -TARGET_EFAULT;
12783             }
12784             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12785             target_rold->rlim_max = tswap64(rold.rlim_max);
12786             unlock_user_struct(target_rold, arg4, 1);
12787         }
12788         return ret;
12789     }
12790 #endif
12791 #ifdef TARGET_NR_gethostname
12792     case TARGET_NR_gethostname:
12793     {
12794         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12795         if (name) {
12796             ret = get_errno(gethostname(name, arg2));
12797             unlock_user(name, arg1, arg2);
12798         } else {
12799             ret = -TARGET_EFAULT;
12800         }
12801         return ret;
12802     }
12803 #endif
12804 #ifdef TARGET_NR_atomic_cmpxchg_32
12805     case TARGET_NR_atomic_cmpxchg_32:
12806     {
12807         /* should use start_exclusive from main.c */
12808         abi_ulong mem_value;
12809         if (get_user_u32(mem_value, arg6)) {
12810             target_siginfo_t info;
12811             info.si_signo = SIGSEGV;
12812             info.si_errno = 0;
12813             info.si_code = TARGET_SEGV_MAPERR;
12814             info._sifields._sigfault._addr = arg6;
12815             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12816                          QEMU_SI_FAULT, &info);
12817             return 0xdeadbeef;
12818 
12819         }
12820         if (mem_value == arg2)
12821             put_user_u32(arg1, arg6);
12822         return mem_value;
12823     }
12824 #endif
12825 #ifdef TARGET_NR_atomic_barrier
12826     case TARGET_NR_atomic_barrier:
12827         /* Like the kernel implementation and the
12828            qemu arm barrier, this is a no-op. */
12829         return 0;
12830 #endif
12831 
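    /*
     * POSIX timers: host timer_t handles live in the g_posix_timers[]
     * table and the guest sees an encoded handle, roughly
     *
     *     guest id = TIMER_MAGIC | index;
     *
     * which get_timer_id() turns back into a table index (or a negative
     * error) for the timer_settime/gettime/getoverrun/delete cases below.
     */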
12832 #ifdef TARGET_NR_timer_create
12833     case TARGET_NR_timer_create:
12834     {
12835         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12836 
12837         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12838 
12839         int clkid = arg1;
12840         int timer_index = next_free_host_timer();
12841 
12842         if (timer_index < 0) {
12843             ret = -TARGET_EAGAIN;
12844         } else {
12845             timer_t *phtimer = g_posix_timers + timer_index;
12846 
12847             if (arg2) {
12848                 phost_sevp = &host_sevp;
12849                 ret = target_to_host_sigevent(phost_sevp, arg2);
12850                 if (ret != 0) {
12851                     return ret;
12852                 }
12853             }
12854 
12855             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12856             if (ret) {
12857                 phtimer = NULL;
12858             } else {
12859                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12860                     return -TARGET_EFAULT;
12861                 }
12862             }
12863         }
12864         return ret;
12865     }
12866 #endif
12867 
12868 #ifdef TARGET_NR_timer_settime
12869     case TARGET_NR_timer_settime:
12870     {
12871         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12872          * struct itimerspec *old_value */
12873         target_timer_t timerid = get_timer_id(arg1);
12874 
12875         if (timerid < 0) {
12876             ret = timerid;
12877         } else if (arg3 == 0) {
12878             ret = -TARGET_EINVAL;
12879         } else {
12880             timer_t htimer = g_posix_timers[timerid];
12881             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12882 
12883             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12884                 return -TARGET_EFAULT;
12885             }
12886             ret = get_errno(
12887                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12888             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12889                 return -TARGET_EFAULT;
12890             }
12891         }
12892         return ret;
12893     }
12894 #endif
12895 
12896 #ifdef TARGET_NR_timer_settime64
12897     case TARGET_NR_timer_settime64:
12898     {
12899         target_timer_t timerid = get_timer_id(arg1);
12900 
12901         if (timerid < 0) {
12902             ret = timerid;
12903         } else if (arg3 == 0) {
12904             ret = -TARGET_EINVAL;
12905         } else {
12906             timer_t htimer = g_posix_timers[timerid];
12907             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12908 
12909             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12910                 return -TARGET_EFAULT;
12911             }
12912             ret = get_errno(
12913                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12914             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12915                 return -TARGET_EFAULT;
12916             }
12917         }
12918         return ret;
12919     }
12920 #endif
12921 
12922 #ifdef TARGET_NR_timer_gettime
12923     case TARGET_NR_timer_gettime:
12924     {
12925         /* args: timer_t timerid, struct itimerspec *curr_value */
12926         target_timer_t timerid = get_timer_id(arg1);
12927 
12928         if (timerid < 0) {
12929             ret = timerid;
12930         } else if (!arg2) {
12931             ret = -TARGET_EFAULT;
12932         } else {
12933             timer_t htimer = g_posix_timers[timerid];
12934             struct itimerspec hspec;
12935             ret = get_errno(timer_gettime(htimer, &hspec));
12936 
12937             if (host_to_target_itimerspec(arg2, &hspec)) {
12938                 ret = -TARGET_EFAULT;
12939             }
12940         }
12941         return ret;
12942     }
12943 #endif
12944 
12945 #ifdef TARGET_NR_timer_gettime64
12946     case TARGET_NR_timer_gettime64:
12947     {
12948         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12949         target_timer_t timerid = get_timer_id(arg1);
12950 
12951         if (timerid < 0) {
12952             ret = timerid;
12953         } else if (!arg2) {
12954             ret = -TARGET_EFAULT;
12955         } else {
12956             timer_t htimer = g_posix_timers[timerid];
12957             struct itimerspec hspec;
12958             ret = get_errno(timer_gettime(htimer, &hspec));
12959 
12960             if (host_to_target_itimerspec64(arg2, &hspec)) {
12961                 ret = -TARGET_EFAULT;
12962             }
12963         }
12964         return ret;
12965     }
12966 #endif
12967 
12968 #ifdef TARGET_NR_timer_getoverrun
12969     case TARGET_NR_timer_getoverrun:
12970     {
12971         /* args: timer_t timerid */
12972         target_timer_t timerid = get_timer_id(arg1);
12973 
12974         if (timerid < 0) {
12975             ret = timerid;
12976         } else {
12977             timer_t htimer = g_posix_timers[timerid];
12978             ret = get_errno(timer_getoverrun(htimer));
12979         }
12980         return ret;
12981     }
12982 #endif
12983 
12984 #ifdef TARGET_NR_timer_delete
12985     case TARGET_NR_timer_delete:
12986     {
12987         /* args: timer_t timerid */
12988         target_timer_t timerid = get_timer_id(arg1);
12989 
12990         if (timerid < 0) {
12991             ret = timerid;
12992         } else {
12993             timer_t htimer = g_posix_timers[timerid];
12994             ret = get_errno(timer_delete(htimer));
12995             g_posix_timers[timerid] = 0;
12996         }
12997         return ret;
12998     }
12999 #endif
13000 
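    /*
     * timerfd: the creation flags go through target_to_host_bitmask(), and
     * the itimerspec values are converted with the
     * target_to_host_itimerspec*()/host_to_target_itimerspec*() helpers,
     * with *_time64 variants for the 64-bit time layouts.
     */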
13001 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13002     case TARGET_NR_timerfd_create:
13003         return get_errno(timerfd_create(arg1,
13004                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13005 #endif
13006 
13007 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13008     case TARGET_NR_timerfd_gettime:
13009         {
13010             struct itimerspec its_curr;
13011 
13012             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13013 
13014             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13015                 return -TARGET_EFAULT;
13016             }
13017         }
13018         return ret;
13019 #endif
13020 
13021 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13022     case TARGET_NR_timerfd_gettime64:
13023         {
13024             struct itimerspec its_curr;
13025 
13026             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13027 
13028             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13029                 return -TARGET_EFAULT;
13030             }
13031         }
13032         return ret;
13033 #endif
13034 
13035 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13036     case TARGET_NR_timerfd_settime:
13037         {
13038             struct itimerspec its_new, its_old, *p_new;
13039 
13040             if (arg3) {
13041                 if (target_to_host_itimerspec(&its_new, arg3)) {
13042                     return -TARGET_EFAULT;
13043                 }
13044                 p_new = &its_new;
13045             } else {
13046                 p_new = NULL;
13047             }
13048 
13049             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13050 
13051             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13052                 return -TARGET_EFAULT;
13053             }
13054         }
13055         return ret;
13056 #endif
13057 
13058 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13059     case TARGET_NR_timerfd_settime64:
13060         {
13061             struct itimerspec its_new, its_old, *p_new;
13062 
13063             if (arg3) {
13064                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13065                     return -TARGET_EFAULT;
13066                 }
13067                 p_new = &its_new;
13068             } else {
13069                 p_new = NULL;
13070             }
13071 
13072             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13073 
13074             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13075                 return -TARGET_EFAULT;
13076             }
13077         }
13078         return ret;
13079 #endif
13080 
13081 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13082     case TARGET_NR_ioprio_get:
13083         return get_errno(ioprio_get(arg1, arg2));
13084 #endif
13085 
13086 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13087     case TARGET_NR_ioprio_set:
13088         return get_errno(ioprio_set(arg1, arg2, arg3));
13089 #endif
13090 
13091 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13092     case TARGET_NR_setns:
13093         return get_errno(setns(arg1, arg2));
13094 #endif
13095 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13096     case TARGET_NR_unshare:
13097         return get_errno(unshare(arg1));
13098 #endif
13099 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13100     case TARGET_NR_kcmp:
13101         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13102 #endif
13103 #ifdef TARGET_NR_swapcontext
13104     case TARGET_NR_swapcontext:
13105         /* PowerPC specific.  */
13106         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13107 #endif
13108 #ifdef TARGET_NR_memfd_create
13109     case TARGET_NR_memfd_create:
13110         p = lock_user_string(arg1);
13111         if (!p) {
13112             return -TARGET_EFAULT;
13113         }
13114         ret = get_errno(memfd_create(p, arg2));
13115         fd_trans_unregister(ret);
13116         unlock_user(p, arg1, 0);
13117         return ret;
13118 #endif
13119 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13120     case TARGET_NR_membarrier:
13121         return get_errno(membarrier(arg1, arg2));
13122 #endif
13123 
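    /*
     * copy_file_range: the optional in/out offsets are read from guest
     * memory up front and written back only when the host call succeeded
     * and actually copied data (ret > 0).
     */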
13124 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13125     case TARGET_NR_copy_file_range:
13126         {
13127             loff_t inoff, outoff;
13128             loff_t *pinoff = NULL, *poutoff = NULL;
13129 
13130             if (arg2) {
13131                 if (get_user_u64(inoff, arg2)) {
13132                     return -TARGET_EFAULT;
13133                 }
13134                 pinoff = &inoff;
13135             }
13136             if (arg4) {
13137                 if (get_user_u64(outoff, arg4)) {
13138                     return -TARGET_EFAULT;
13139                 }
13140                 poutoff = &outoff;
13141             }
13142             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13143                                                  arg5, arg6));
13144             if (!is_error(ret) && ret > 0) {
13145                 if (arg2) {
13146                     if (put_user_u64(inoff, arg2)) {
13147                         return -TARGET_EFAULT;
13148                     }
13149                 }
13150                 if (arg4) {
13151                     if (put_user_u64(outoff, arg4)) {
13152                         return -TARGET_EFAULT;
13153                     }
13154                 }
13155             }
13156         }
13157         return ret;
13158 #endif
13159 
13160     default:
13161         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13162         return -TARGET_ENOSYS;
13163     }
13164     return ret;
13165 }
13166 
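/*
 * do_syscall() is the thin wrapper the per-architecture CPU main loops
 * call: it handles the optional DEBUG_ERESTARTSYS restart path, records
 * the syscall for tracing, optionally prints strace-style logs around the
 * call, and delegates the actual work to do_syscall1() above.
 */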
13167 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13168                     abi_long arg2, abi_long arg3, abi_long arg4,
13169                     abi_long arg5, abi_long arg6, abi_long arg7,
13170                     abi_long arg8)
13171 {
13172     CPUState *cpu = env_cpu(cpu_env);
13173     abi_long ret;
13174 
13175 #ifdef DEBUG_ERESTARTSYS
13176     /* Debug-only code for exercising the syscall-restart code paths
13177      * in the per-architecture cpu main loops: restart every syscall
13178      * the guest makes once before letting it through.
13179      */
13180     {
13181         static bool flag;
13182         flag = !flag;
13183         if (flag) {
13184             return -TARGET_ERESTARTSYS;
13185         }
13186     }
13187 #endif
13188 
13189     record_syscall_start(cpu, num, arg1,
13190                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13191 
13192     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13193         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13194     }
13195 
13196     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13197                       arg5, arg6, arg7, arg8);
13198 
13199     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13200         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13201                           arg3, arg4, arg5, arg6);
13202     }
13203 
13204     record_syscall_return(cpu, num, ret);
13205     return ret;
13206 }
13207