xref: /openbmc/qemu/linux-user/syscall.c (revision c27c1cc3)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #endif
118 #include "linux_loop.h"
119 #include "uname.h"
120 
121 #include "qemu.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
127 #include "tcg/tcg.h"
128 
129 #ifndef CLONE_IO
130 #define CLONE_IO                0x80000000      /* Clone io context */
131 #endif
132 
133 /* We can't directly call the host clone syscall, because this will
134  * badly confuse libc (breaking mutexes, for example). So we must
135  * divide clone flags into:
136  *  * flag combinations that look like pthread_create()
137  *  * flag combinations that look like fork()
138  *  * flags we can implement within QEMU itself
139  *  * flags we can't support and will return an error for
140  */
141 /* For thread creation, all these flags must be present; for
142  * fork, none must be present.
143  */
144 #define CLONE_THREAD_FLAGS                              \
145     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
146      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
147 
148 /* These flags are ignored:
149  * CLONE_DETACHED is now ignored by the kernel;
150  * CLONE_IO is just an optimisation hint to the I/O scheduler
151  */
152 #define CLONE_IGNORED_FLAGS                     \
153     (CLONE_DETACHED | CLONE_IO)
154 
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS               \
157     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
158      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
159 
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
162     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
163      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
164 
165 #define CLONE_INVALID_FORK_FLAGS                                        \
166     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
167 
168 #define CLONE_INVALID_THREAD_FLAGS                                      \
169     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
170        CLONE_IGNORED_FLAGS))
171 
172 /* CLONE_VFORK is special-cased early in do_fork(). The other flag bits
173  * have almost all been allocated. We cannot support any of
174  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176  * The checks against the invalid thread masks above will catch these.
177  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
178  */
179 
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181  * once. This exercises the codepaths for restart.
182  */
183 //#define DEBUG_ERESTARTSYS
184 
185 //#include <linux/msdos_fs.h>
186 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
187 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
188 
189 #undef _syscall0
190 #undef _syscall1
191 #undef _syscall2
192 #undef _syscall3
193 #undef _syscall4
194 #undef _syscall5
195 #undef _syscall6
196 
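/* The _syscallN() macros below generate static wrappers that invoke the raw
 * host syscall directly via syscall(2), bypassing any libc wrapper.
 */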
197 #define _syscall0(type,name)		\
198 static type name (void)			\
199 {					\
200 	return syscall(__NR_##name);	\
201 }
202 
203 #define _syscall1(type,name,type1,arg1)		\
204 static type name (type1 arg1)			\
205 {						\
206 	return syscall(__NR_##name, arg1);	\
207 }
208 
209 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
210 static type name (type1 arg1,type2 arg2)		\
211 {							\
212 	return syscall(__NR_##name, arg1, arg2);	\
213 }
214 
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
216 static type name (type1 arg1,type2 arg2,type3 arg3)		\
217 {								\
218 	return syscall(__NR_##name, arg1, arg2, arg3);		\
219 }
220 
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
223 {										\
224 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
225 }
226 
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
228 		  type5,arg5)							\
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
230 {										\
231 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
232 }
233 
234 
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5,type6,arg6)					\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
238                   type6 arg6)							\
239 {										\
240 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
241 }
242 
243 
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
254 #endif
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
257 #endif
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
262 
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
265 #endif
266 
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
270 #endif
271 
272 #define __NR_sys_gettid __NR_gettid
273 _syscall0(int, sys_gettid)
274 
275 /* For the 64-bit guest on 32-bit host case we must emulate
276  * getdents using getdents64, because otherwise the host
277  * might hand us back more dirent records than we can fit
278  * into the guest buffer after structure format conversion.
279  * In all other cases we emulate getdents using the host getdents, if available.
280  */
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
283 #endif
284 
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
287 #endif
288 #if (defined(TARGET_NR_getdents) && \
289       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
292 #endif
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
295           loff_t *, res, uint, wh);
296 #endif
297 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
298 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
299           siginfo_t *, uinfo)
300 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group,int,error_code)
303 #endif
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address,int *,tidptr)
306 #endif
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
309           const struct timespec *,timeout,int *,uaddr2,int,val3)
310 #endif
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
313           const struct timespec *,timeout,int *,uaddr2,int,val3)
314 #endif
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
317           unsigned long *, user_mask_ptr);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
320           unsigned long *, user_mask_ptr);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
323 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
324           void *, arg);
325 _syscall2(int, capget, struct __user_cap_header_struct *, header,
326           struct __user_cap_data_struct *, data);
327 _syscall2(int, capset, struct __user_cap_header_struct *, header,
328           struct __user_cap_data_struct *, data);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get, int, which, int, who)
331 #endif
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
334 #endif
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
337 #endif
338 
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
341           unsigned long, idx1, unsigned long, idx2)
342 #endif
343 
344 /*
345  * It is assumed that struct statx is architecture independent.
346  */
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
349           unsigned int, mask, struct target_statx *, statxbuf)
350 #endif
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier, int, cmd, int, flags)
353 #endif
354 
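/* Translation table between target and host open()/fcntl() file flag bits. */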
355 static bitmask_transtbl fcntl_flags_tbl[] = {
356   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
357   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
358   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
359   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
360   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
361   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
362   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
363   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
364   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
365   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
366   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
367   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
368   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
369 #if defined(O_DIRECT)
370   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
371 #endif
372 #if defined(O_NOATIME)
373   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
374 #endif
375 #if defined(O_CLOEXEC)
376   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
377 #endif
378 #if defined(O_PATH)
379   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
380 #endif
381 #if defined(O_TMPFILE)
382   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
383 #endif
384   /* Don't terminate the list prematurely on 64-bit host+guest.  */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
387 #endif
388   { 0, 0, 0, 0 }
389 };
390 
391 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
392 
393 #ifdef TARGET_NR_utimensat
394 #if defined(__NR_utimensat)
395 #define __NR_sys_utimensat __NR_utimensat
396 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
397           const struct timespec *,tsp,int,flags)
398 #else
399 static int sys_utimensat(int dirfd, const char *pathname,
400                          const struct timespec times[2], int flags)
401 {
402     errno = ENOSYS;
403     return -1;
404 }
405 #endif
406 #endif /* TARGET_NR_utimensat */
407 
408 #ifdef TARGET_NR_renameat2
409 #if defined(__NR_renameat2)
410 #define __NR_sys_renameat2 __NR_renameat2
411 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
412           const char *, new, unsigned int, flags)
413 #else
414 static int sys_renameat2(int oldfd, const char *old,
415                          int newfd, const char *new, int flags)
416 {
417     if (flags == 0) {
418         return renameat(oldfd, old, newfd, new);
419     }
420     errno = ENOSYS;
421     return -1;
422 }
423 #endif
424 #endif /* TARGET_NR_renameat2 */
425 
426 #ifdef CONFIG_INOTIFY
427 #include <sys/inotify.h>
428 
429 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
430 static int sys_inotify_init(void)
431 {
432   return (inotify_init());
433 }
434 #endif
435 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
436 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
437 {
438   return (inotify_add_watch(fd, pathname, mask));
439 }
440 #endif
441 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
442 static int sys_inotify_rm_watch(int fd, int32_t wd)
443 {
444   return (inotify_rm_watch(fd, wd));
445 }
446 #endif
447 #ifdef CONFIG_INOTIFY1
448 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
449 static int sys_inotify_init1(int flags)
450 {
451   return (inotify_init1(flags));
452 }
453 #endif
454 #endif
455 #else
456 /* Userspace can usually survive runtime without inotify */
457 #undef TARGET_NR_inotify_init
458 #undef TARGET_NR_inotify_init1
459 #undef TARGET_NR_inotify_add_watch
460 #undef TARGET_NR_inotify_rm_watch
461 #endif /* CONFIG_INOTIFY  */
462 
463 #if defined(TARGET_NR_prlimit64)
464 #ifndef __NR_prlimit64
465 # define __NR_prlimit64 -1
466 #endif
467 #define __NR_sys_prlimit64 __NR_prlimit64
468 /* The glibc rlimit structure may not match the one used by the underlying syscall */
469 struct host_rlimit64 {
470     uint64_t rlim_cur;
471     uint64_t rlim_max;
472 };
473 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
474           const struct host_rlimit64 *, new_limit,
475           struct host_rlimit64 *, old_limit)
476 #endif
477 
478 
479 #if defined(TARGET_NR_timer_create)
480 /* Maximum of 32 active POSIX timers allowed at any one time. */
481 static timer_t g_posix_timers[32] = { 0, } ;
482 
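/* Claim the first unused slot in g_posix_timers and return its index,
 * or -1 if every slot is already in use.
 */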
483 static inline int next_free_host_timer(void)
484 {
485     int k ;
486     /* FIXME: Does finding the next free slot require a lock? */
487     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
488         if (g_posix_timers[k] == 0) {
489             g_posix_timers[k] = (timer_t) 1;
490             return k;
491         }
492     }
493     return -1;
494 }
495 #endif
496 
497 /* ARM EABI and MIPS expect 64-bit types to be aligned even when passed in pairs of registers */
498 #ifdef TARGET_ARM
499 static inline int regpairs_aligned(void *cpu_env, int num)
500 {
501     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
502 }
503 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
504 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
505 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
506 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
507  * register pairs, which translates to the same behaviour as ARM/MIPS because
508  * we start with r3 as arg1 */
509 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
510 #elif defined(TARGET_SH4)
511 /* SH4 doesn't align register pairs, except for p{read,write}64 */
512 static inline int regpairs_aligned(void *cpu_env, int num)
513 {
514     switch (num) {
515     case TARGET_NR_pread64:
516     case TARGET_NR_pwrite64:
517         return 1;
518 
519     default:
520         return 0;
521     }
522 }
523 #elif defined(TARGET_XTENSA)
524 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
525 #else
526 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
527 #endif
528 
529 #define ERRNO_TABLE_SIZE 1200
530 
531 /* target_to_host_errno_table[] is initialized from
532  * host_to_target_errno_table[] in syscall_init(). */
533 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
534 };
535 
536 /*
537  * This list is the union of errno values overridden in asm-<arch>/errno.h
538  * minus the errnos that are not actually generic to all archs.
539  */
540 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
541     [EAGAIN]		= TARGET_EAGAIN,
542     [EIDRM]		= TARGET_EIDRM,
543     [ECHRNG]		= TARGET_ECHRNG,
544     [EL2NSYNC]		= TARGET_EL2NSYNC,
545     [EL3HLT]		= TARGET_EL3HLT,
546     [EL3RST]		= TARGET_EL3RST,
547     [ELNRNG]		= TARGET_ELNRNG,
548     [EUNATCH]		= TARGET_EUNATCH,
549     [ENOCSI]		= TARGET_ENOCSI,
550     [EL2HLT]		= TARGET_EL2HLT,
551     [EDEADLK]		= TARGET_EDEADLK,
552     [ENOLCK]		= TARGET_ENOLCK,
553     [EBADE]		= TARGET_EBADE,
554     [EBADR]		= TARGET_EBADR,
555     [EXFULL]		= TARGET_EXFULL,
556     [ENOANO]		= TARGET_ENOANO,
557     [EBADRQC]		= TARGET_EBADRQC,
558     [EBADSLT]		= TARGET_EBADSLT,
559     [EBFONT]		= TARGET_EBFONT,
560     [ENOSTR]		= TARGET_ENOSTR,
561     [ENODATA]		= TARGET_ENODATA,
562     [ETIME]		= TARGET_ETIME,
563     [ENOSR]		= TARGET_ENOSR,
564     [ENONET]		= TARGET_ENONET,
565     [ENOPKG]		= TARGET_ENOPKG,
566     [EREMOTE]		= TARGET_EREMOTE,
567     [ENOLINK]		= TARGET_ENOLINK,
568     [EADV]		= TARGET_EADV,
569     [ESRMNT]		= TARGET_ESRMNT,
570     [ECOMM]		= TARGET_ECOMM,
571     [EPROTO]		= TARGET_EPROTO,
572     [EDOTDOT]		= TARGET_EDOTDOT,
573     [EMULTIHOP]		= TARGET_EMULTIHOP,
574     [EBADMSG]		= TARGET_EBADMSG,
575     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
576     [EOVERFLOW]		= TARGET_EOVERFLOW,
577     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
578     [EBADFD]		= TARGET_EBADFD,
579     [EREMCHG]		= TARGET_EREMCHG,
580     [ELIBACC]		= TARGET_ELIBACC,
581     [ELIBBAD]		= TARGET_ELIBBAD,
582     [ELIBSCN]		= TARGET_ELIBSCN,
583     [ELIBMAX]		= TARGET_ELIBMAX,
584     [ELIBEXEC]		= TARGET_ELIBEXEC,
585     [EILSEQ]		= TARGET_EILSEQ,
586     [ENOSYS]		= TARGET_ENOSYS,
587     [ELOOP]		= TARGET_ELOOP,
588     [ERESTART]		= TARGET_ERESTART,
589     [ESTRPIPE]		= TARGET_ESTRPIPE,
590     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
591     [EUSERS]		= TARGET_EUSERS,
592     [ENOTSOCK]		= TARGET_ENOTSOCK,
593     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
594     [EMSGSIZE]		= TARGET_EMSGSIZE,
595     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
596     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
597     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
598     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
599     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
600     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
601     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
602     [EADDRINUSE]	= TARGET_EADDRINUSE,
603     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
604     [ENETDOWN]		= TARGET_ENETDOWN,
605     [ENETUNREACH]	= TARGET_ENETUNREACH,
606     [ENETRESET]		= TARGET_ENETRESET,
607     [ECONNABORTED]	= TARGET_ECONNABORTED,
608     [ECONNRESET]	= TARGET_ECONNRESET,
609     [ENOBUFS]		= TARGET_ENOBUFS,
610     [EISCONN]		= TARGET_EISCONN,
611     [ENOTCONN]		= TARGET_ENOTCONN,
612     [EUCLEAN]		= TARGET_EUCLEAN,
613     [ENOTNAM]		= TARGET_ENOTNAM,
614     [ENAVAIL]		= TARGET_ENAVAIL,
615     [EISNAM]		= TARGET_EISNAM,
616     [EREMOTEIO]		= TARGET_EREMOTEIO,
617     [EDQUOT]            = TARGET_EDQUOT,
618     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
619     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
620     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
621     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
622     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
623     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
624     [EALREADY]		= TARGET_EALREADY,
625     [EINPROGRESS]	= TARGET_EINPROGRESS,
626     [ESTALE]		= TARGET_ESTALE,
627     [ECANCELED]		= TARGET_ECANCELED,
628     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
629     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
630 #ifdef ENOKEY
631     [ENOKEY]		= TARGET_ENOKEY,
632 #endif
633 #ifdef EKEYEXPIRED
634     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
635 #endif
636 #ifdef EKEYREVOKED
637     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
638 #endif
639 #ifdef EKEYREJECTED
640     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
641 #endif
642 #ifdef EOWNERDEAD
643     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
644 #endif
645 #ifdef ENOTRECOVERABLE
646     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
647 #endif
648 #ifdef ENOMSG
649     [ENOMSG]            = TARGET_ENOMSG,
650 #endif
651 #ifdef ERFKILL
652     [ERFKILL]           = TARGET_ERFKILL,
653 #endif
654 #ifdef EHWPOISON
655     [EHWPOISON]         = TARGET_EHWPOISON,
656 #endif
657 };
658 
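/* Translate a host errno value into the target's numbering; values with no
 * table entry are assumed to be identical on both sides and passed through.
 */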
659 static inline int host_to_target_errno(int err)
660 {
661     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
662         host_to_target_errno_table[err]) {
663         return host_to_target_errno_table[err];
664     }
665     return err;
666 }
667 
668 static inline int target_to_host_errno(int err)
669 {
670     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
671         target_to_host_errno_table[err]) {
672         return target_to_host_errno_table[err];
673     }
674     return err;
675 }
676 
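/* Convert a host syscall result into a guest one: -1 becomes the negated
 * target errno, any other value is returned unchanged.
 */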
677 static inline abi_long get_errno(abi_long ret)
678 {
679     if (ret == -1)
680         return -host_to_target_errno(errno);
681     else
682         return ret;
683 }
684 
685 const char *target_strerror(int err)
686 {
687     if (err == TARGET_ERESTARTSYS) {
688         return "To be restarted";
689     }
690     if (err == TARGET_QEMU_ESIGRETURN) {
691         return "Successful exit from sigreturn";
692     }
693 
694     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
695         return NULL;
696     }
697     return strerror(target_to_host_errno(err));
698 }
699 
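/* The safe_syscallN() macros generate safe_NAME() wrappers which issue the
 * syscall through safe_syscall() instead of libc, so that blocking syscalls
 * can be interrupted cleanly when a guest signal needs to be delivered.
 */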
700 #define safe_syscall0(type, name) \
701 static type safe_##name(void) \
702 { \
703     return safe_syscall(__NR_##name); \
704 }
705 
706 #define safe_syscall1(type, name, type1, arg1) \
707 static type safe_##name(type1 arg1) \
708 { \
709     return safe_syscall(__NR_##name, arg1); \
710 }
711 
712 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
713 static type safe_##name(type1 arg1, type2 arg2) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2); \
716 }
717 
718 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
719 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
720 { \
721     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
722 }
723 
724 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
725     type4, arg4) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
727 { \
728     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
729 }
730 
731 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
732     type4, arg4, type5, arg5) \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
734     type5 arg5) \
735 { \
736     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
737 }
738 
739 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
740     type4, arg4, type5, arg5, type6, arg6) \
741 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
742     type5 arg5, type6 arg6) \
743 { \
744     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
745 }
746 
747 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
748 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
749 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
750               int, flags, mode_t, mode)
751 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
752 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
753               struct rusage *, rusage)
754 #endif
755 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
756               int, options, struct rusage *, rusage)
757 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
758 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
759     defined(TARGET_NR_pselect6)
760 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
761               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
762 #endif
763 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
764 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
765               struct timespec *, tsp, const sigset_t *, sigmask,
766               size_t, sigsetsize)
767 #endif
768 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
769               int, maxevents, int, timeout, const sigset_t *, sigmask,
770               size_t, sigsetsize)
771 #if defined(__NR_futex)
772 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
773               const struct timespec *,timeout,int *,uaddr2,int,val3)
774 #endif
775 #if defined(__NR_futex_time64)
776 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
777               const struct timespec *,timeout,int *,uaddr2,int,val3)
778 #endif
779 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
780 safe_syscall2(int, kill, pid_t, pid, int, sig)
781 safe_syscall2(int, tkill, int, tid, int, sig)
782 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
783 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
784 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
785 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
786               unsigned long, pos_l, unsigned long, pos_h)
787 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
788               unsigned long, pos_l, unsigned long, pos_h)
789 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
790               socklen_t, addrlen)
791 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
792               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
793 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
794               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
795 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
796 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
797 safe_syscall2(int, flock, int, fd, int, operation)
798 #ifdef TARGET_NR_rt_sigtimedwait
799 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
800               const struct timespec *, uts, size_t, sigsetsize)
801 #endif
802 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
803               int, flags)
804 #if defined(TARGET_NR_nanosleep)
805 safe_syscall2(int, nanosleep, const struct timespec *, req,
806               struct timespec *, rem)
807 #endif
808 #ifdef TARGET_NR_clock_nanosleep
809 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
810               const struct timespec *, req, struct timespec *, rem)
811 #endif
812 #ifdef __NR_ipc
813 #ifdef __s390x__
814 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
815               void *, ptr)
816 #else
817 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
818               void *, ptr, long, fifth)
819 #endif
820 #endif
821 #ifdef __NR_msgsnd
822 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
823               int, flags)
824 #endif
825 #ifdef __NR_msgrcv
826 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
827               long, msgtype, int, flags)
828 #endif
829 #ifdef __NR_semtimedop
830 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
831               unsigned, nsops, const struct timespec *, timeout)
832 #endif
833 #ifdef TARGET_NR_mq_timedsend
834 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
835               size_t, len, unsigned, prio, const struct timespec *, timeout)
836 #endif
837 #ifdef TARGET_NR_mq_timedreceive
838 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
839               size_t, len, unsigned *, prio, const struct timespec *, timeout)
840 #endif
841 /* We do ioctl like this rather than via safe_syscall3 to preserve the
842  * "third argument might be integer or pointer or not present" behaviour of
843  * the libc function.
844  */
845 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
846 /* Similarly for fcntl. Note that callers must always:
847  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
848  *  use the flock64 struct rather than unsuffixed flock
849  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
850  */
851 #ifdef __NR_fcntl64
852 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
853 #else
854 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
855 #endif
856 
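/* Convert a host socket type, including any SOCK_CLOEXEC/SOCK_NONBLOCK
 * flag bits, into the corresponding target socket type value.
 */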
857 static inline int host_to_target_sock_type(int host_type)
858 {
859     int target_type;
860 
861     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
862     case SOCK_DGRAM:
863         target_type = TARGET_SOCK_DGRAM;
864         break;
865     case SOCK_STREAM:
866         target_type = TARGET_SOCK_STREAM;
867         break;
868     default:
869         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
870         break;
871     }
872 
873 #if defined(SOCK_CLOEXEC)
874     if (host_type & SOCK_CLOEXEC) {
875         target_type |= TARGET_SOCK_CLOEXEC;
876     }
877 #endif
878 
879 #if defined(SOCK_NONBLOCK)
880     if (host_type & SOCK_NONBLOCK) {
881         target_type |= TARGET_SOCK_NONBLOCK;
882     }
883 #endif
884 
885     return target_type;
886 }
887 
888 static abi_ulong target_brk;
889 static abi_ulong target_original_brk;
890 static abi_ulong brk_page;
891 
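/* Record the guest's initial program break; do_brk() grows the emulated
 * heap upwards from here.
 */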
892 void target_set_brk(abi_ulong new_brk)
893 {
894     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
895     brk_page = HOST_PAGE_ALIGN(target_brk);
896 }
897 
898 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
899 #define DEBUGF_BRK(message, args...)
900 
901 /* do_brk() must return target values and target errnos. */
902 abi_long do_brk(abi_ulong new_brk)
903 {
904     abi_long mapped_addr;
905     abi_ulong new_alloc_size;
906 
907     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
908 
909     if (!new_brk) {
910         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
911         return target_brk;
912     }
913     if (new_brk < target_original_brk) {
914         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
915                    target_brk);
916         return target_brk;
917     }
918 
919     /* If the new brk is less than the highest page reserved to the
920      * target heap allocation, set it and we're almost done...  */
921     if (new_brk <= brk_page) {
922         /* Heap contents are initialized to zero, as for anonymous
923          * mapped pages.  */
924         if (new_brk > target_brk) {
925             memset(g2h(target_brk), 0, new_brk - target_brk);
926         }
927         target_brk = new_brk;
928         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
929         return target_brk;
930     }
931 
932     /* We need to allocate more memory after the brk... Note that
933      * we don't use MAP_FIXED because that will map over the top of
934      * any existing mapping (like the one with the host libc or qemu
935      * itself); instead we treat "mapped but at wrong address" as
936      * a failure and unmap again.
937      */
938     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
939     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
940                                         PROT_READ|PROT_WRITE,
941                                         MAP_ANON|MAP_PRIVATE, 0, 0));
942 
943     if (mapped_addr == brk_page) {
944         /* Heap contents are initialized to zero, as for anonymous
945          * mapped pages.  Technically the new pages are already
946          * initialized to zero since they *are* anonymous mapped
947          * pages, however we have to take care with the contents that
948          * come from the remaining part of the previous page: it may
949  * contain garbage data due to previous heap usage (grown
950  * then shrunk).  */
951         memset(g2h(target_brk), 0, brk_page - target_brk);
952 
953         target_brk = new_brk;
954         brk_page = HOST_PAGE_ALIGN(target_brk);
955         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
956             target_brk);
957         return target_brk;
958     } else if (mapped_addr != -1) {
959         /* Mapped but at wrong address, meaning there wasn't actually
960          * enough space for this brk.
961          */
962         target_munmap(mapped_addr, new_alloc_size);
963         mapped_addr = -1;
964         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
965     }
966     else {
967         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
968     }
969 
970 #if defined(TARGET_ALPHA)
971     /* We (partially) emulate OSF/1 on Alpha, which requires we
972        return a proper errno, not an unchanged brk value.  */
973     return -TARGET_ENOMEM;
974 #endif
975     /* For everything else, return the previous break. */
976     return target_brk;
977 }
978 
979 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
980     defined(TARGET_NR_pselect6)
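/* Unpack a guest fd_set (an array of abi_ulong words) into a host fd_set. */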
981 static inline abi_long copy_from_user_fdset(fd_set *fds,
982                                             abi_ulong target_fds_addr,
983                                             int n)
984 {
985     int i, nw, j, k;
986     abi_ulong b, *target_fds;
987 
988     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
989     if (!(target_fds = lock_user(VERIFY_READ,
990                                  target_fds_addr,
991                                  sizeof(abi_ulong) * nw,
992                                  1)))
993         return -TARGET_EFAULT;
994 
995     FD_ZERO(fds);
996     k = 0;
997     for (i = 0; i < nw; i++) {
998         /* grab the abi_ulong */
999         __get_user(b, &target_fds[i]);
1000         for (j = 0; j < TARGET_ABI_BITS; j++) {
1001             /* check the bit inside the abi_ulong */
1002             if ((b >> j) & 1)
1003                 FD_SET(k, fds);
1004             k++;
1005         }
1006     }
1007 
1008     unlock_user(target_fds, target_fds_addr, 0);
1009 
1010     return 0;
1011 }
1012 
1013 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1014                                                  abi_ulong target_fds_addr,
1015                                                  int n)
1016 {
1017     if (target_fds_addr) {
1018         if (copy_from_user_fdset(fds, target_fds_addr, n))
1019             return -TARGET_EFAULT;
1020         *fds_ptr = fds;
1021     } else {
1022         *fds_ptr = NULL;
1023     }
1024     return 0;
1025 }
1026 
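/* Pack a host fd_set back into the guest's abi_ulong word layout. */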
1027 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1028                                           const fd_set *fds,
1029                                           int n)
1030 {
1031     int i, nw, j, k;
1032     abi_long v;
1033     abi_ulong *target_fds;
1034 
1035     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1036     if (!(target_fds = lock_user(VERIFY_WRITE,
1037                                  target_fds_addr,
1038                                  sizeof(abi_ulong) * nw,
1039                                  0)))
1040         return -TARGET_EFAULT;
1041 
1042     k = 0;
1043     for (i = 0; i < nw; i++) {
1044         v = 0;
1045         for (j = 0; j < TARGET_ABI_BITS; j++) {
1046             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1047             k++;
1048         }
1049         __put_user(v, &target_fds[i]);
1050     }
1051 
1052     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1053 
1054     return 0;
1055 }
1056 #endif
1057 
1058 #if defined(__alpha__)
1059 #define HOST_HZ 1024
1060 #else
1061 #define HOST_HZ 100
1062 #endif
1063 
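/* Rescale a clock_t tick count from HOST_HZ to TARGET_HZ. As an
 * illustration, with HOST_HZ 100 and a (hypothetical) TARGET_HZ of 250,
 * 100 host ticks become 250 target ticks.
 */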
1064 static inline abi_long host_to_target_clock_t(long ticks)
1065 {
1066 #if HOST_HZ == TARGET_HZ
1067     return ticks;
1068 #else
1069     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1070 #endif
1071 }
1072 
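/* Copy a host struct rusage out to guest memory, byte-swapping each field. */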
1073 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1074                                              const struct rusage *rusage)
1075 {
1076     struct target_rusage *target_rusage;
1077 
1078     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1079         return -TARGET_EFAULT;
1080     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1081     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1082     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1083     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1084     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1085     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1086     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1087     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1088     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1089     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1090     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1091     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1092     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1093     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1094     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1095     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1096     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1097     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1098     unlock_user_struct(target_rusage, target_addr, 1);
1099 
1100     return 0;
1101 }
1102 
1103 #ifdef TARGET_NR_setrlimit
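/* Convert a target rlimit value to the host rlim_t, mapping
 * TARGET_RLIM_INFINITY (and anything that doesn't fit) to RLIM_INFINITY.
 */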
1104 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1105 {
1106     abi_ulong target_rlim_swap;
1107     rlim_t result;
1108 
1109     target_rlim_swap = tswapal(target_rlim);
1110     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1111         return RLIM_INFINITY;
1112 
1113     result = target_rlim_swap;
1114     if (target_rlim_swap != (rlim_t)result)
1115         return RLIM_INFINITY;
1116 
1117     return result;
1118 }
1119 #endif
1120 
1121 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1122 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1123 {
1124     abi_ulong target_rlim_swap;
1125     abi_ulong result;
1126 
1127     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1128         target_rlim_swap = TARGET_RLIM_INFINITY;
1129     else
1130         target_rlim_swap = rlim;
1131     result = tswapal(target_rlim_swap);
1132 
1133     return result;
1134 }
1135 #endif
1136 
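/* Map a target RLIMIT_* constant to the host constant; unknown codes are
 * passed through unchanged.
 */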
1137 static inline int target_to_host_resource(int code)
1138 {
1139     switch (code) {
1140     case TARGET_RLIMIT_AS:
1141         return RLIMIT_AS;
1142     case TARGET_RLIMIT_CORE:
1143         return RLIMIT_CORE;
1144     case TARGET_RLIMIT_CPU:
1145         return RLIMIT_CPU;
1146     case TARGET_RLIMIT_DATA:
1147         return RLIMIT_DATA;
1148     case TARGET_RLIMIT_FSIZE:
1149         return RLIMIT_FSIZE;
1150     case TARGET_RLIMIT_LOCKS:
1151         return RLIMIT_LOCKS;
1152     case TARGET_RLIMIT_MEMLOCK:
1153         return RLIMIT_MEMLOCK;
1154     case TARGET_RLIMIT_MSGQUEUE:
1155         return RLIMIT_MSGQUEUE;
1156     case TARGET_RLIMIT_NICE:
1157         return RLIMIT_NICE;
1158     case TARGET_RLIMIT_NOFILE:
1159         return RLIMIT_NOFILE;
1160     case TARGET_RLIMIT_NPROC:
1161         return RLIMIT_NPROC;
1162     case TARGET_RLIMIT_RSS:
1163         return RLIMIT_RSS;
1164     case TARGET_RLIMIT_RTPRIO:
1165         return RLIMIT_RTPRIO;
1166     case TARGET_RLIMIT_SIGPENDING:
1167         return RLIMIT_SIGPENDING;
1168     case TARGET_RLIMIT_STACK:
1169         return RLIMIT_STACK;
1170     default:
1171         return code;
1172     }
1173 }
1174 
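/* Fetch a struct timeval from guest memory into *tv. */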
1175 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1176                                               abi_ulong target_tv_addr)
1177 {
1178     struct target_timeval *target_tv;
1179 
1180     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1181         return -TARGET_EFAULT;
1182     }
1183 
1184     __get_user(tv->tv_sec, &target_tv->tv_sec);
1185     __get_user(tv->tv_usec, &target_tv->tv_usec);
1186 
1187     unlock_user_struct(target_tv, target_tv_addr, 0);
1188 
1189     return 0;
1190 }
1191 
1192 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1193                                             const struct timeval *tv)
1194 {
1195     struct target_timeval *target_tv;
1196 
1197     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1198         return -TARGET_EFAULT;
1199     }
1200 
1201     __put_user(tv->tv_sec, &target_tv->tv_sec);
1202     __put_user(tv->tv_usec, &target_tv->tv_usec);
1203 
1204     unlock_user_struct(target_tv, target_tv_addr, 1);
1205 
1206     return 0;
1207 }
1208 
1209 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1210                                              const struct timeval *tv)
1211 {
1212     struct target__kernel_sock_timeval *target_tv;
1213 
1214     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1215         return -TARGET_EFAULT;
1216     }
1217 
1218     __put_user(tv->tv_sec, &target_tv->tv_sec);
1219     __put_user(tv->tv_usec, &target_tv->tv_usec);
1220 
1221     unlock_user_struct(target_tv, target_tv_addr, 1);
1222 
1223     return 0;
1224 }
1225 
1226 #if defined(TARGET_NR_futex) || \
1227     defined(TARGET_NR_rt_sigtimedwait) || \
1228     defined(TARGET_NR_pselect6) || \
1229     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1230     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1231     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1232     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1233     defined(TARGET_NR_timer_settime) || \
1234     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1235 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1236                                                abi_ulong target_addr)
1237 {
1238     struct target_timespec *target_ts;
1239 
1240     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1241         return -TARGET_EFAULT;
1242     }
1243     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1244     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1245     unlock_user_struct(target_ts, target_addr, 0);
1246     return 0;
1247 }
1248 #endif
1249 
1250 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1251     defined(TARGET_NR_timer_settime64) || \
1252     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
1253 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1254                                                  abi_ulong target_addr)
1255 {
1256     struct target__kernel_timespec *target_ts;
1257 
1258     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1259         return -TARGET_EFAULT;
1260     }
1261     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1262     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1263     unlock_user_struct(target_ts, target_addr, 0);
1264     return 0;
1265 }
1266 #endif
1267 
1268 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1269                                                struct timespec *host_ts)
1270 {
1271     struct target_timespec *target_ts;
1272 
1273     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1274         return -TARGET_EFAULT;
1275     }
1276     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1277     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1278     unlock_user_struct(target_ts, target_addr, 1);
1279     return 0;
1280 }
1281 
1282 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1283                                                  struct timespec *host_ts)
1284 {
1285     struct target__kernel_timespec *target_ts;
1286 
1287     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1288         return -TARGET_EFAULT;
1289     }
1290     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1291     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1292     unlock_user_struct(target_ts, target_addr, 1);
1293     return 0;
1294 }
1295 
1296 #if defined(TARGET_NR_gettimeofday)
1297 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1298                                              struct timezone *tz)
1299 {
1300     struct target_timezone *target_tz;
1301 
1302     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1303         return -TARGET_EFAULT;
1304     }
1305 
1306     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1307     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1308 
1309     unlock_user_struct(target_tz, target_tz_addr, 1);
1310 
1311     return 0;
1312 }
1313 #endif
1314 
1315 #if defined(TARGET_NR_settimeofday)
1316 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1317                                                abi_ulong target_tz_addr)
1318 {
1319     struct target_timezone *target_tz;
1320 
1321     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1322         return -TARGET_EFAULT;
1323     }
1324 
1325     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1327 
1328     unlock_user_struct(target_tz, target_tz_addr, 0);
1329 
1330     return 0;
1331 }
1332 #endif
1333 
1334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1335 #include <mqueue.h>
1336 
1337 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1338                                               abi_ulong target_mq_attr_addr)
1339 {
1340     struct target_mq_attr *target_mq_attr;
1341 
1342     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1343                           target_mq_attr_addr, 1))
1344         return -TARGET_EFAULT;
1345 
1346     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1347     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1348     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1349     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1350 
1351     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1352 
1353     return 0;
1354 }
1355 
1356 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1357                                             const struct mq_attr *attr)
1358 {
1359     struct target_mq_attr *target_mq_attr;
1360 
1361     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1362                           target_mq_attr_addr, 0))
1363         return -TARGET_EFAULT;
1364 
1365     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1369 
1370     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1371 
1372     return 0;
1373 }
1374 #endif
1375 
1376 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1377 /* do_select() must return target values and target errnos. */
1378 static abi_long do_select(int n,
1379                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1380                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1381 {
1382     fd_set rfds, wfds, efds;
1383     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1384     struct timeval tv;
1385     struct timespec ts, *ts_ptr;
1386     abi_long ret;
1387 
1388     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1389     if (ret) {
1390         return ret;
1391     }
1392     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1393     if (ret) {
1394         return ret;
1395     }
1396     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1397     if (ret) {
1398         return ret;
1399     }
1400 
1401     if (target_tv_addr) {
1402         if (copy_from_user_timeval(&tv, target_tv_addr))
1403             return -TARGET_EFAULT;
1404         ts.tv_sec = tv.tv_sec;
1405         ts.tv_nsec = tv.tv_usec * 1000;
1406         ts_ptr = &ts;
1407     } else {
1408         ts_ptr = NULL;
1409     }
1410 
1411     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1412                                   ts_ptr, NULL));
1413 
1414     if (!is_error(ret)) {
1415         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1416             return -TARGET_EFAULT;
1417         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1418             return -TARGET_EFAULT;
1419         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1420             return -TARGET_EFAULT;
1421 
1422         if (target_tv_addr) {
1423             tv.tv_sec = ts.tv_sec;
1424             tv.tv_usec = ts.tv_nsec / 1000;
1425             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1426                 return -TARGET_EFAULT;
1427             }
1428         }
1429     }
1430 
1431     return ret;
1432 }
1433 
1434 #if defined(TARGET_WANT_OLD_SYS_SELECT)
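/* The old-style select() passes a single pointer to a block of arguments
 * in guest memory; unpack it and forward to do_select().
 */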
1435 static abi_long do_old_select(abi_ulong arg1)
1436 {
1437     struct target_sel_arg_struct *sel;
1438     abi_ulong inp, outp, exp, tvp;
1439     long nsel;
1440 
1441     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1442         return -TARGET_EFAULT;
1443     }
1444 
1445     nsel = tswapal(sel->n);
1446     inp = tswapal(sel->inp);
1447     outp = tswapal(sel->outp);
1448     exp = tswapal(sel->exp);
1449     tvp = tswapal(sel->tvp);
1450 
1451     unlock_user_struct(sel, arg1, 0);
1452 
1453     return do_select(nsel, inp, outp, exp, tvp);
1454 }
1455 #endif
1456 #endif
1457 
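/* Thin wrapper around host pipe2(); returns -ENOSYS when built without
 * CONFIG_PIPE2.
 */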
1458 static abi_long do_pipe2(int host_pipe[], int flags)
1459 {
1460 #ifdef CONFIG_PIPE2
1461     return pipe2(host_pipe, flags);
1462 #else
1463     return -ENOSYS;
1464 #endif
1465 }
1466 
1467 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1468                         int flags, int is_pipe2)
1469 {
1470     int host_pipe[2];
1471     abi_long ret;
1472     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1473 
1474     if (is_error(ret))
1475         return get_errno(ret);
1476 
1477     /* Several targets have special calling conventions for the original
1478        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1479     if (!is_pipe2) {
1480 #if defined(TARGET_ALPHA)
1481         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1482         return host_pipe[0];
1483 #elif defined(TARGET_MIPS)
1484         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1485         return host_pipe[0];
1486 #elif defined(TARGET_SH4)
1487         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1488         return host_pipe[0];
1489 #elif defined(TARGET_SPARC)
1490         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1491         return host_pipe[0];
1492 #endif
1493     }
1494 
1495     if (put_user_s32(host_pipe[0], pipedes)
1496         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1497         return -TARGET_EFAULT;
1498     return get_errno(ret);
1499 }
1500 
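/* Read a guest ip_mreq/ip_mreqn multicast request; the imr_ifindex field is
 * only present when the caller passed the larger ip_mreqn layout.
 */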
1501 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1502                                               abi_ulong target_addr,
1503                                               socklen_t len)
1504 {
1505     struct target_ip_mreqn *target_smreqn;
1506 
1507     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1508     if (!target_smreqn)
1509         return -TARGET_EFAULT;
1510     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1511     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1512     if (len == sizeof(struct target_ip_mreqn))
1513         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1514     unlock_user(target_smreqn, target_addr, 0);
1515 
1516     return 0;
1517 }
1518 
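/* Convert a guest sockaddr into the host representation, applying any
 * per-fd translation hook and the address-family specific fixups below.
 */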
1519 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1520                                                abi_ulong target_addr,
1521                                                socklen_t len)
1522 {
1523     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1524     sa_family_t sa_family;
1525     struct target_sockaddr *target_saddr;
1526 
1527     if (fd_trans_target_to_host_addr(fd)) {
1528         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1529     }
1530 
1531     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1532     if (!target_saddr)
1533         return -TARGET_EFAULT;
1534 
1535     sa_family = tswap16(target_saddr->sa_family);
1536 
1537     /* Oops. The caller might send an incomplete sun_path; sun_path
1538      * must be terminated by \0 (see the manual page), but
1539      * unfortunately it is quite common to specify sockaddr_un
1540      * length as "strlen(x->sun_path)" while it should be
1541      * "strlen(...) + 1". We'll fix that here if needed.
1542      * The Linux kernel has a similar fixup.
1543      */
1544 
1545     if (sa_family == AF_UNIX) {
1546         if (len < unix_maxlen && len > 0) {
1547             char *cp = (char*)target_saddr;
1548 
1549             if (cp[len - 1] && !cp[len])
1550                 len++;
1551         }
1552         if (len > unix_maxlen)
1553             len = unix_maxlen;
1554     }
1555 
1556     memcpy(addr, target_saddr, len);
1557     addr->sa_family = sa_family;
1558     if (sa_family == AF_NETLINK) {
1559         struct sockaddr_nl *nladdr;
1560 
1561         nladdr = (struct sockaddr_nl *)addr;
1562         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1563         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1564     } else if (sa_family == AF_PACKET) {
1565         struct target_sockaddr_ll *lladdr;
1566 
1567         lladdr = (struct target_sockaddr_ll *)addr;
1568         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1569         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1570     }
1571     unlock_user(target_saddr, target_addr, 0);
1572 
1573     return 0;
1574 }
1575 
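     /*
      * Copy a host sockaddr back to guest memory, byte-swapping sa_family
      * and the family-specific fields of netlink, packet and IPv6 addresses
      * back into guest byte order where the supplied length allows.
      */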
1576 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1577                                                struct sockaddr *addr,
1578                                                socklen_t len)
1579 {
1580     struct target_sockaddr *target_saddr;
1581 
1582     if (len == 0) {
1583         return 0;
1584     }
1585     assert(addr);
1586 
1587     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1588     if (!target_saddr)
1589         return -TARGET_EFAULT;
1590     memcpy(target_saddr, addr, len);
1591     if (len >= offsetof(struct target_sockaddr, sa_family) +
1592         sizeof(target_saddr->sa_family)) {
1593         target_saddr->sa_family = tswap16(addr->sa_family);
1594     }
1595     if (addr->sa_family == AF_NETLINK &&
1596         len >= sizeof(struct target_sockaddr_nl)) {
1597         struct target_sockaddr_nl *target_nl =
1598                (struct target_sockaddr_nl *)target_saddr;
1599         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1600         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1601     } else if (addr->sa_family == AF_PACKET) {
1602         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1603         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1604         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1605     } else if (addr->sa_family == AF_INET6 &&
1606                len >= sizeof(struct target_sockaddr_in6)) {
1607         struct target_sockaddr_in6 *target_in6 =
1608                (struct target_sockaddr_in6 *)target_saddr;
1609         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1610     }
1611     unlock_user(target_saddr, target_addr, len);
1612 
1613     return 0;
1614 }
1615 
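     /*
      * Convert the guest's ancillary data (control messages) into the host
      * msghdr.  SCM_RIGHTS file descriptors and SCM_CREDENTIALS are
      * converted field by field; unknown message types are copied verbatim
      * after a LOG_UNIMP warning.
      */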
1616 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1617                                            struct target_msghdr *target_msgh)
1618 {
1619     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1620     abi_long msg_controllen;
1621     abi_ulong target_cmsg_addr;
1622     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1623     socklen_t space = 0;
1624 
1625     msg_controllen = tswapal(target_msgh->msg_controllen);
1626     if (msg_controllen < sizeof (struct target_cmsghdr))
1627         goto the_end;
1628     target_cmsg_addr = tswapal(target_msgh->msg_control);
1629     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1630     target_cmsg_start = target_cmsg;
1631     if (!target_cmsg)
1632         return -TARGET_EFAULT;
1633 
1634     while (cmsg && target_cmsg) {
1635         void *data = CMSG_DATA(cmsg);
1636         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1637 
1638         int len = tswapal(target_cmsg->cmsg_len)
1639             - sizeof(struct target_cmsghdr);
1640 
1641         space += CMSG_SPACE(len);
1642         if (space > msgh->msg_controllen) {
1643             space -= CMSG_SPACE(len);
1644             /* This is a QEMU bug, since we allocated the payload
1645              * area ourselves (unlike overflow in host-to-target
1646              * conversion, which is just the guest giving us a buffer
1647              * that's too small). It can't happen for the payload types
1648              * we currently support; if it becomes an issue in future
1649              * we would need to improve our allocation strategy to
1650              * something more intelligent than "twice the size of the
1651              * target buffer we're reading from".
1652              */
1653             qemu_log_mask(LOG_UNIMP,
1654                           ("Unsupported ancillary data %d/%d: "
1655                            "unhandled msg size\n"),
1656                           tswap32(target_cmsg->cmsg_level),
1657                           tswap32(target_cmsg->cmsg_type));
1658             break;
1659         }
1660 
1661         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1662             cmsg->cmsg_level = SOL_SOCKET;
1663         } else {
1664             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1665         }
1666         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1667         cmsg->cmsg_len = CMSG_LEN(len);
1668 
1669         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1670             int *fd = (int *)data;
1671             int *target_fd = (int *)target_data;
1672             int i, numfds = len / sizeof(int);
1673 
1674             for (i = 0; i < numfds; i++) {
1675                 __get_user(fd[i], target_fd + i);
1676             }
1677         } else if (cmsg->cmsg_level == SOL_SOCKET
1678                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1679             struct ucred *cred = (struct ucred *)data;
1680             struct target_ucred *target_cred =
1681                 (struct target_ucred *)target_data;
1682 
1683             __get_user(cred->pid, &target_cred->pid);
1684             __get_user(cred->uid, &target_cred->uid);
1685             __get_user(cred->gid, &target_cred->gid);
1686         } else {
1687             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1688                           cmsg->cmsg_level, cmsg->cmsg_type);
1689             memcpy(data, target_data, len);
1690         }
1691 
1692         cmsg = CMSG_NXTHDR(msgh, cmsg);
1693         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1694                                          target_cmsg_start);
1695     }
1696     unlock_user(target_cmsg, target_cmsg_addr, 0);
1697  the_end:
1698     msgh->msg_controllen = space;
1699     return 0;
1700 }
1701 
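     /*
      * Convert ancillary data received from the host back into the guest's
      * control buffer.  Payloads whose target layout differs (e.g. the
      * struct timeval of SO_TIMESTAMP) are resized via tgt_len; if the
      * guest buffer is too small, MSG_CTRUNC is reported just as the
      * kernel's put_cmsg() would do.
      */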
1702 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1703                                            struct msghdr *msgh)
1704 {
1705     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1706     abi_long msg_controllen;
1707     abi_ulong target_cmsg_addr;
1708     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1709     socklen_t space = 0;
1710 
1711     msg_controllen = tswapal(target_msgh->msg_controllen);
1712     if (msg_controllen < sizeof (struct target_cmsghdr))
1713         goto the_end;
1714     target_cmsg_addr = tswapal(target_msgh->msg_control);
1715     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1716     target_cmsg_start = target_cmsg;
1717     if (!target_cmsg)
1718         return -TARGET_EFAULT;
1719 
1720     while (cmsg && target_cmsg) {
1721         void *data = CMSG_DATA(cmsg);
1722         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1723 
1724         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1725         int tgt_len, tgt_space;
1726 
1727         /* We never copy a half-header but may copy half-data;
1728          * this is Linux's behaviour in put_cmsg(). Note that
1729          * truncation here is a guest problem (which we report
1730          * to the guest via the CTRUNC bit), unlike truncation
1731          * in target_to_host_cmsg, which is a QEMU bug.
1732          */
1733         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1734             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1735             break;
1736         }
1737 
1738         if (cmsg->cmsg_level == SOL_SOCKET) {
1739             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1740         } else {
1741             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1742         }
1743         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1744 
1745         /* Payload types which need a different size of payload on
1746          * the target must adjust tgt_len here.
1747          */
1748         tgt_len = len;
1749         switch (cmsg->cmsg_level) {
1750         case SOL_SOCKET:
1751             switch (cmsg->cmsg_type) {
1752             case SO_TIMESTAMP:
1753                 tgt_len = sizeof(struct target_timeval);
1754                 break;
1755             default:
1756                 break;
1757             }
1758             break;
1759         default:
1760             break;
1761         }
1762 
1763         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1764             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1765             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1766         }
1767 
1768         /* We must now copy-and-convert len bytes of payload
1769          * into tgt_len bytes of destination space. Bear in mind
1770          * that in both source and destination we may be dealing
1771          * with a truncated value!
1772          */
1773         switch (cmsg->cmsg_level) {
1774         case SOL_SOCKET:
1775             switch (cmsg->cmsg_type) {
1776             case SCM_RIGHTS:
1777             {
1778                 int *fd = (int *)data;
1779                 int *target_fd = (int *)target_data;
1780                 int i, numfds = tgt_len / sizeof(int);
1781 
1782                 for (i = 0; i < numfds; i++) {
1783                     __put_user(fd[i], target_fd + i);
1784                 }
1785                 break;
1786             }
1787             case SO_TIMESTAMP:
1788             {
1789                 struct timeval *tv = (struct timeval *)data;
1790                 struct target_timeval *target_tv =
1791                     (struct target_timeval *)target_data;
1792 
1793                 if (len != sizeof(struct timeval) ||
1794                     tgt_len != sizeof(struct target_timeval)) {
1795                     goto unimplemented;
1796                 }
1797 
1798                 /* copy struct timeval to target */
1799                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1800                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1801                 break;
1802             }
1803             case SCM_CREDENTIALS:
1804             {
1805                 struct ucred *cred = (struct ucred *)data;
1806                 struct target_ucred *target_cred =
1807                     (struct target_ucred *)target_data;
1808 
1809                 __put_user(cred->pid, &target_cred->pid);
1810                 __put_user(cred->uid, &target_cred->uid);
1811                 __put_user(cred->gid, &target_cred->gid);
1812                 break;
1813             }
1814             default:
1815                 goto unimplemented;
1816             }
1817             break;
1818 
1819         case SOL_IP:
1820             switch (cmsg->cmsg_type) {
1821             case IP_TTL:
1822             {
1823                 uint32_t *v = (uint32_t *)data;
1824                 uint32_t *t_int = (uint32_t *)target_data;
1825 
1826                 if (len != sizeof(uint32_t) ||
1827                     tgt_len != sizeof(uint32_t)) {
1828                     goto unimplemented;
1829                 }
1830                 __put_user(*v, t_int);
1831                 break;
1832             }
1833             case IP_RECVERR:
1834             {
1835                 struct errhdr_t {
1836                    struct sock_extended_err ee;
1837                    struct sockaddr_in offender;
1838                 };
1839                 struct errhdr_t *errh = (struct errhdr_t *)data;
1840                 struct errhdr_t *target_errh =
1841                     (struct errhdr_t *)target_data;
1842 
1843                 if (len != sizeof(struct errhdr_t) ||
1844                     tgt_len != sizeof(struct errhdr_t)) {
1845                     goto unimplemented;
1846                 }
1847                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1848                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1849                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1850                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1851                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1852                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1853                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1854                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1855                     (void *) &errh->offender, sizeof(errh->offender));
1856                 break;
1857             }
1858             default:
1859                 goto unimplemented;
1860             }
1861             break;
1862 
1863         case SOL_IPV6:
1864             switch (cmsg->cmsg_type) {
1865             case IPV6_HOPLIMIT:
1866             {
1867                 uint32_t *v = (uint32_t *)data;
1868                 uint32_t *t_int = (uint32_t *)target_data;
1869 
1870                 if (len != sizeof(uint32_t) ||
1871                     tgt_len != sizeof(uint32_t)) {
1872                     goto unimplemented;
1873                 }
1874                 __put_user(*v, t_int);
1875                 break;
1876             }
1877             case IPV6_RECVERR:
1878             {
1879                 struct errhdr6_t {
1880                    struct sock_extended_err ee;
1881                    struct sockaddr_in6 offender;
1882                 };
1883                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1884                 struct errhdr6_t *target_errh =
1885                     (struct errhdr6_t *)target_data;
1886 
1887                 if (len != sizeof(struct errhdr6_t) ||
1888                     tgt_len != sizeof(struct errhdr6_t)) {
1889                     goto unimplemented;
1890                 }
1891                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1892                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1893                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1894                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1895                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1896                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1897                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1898                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1899                     (void *) &errh->offender, sizeof(errh->offender));
1900                 break;
1901             }
1902             default:
1903                 goto unimplemented;
1904             }
1905             break;
1906 
1907         default:
1908         unimplemented:
1909             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1910                           cmsg->cmsg_level, cmsg->cmsg_type);
1911             memcpy(target_data, data, MIN(len, tgt_len));
1912             if (tgt_len > len) {
1913                 memset(target_data + len, 0, tgt_len - len);
1914             }
1915         }
1916 
1917         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1918         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1919         if (msg_controllen < tgt_space) {
1920             tgt_space = msg_controllen;
1921         }
1922         msg_controllen -= tgt_space;
1923         space += tgt_space;
1924         cmsg = CMSG_NXTHDR(msgh, cmsg);
1925         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1926                                          target_cmsg_start);
1927     }
1928     unlock_user(target_cmsg, target_cmsg_addr, space);
1929  the_end:
1930     target_msgh->msg_controllen = tswapal(space);
1931     return 0;
1932 }
1933 
1934 /* do_setsockopt() Must return target values and target errnos. */
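     /*
      * For example, a guest setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv,
      * sizeof(tv)) arrives here with level == TARGET_SOL_SOCKET and
      * optname == TARGET_SO_RCVTIMEO: the target timeval is converted with
      * copy_from_user_timeval() and handed to the host setsockopt() as a
      * host struct timeval.
      */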
1935 static abi_long do_setsockopt(int sockfd, int level, int optname,
1936                               abi_ulong optval_addr, socklen_t optlen)
1937 {
1938     abi_long ret;
1939     int val;
1940     struct ip_mreqn *ip_mreq;
1941     struct ip_mreq_source *ip_mreq_source;
1942 
1943     switch(level) {
1944     case SOL_TCP:
1945         /* TCP options all take an 'int' value.  */
1946         if (optlen < sizeof(uint32_t))
1947             return -TARGET_EINVAL;
1948 
1949         if (get_user_u32(val, optval_addr))
1950             return -TARGET_EFAULT;
1951         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1952         break;
1953     case SOL_IP:
1954         switch(optname) {
1955         case IP_TOS:
1956         case IP_TTL:
1957         case IP_HDRINCL:
1958         case IP_ROUTER_ALERT:
1959         case IP_RECVOPTS:
1960         case IP_RETOPTS:
1961         case IP_PKTINFO:
1962         case IP_MTU_DISCOVER:
1963         case IP_RECVERR:
1964         case IP_RECVTTL:
1965         case IP_RECVTOS:
1966 #ifdef IP_FREEBIND
1967         case IP_FREEBIND:
1968 #endif
1969         case IP_MULTICAST_TTL:
1970         case IP_MULTICAST_LOOP:
1971             val = 0;
1972             if (optlen >= sizeof(uint32_t)) {
1973                 if (get_user_u32(val, optval_addr))
1974                     return -TARGET_EFAULT;
1975             } else if (optlen >= 1) {
1976                 if (get_user_u8(val, optval_addr))
1977                     return -TARGET_EFAULT;
1978             }
1979             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1980             break;
1981         case IP_ADD_MEMBERSHIP:
1982         case IP_DROP_MEMBERSHIP:
1983             if (optlen < sizeof (struct target_ip_mreq) ||
1984                 optlen > sizeof (struct target_ip_mreqn))
1985                 return -TARGET_EINVAL;
1986 
1987             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1988             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1989             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1990             break;
1991 
1992         case IP_BLOCK_SOURCE:
1993         case IP_UNBLOCK_SOURCE:
1994         case IP_ADD_SOURCE_MEMBERSHIP:
1995         case IP_DROP_SOURCE_MEMBERSHIP:
1996             if (optlen != sizeof (struct target_ip_mreq_source))
1997                 return -TARGET_EINVAL;
1998 
1999             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
2000             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2001             unlock_user(ip_mreq_source, optval_addr, 0);
2002             break;
2003 
2004         default:
2005             goto unimplemented;
2006         }
2007         break;
2008     case SOL_IPV6:
2009         switch (optname) {
2010         case IPV6_MTU_DISCOVER:
2011         case IPV6_MTU:
2012         case IPV6_V6ONLY:
2013         case IPV6_RECVPKTINFO:
2014         case IPV6_UNICAST_HOPS:
2015         case IPV6_MULTICAST_HOPS:
2016         case IPV6_MULTICAST_LOOP:
2017         case IPV6_RECVERR:
2018         case IPV6_RECVHOPLIMIT:
2019         case IPV6_2292HOPLIMIT:
2020         case IPV6_CHECKSUM:
2021         case IPV6_ADDRFORM:
2022         case IPV6_2292PKTINFO:
2023         case IPV6_RECVTCLASS:
2024         case IPV6_RECVRTHDR:
2025         case IPV6_2292RTHDR:
2026         case IPV6_RECVHOPOPTS:
2027         case IPV6_2292HOPOPTS:
2028         case IPV6_RECVDSTOPTS:
2029         case IPV6_2292DSTOPTS:
2030         case IPV6_TCLASS:
2031 #ifdef IPV6_RECVPATHMTU
2032         case IPV6_RECVPATHMTU:
2033 #endif
2034 #ifdef IPV6_TRANSPARENT
2035         case IPV6_TRANSPARENT:
2036 #endif
2037 #ifdef IPV6_FREEBIND
2038         case IPV6_FREEBIND:
2039 #endif
2040 #ifdef IPV6_RECVORIGDSTADDR
2041         case IPV6_RECVORIGDSTADDR:
2042 #endif
2043             val = 0;
2044             if (optlen < sizeof(uint32_t)) {
2045                 return -TARGET_EINVAL;
2046             }
2047             if (get_user_u32(val, optval_addr)) {
2048                 return -TARGET_EFAULT;
2049             }
2050             ret = get_errno(setsockopt(sockfd, level, optname,
2051                                        &val, sizeof(val)));
2052             break;
2053         case IPV6_PKTINFO:
2054         {
2055             struct in6_pktinfo pki;
2056 
2057             if (optlen < sizeof(pki)) {
2058                 return -TARGET_EINVAL;
2059             }
2060 
2061             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2062                 return -TARGET_EFAULT;
2063             }
2064 
2065             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2066 
2067             ret = get_errno(setsockopt(sockfd, level, optname,
2068                                        &pki, sizeof(pki)));
2069             break;
2070         }
2071         case IPV6_ADD_MEMBERSHIP:
2072         case IPV6_DROP_MEMBERSHIP:
2073         {
2074             struct ipv6_mreq ipv6mreq;
2075 
2076             if (optlen < sizeof(ipv6mreq)) {
2077                 return -TARGET_EINVAL;
2078             }
2079 
2080             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2081                 return -TARGET_EFAULT;
2082             }
2083 
2084             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2085 
2086             ret = get_errno(setsockopt(sockfd, level, optname,
2087                                        &ipv6mreq, sizeof(ipv6mreq)));
2088             break;
2089         }
2090         default:
2091             goto unimplemented;
2092         }
2093         break;
2094     case SOL_ICMPV6:
2095         switch (optname) {
2096         case ICMPV6_FILTER:
2097         {
2098             struct icmp6_filter icmp6f;
2099 
2100             if (optlen > sizeof(icmp6f)) {
2101                 optlen = sizeof(icmp6f);
2102             }
2103 
2104             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2105                 return -TARGET_EFAULT;
2106             }
2107 
2108             for (val = 0; val < 8; val++) {
2109                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2110             }
2111 
2112             ret = get_errno(setsockopt(sockfd, level, optname,
2113                                        &icmp6f, optlen));
2114             break;
2115         }
2116         default:
2117             goto unimplemented;
2118         }
2119         break;
2120     case SOL_RAW:
2121         switch (optname) {
2122         case ICMP_FILTER:
2123         case IPV6_CHECKSUM:
2124             /* these take a u32 value */
2125             if (optlen < sizeof(uint32_t)) {
2126                 return -TARGET_EINVAL;
2127             }
2128 
2129             if (get_user_u32(val, optval_addr)) {
2130                 return -TARGET_EFAULT;
2131             }
2132             ret = get_errno(setsockopt(sockfd, level, optname,
2133                                        &val, sizeof(val)));
2134             break;
2135 
2136         default:
2137             goto unimplemented;
2138         }
2139         break;
2140 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2141     case SOL_ALG:
2142         switch (optname) {
2143         case ALG_SET_KEY:
2144         {
2145             char *alg_key = g_malloc(optlen);
2146 
2147             if (!alg_key) {
2148                 return -TARGET_ENOMEM;
2149             }
2150             if (copy_from_user(alg_key, optval_addr, optlen)) {
2151                 g_free(alg_key);
2152                 return -TARGET_EFAULT;
2153             }
2154             ret = get_errno(setsockopt(sockfd, level, optname,
2155                                        alg_key, optlen));
2156             g_free(alg_key);
2157             break;
2158         }
2159         case ALG_SET_AEAD_AUTHSIZE:
2160         {
2161             ret = get_errno(setsockopt(sockfd, level, optname,
2162                                        NULL, optlen));
2163             break;
2164         }
2165         default:
2166             goto unimplemented;
2167         }
2168         break;
2169 #endif
2170     case TARGET_SOL_SOCKET:
2171         switch (optname) {
2172         case TARGET_SO_RCVTIMEO:
2173         {
2174                 struct timeval tv;
2175 
2176                 optname = SO_RCVTIMEO;
2177 
2178 set_timeout:
2179                 if (optlen != sizeof(struct target_timeval)) {
2180                     return -TARGET_EINVAL;
2181                 }
2182 
2183                 if (copy_from_user_timeval(&tv, optval_addr)) {
2184                     return -TARGET_EFAULT;
2185                 }
2186 
2187                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2188                                 &tv, sizeof(tv)));
2189                 return ret;
2190         }
2191         case TARGET_SO_SNDTIMEO:
2192                 optname = SO_SNDTIMEO;
2193                 goto set_timeout;
2194         case TARGET_SO_ATTACH_FILTER:
2195         {
2196                 struct target_sock_fprog *tfprog;
2197                 struct target_sock_filter *tfilter;
2198                 struct sock_fprog fprog;
2199                 struct sock_filter *filter;
2200                 int i;
2201 
2202                 if (optlen != sizeof(*tfprog)) {
2203                     return -TARGET_EINVAL;
2204                 }
2205                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2206                     return -TARGET_EFAULT;
2207                 }
2208                 if (!lock_user_struct(VERIFY_READ, tfilter,
2209                                       tswapal(tfprog->filter), 0)) {
2210                     unlock_user_struct(tfprog, optval_addr, 1);
2211                     return -TARGET_EFAULT;
2212                 }
2213 
2214                 fprog.len = tswap16(tfprog->len);
2215                 filter = g_try_new(struct sock_filter, fprog.len);
2216                 if (filter == NULL) {
2217                     unlock_user_struct(tfilter, tfprog->filter, 1);
2218                     unlock_user_struct(tfprog, optval_addr, 1);
2219                     return -TARGET_ENOMEM;
2220                 }
2221                 for (i = 0; i < fprog.len; i++) {
2222                     filter[i].code = tswap16(tfilter[i].code);
2223                     filter[i].jt = tfilter[i].jt;
2224                     filter[i].jf = tfilter[i].jf;
2225                     filter[i].k = tswap32(tfilter[i].k);
2226                 }
2227                 fprog.filter = filter;
2228 
2229                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2230                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2231                 g_free(filter);
2232 
2233                 unlock_user_struct(tfilter, tfprog->filter, 1);
2234                 unlock_user_struct(tfprog, optval_addr, 1);
2235                 return ret;
2236         }
2237         case TARGET_SO_BINDTODEVICE:
2238         {
2239                 char *dev_ifname, *addr_ifname;
2240 
2241                 if (optlen > IFNAMSIZ - 1) {
2242                     optlen = IFNAMSIZ - 1;
2243                 }
2244                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2245                 if (!dev_ifname) {
2246                     return -TARGET_EFAULT;
2247                 }
2248                 optname = SO_BINDTODEVICE;
2249                 addr_ifname = alloca(IFNAMSIZ);
2250                 memcpy(addr_ifname, dev_ifname, optlen);
2251                 addr_ifname[optlen] = 0;
2252                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2253                                            addr_ifname, optlen));
2254                 unlock_user(dev_ifname, optval_addr, 0);
2255                 return ret;
2256         }
2257         case TARGET_SO_LINGER:
2258         {
2259                 struct linger lg;
2260                 struct target_linger *tlg;
2261 
2262                 if (optlen != sizeof(struct target_linger)) {
2263                     return -TARGET_EINVAL;
2264                 }
2265                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2266                     return -TARGET_EFAULT;
2267                 }
2268                 __get_user(lg.l_onoff, &tlg->l_onoff);
2269                 __get_user(lg.l_linger, &tlg->l_linger);
2270                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2271                                 &lg, sizeof(lg)));
2272                 unlock_user_struct(tlg, optval_addr, 0);
2273                 return ret;
2274         }
2275             /* Options with 'int' argument.  */
2276         case TARGET_SO_DEBUG:
2277                 optname = SO_DEBUG;
2278                 break;
2279         case TARGET_SO_REUSEADDR:
2280                 optname = SO_REUSEADDR;
2281                 break;
2282 #ifdef SO_REUSEPORT
2283         case TARGET_SO_REUSEPORT:
2284                 optname = SO_REUSEPORT;
2285                 break;
2286 #endif
2287         case TARGET_SO_TYPE:
2288                 optname = SO_TYPE;
2289                 break;
2290         case TARGET_SO_ERROR:
2291                 optname = SO_ERROR;
2292                 break;
2293         case TARGET_SO_DONTROUTE:
2294                 optname = SO_DONTROUTE;
2295                 break;
2296         case TARGET_SO_BROADCAST:
2297                 optname = SO_BROADCAST;
2298                 break;
2299         case TARGET_SO_SNDBUF:
2300                 optname = SO_SNDBUF;
2301                 break;
2302         case TARGET_SO_SNDBUFFORCE:
2303                 optname = SO_SNDBUFFORCE;
2304                 break;
2305         case TARGET_SO_RCVBUF:
2306                 optname = SO_RCVBUF;
2307                 break;
2308         case TARGET_SO_RCVBUFFORCE:
2309                 optname = SO_RCVBUFFORCE;
2310                 break;
2311         case TARGET_SO_KEEPALIVE:
2312                 optname = SO_KEEPALIVE;
2313                 break;
2314         case TARGET_SO_OOBINLINE:
2315                 optname = SO_OOBINLINE;
2316                 break;
2317         case TARGET_SO_NO_CHECK:
2318                 optname = SO_NO_CHECK;
2319                 break;
2320         case TARGET_SO_PRIORITY:
2321                 optname = SO_PRIORITY;
2322                 break;
2323 #ifdef SO_BSDCOMPAT
2324         case TARGET_SO_BSDCOMPAT:
2325                 optname = SO_BSDCOMPAT;
2326                 break;
2327 #endif
2328         case TARGET_SO_PASSCRED:
2329                 optname = SO_PASSCRED;
2330                 break;
2331         case TARGET_SO_PASSSEC:
2332                 optname = SO_PASSSEC;
2333                 break;
2334         case TARGET_SO_TIMESTAMP:
2335                 optname = SO_TIMESTAMP;
2336                 break;
2337         case TARGET_SO_RCVLOWAT:
2338                 optname = SO_RCVLOWAT;
2339                 break;
2340         default:
2341             goto unimplemented;
2342         }
2343         if (optlen < sizeof(uint32_t))
2344             return -TARGET_EINVAL;
2345 
2346         if (get_user_u32(val, optval_addr))
2347             return -TARGET_EFAULT;
2348         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2349         break;
2350 #ifdef SOL_NETLINK
2351     case SOL_NETLINK:
2352         switch (optname) {
2353         case NETLINK_PKTINFO:
2354         case NETLINK_ADD_MEMBERSHIP:
2355         case NETLINK_DROP_MEMBERSHIP:
2356         case NETLINK_BROADCAST_ERROR:
2357         case NETLINK_NO_ENOBUFS:
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2359         case NETLINK_LISTEN_ALL_NSID:
2360         case NETLINK_CAP_ACK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2363         case NETLINK_EXT_ACK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2366         case NETLINK_GET_STRICT_CHK:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2368             break;
2369         default:
2370             goto unimplemented;
2371         }
2372         val = 0;
2373         if (optlen < sizeof(uint32_t)) {
2374             return -TARGET_EINVAL;
2375         }
2376         if (get_user_u32(val, optval_addr)) {
2377             return -TARGET_EFAULT;
2378         }
2379         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2380                                    sizeof(val)));
2381         break;
2382 #endif /* SOL_NETLINK */
2383     default:
2384     unimplemented:
2385         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2386                       level, optname);
2387         ret = -TARGET_ENOPROTOOPT;
2388     }
2389     return ret;
2390 }
2391 
2392 /* do_getsockopt() Must return target values and target errnos. */
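     /*
      * Most SOL_SOCKET and all SOL_TCP options funnel into int_case below:
      * the host value is fetched as an int and copied back to the guest as
      * 4 bytes, or as a single byte when the guest supplied an optlen
      * smaller than 4.
      */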
2393 static abi_long do_getsockopt(int sockfd, int level, int optname,
2394                               abi_ulong optval_addr, abi_ulong optlen)
2395 {
2396     abi_long ret;
2397     int len, val;
2398     socklen_t lv;
2399 
2400     switch(level) {
2401     case TARGET_SOL_SOCKET:
2402         level = SOL_SOCKET;
2403         switch (optname) {
2404         /* These don't just return a single integer */
2405         case TARGET_SO_PEERNAME:
2406             goto unimplemented;
2407         case TARGET_SO_RCVTIMEO: {
2408             struct timeval tv;
2409             socklen_t tvlen;
2410 
2411             optname = SO_RCVTIMEO;
2412 
2413 get_timeout:
2414             if (get_user_u32(len, optlen)) {
2415                 return -TARGET_EFAULT;
2416             }
2417             if (len < 0) {
2418                 return -TARGET_EINVAL;
2419             }
2420 
2421             tvlen = sizeof(tv);
2422             ret = get_errno(getsockopt(sockfd, level, optname,
2423                                        &tv, &tvlen));
2424             if (ret < 0) {
2425                 return ret;
2426             }
2427             if (len > sizeof(struct target_timeval)) {
2428                 len = sizeof(struct target_timeval);
2429             }
2430             if (copy_to_user_timeval(optval_addr, &tv)) {
2431                 return -TARGET_EFAULT;
2432             }
2433             if (put_user_u32(len, optlen)) {
2434                 return -TARGET_EFAULT;
2435             }
2436             break;
2437         }
2438         case TARGET_SO_SNDTIMEO:
2439             optname = SO_SNDTIMEO;
2440             goto get_timeout;
2441         case TARGET_SO_PEERCRED: {
2442             struct ucred cr;
2443             socklen_t crlen;
2444             struct target_ucred *tcr;
2445 
2446             if (get_user_u32(len, optlen)) {
2447                 return -TARGET_EFAULT;
2448             }
2449             if (len < 0) {
2450                 return -TARGET_EINVAL;
2451             }
2452 
2453             crlen = sizeof(cr);
2454             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2455                                        &cr, &crlen));
2456             if (ret < 0) {
2457                 return ret;
2458             }
2459             if (len > crlen) {
2460                 len = crlen;
2461             }
2462             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2463                 return -TARGET_EFAULT;
2464             }
2465             __put_user(cr.pid, &tcr->pid);
2466             __put_user(cr.uid, &tcr->uid);
2467             __put_user(cr.gid, &tcr->gid);
2468             unlock_user_struct(tcr, optval_addr, 1);
2469             if (put_user_u32(len, optlen)) {
2470                 return -TARGET_EFAULT;
2471             }
2472             break;
2473         }
2474         case TARGET_SO_PEERSEC: {
2475             char *name;
2476 
2477             if (get_user_u32(len, optlen)) {
2478                 return -TARGET_EFAULT;
2479             }
2480             if (len < 0) {
2481                 return -TARGET_EINVAL;
2482             }
2483             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2484             if (!name) {
2485                 return -TARGET_EFAULT;
2486             }
2487             lv = len;
2488             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2489                                        name, &lv));
2490             if (put_user_u32(lv, optlen)) {
2491                 ret = -TARGET_EFAULT;
2492             }
2493             unlock_user(name, optval_addr, lv);
2494             break;
2495         }
2496         case TARGET_SO_LINGER:
2497         {
2498             struct linger lg;
2499             socklen_t lglen;
2500             struct target_linger *tlg;
2501 
2502             if (get_user_u32(len, optlen)) {
2503                 return -TARGET_EFAULT;
2504             }
2505             if (len < 0) {
2506                 return -TARGET_EINVAL;
2507             }
2508 
2509             lglen = sizeof(lg);
2510             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2511                                        &lg, &lglen));
2512             if (ret < 0) {
2513                 return ret;
2514             }
2515             if (len > lglen) {
2516                 len = lglen;
2517             }
2518             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2519                 return -TARGET_EFAULT;
2520             }
2521             __put_user(lg.l_onoff, &tlg->l_onoff);
2522             __put_user(lg.l_linger, &tlg->l_linger);
2523             unlock_user_struct(tlg, optval_addr, 1);
2524             if (put_user_u32(len, optlen)) {
2525                 return -TARGET_EFAULT;
2526             }
2527             break;
2528         }
2529         /* Options with 'int' argument.  */
2530         case TARGET_SO_DEBUG:
2531             optname = SO_DEBUG;
2532             goto int_case;
2533         case TARGET_SO_REUSEADDR:
2534             optname = SO_REUSEADDR;
2535             goto int_case;
2536 #ifdef SO_REUSEPORT
2537         case TARGET_SO_REUSEPORT:
2538             optname = SO_REUSEPORT;
2539             goto int_case;
2540 #endif
2541         case TARGET_SO_TYPE:
2542             optname = SO_TYPE;
2543             goto int_case;
2544         case TARGET_SO_ERROR:
2545             optname = SO_ERROR;
2546             goto int_case;
2547         case TARGET_SO_DONTROUTE:
2548             optname = SO_DONTROUTE;
2549             goto int_case;
2550         case TARGET_SO_BROADCAST:
2551             optname = SO_BROADCAST;
2552             goto int_case;
2553         case TARGET_SO_SNDBUF:
2554             optname = SO_SNDBUF;
2555             goto int_case;
2556         case TARGET_SO_RCVBUF:
2557             optname = SO_RCVBUF;
2558             goto int_case;
2559         case TARGET_SO_KEEPALIVE:
2560             optname = SO_KEEPALIVE;
2561             goto int_case;
2562         case TARGET_SO_OOBINLINE:
2563             optname = SO_OOBINLINE;
2564             goto int_case;
2565         case TARGET_SO_NO_CHECK:
2566             optname = SO_NO_CHECK;
2567             goto int_case;
2568         case TARGET_SO_PRIORITY:
2569             optname = SO_PRIORITY;
2570             goto int_case;
2571 #ifdef SO_BSDCOMPAT
2572         case TARGET_SO_BSDCOMPAT:
2573             optname = SO_BSDCOMPAT;
2574             goto int_case;
2575 #endif
2576         case TARGET_SO_PASSCRED:
2577             optname = SO_PASSCRED;
2578             goto int_case;
2579         case TARGET_SO_TIMESTAMP:
2580             optname = SO_TIMESTAMP;
2581             goto int_case;
2582         case TARGET_SO_RCVLOWAT:
2583             optname = SO_RCVLOWAT;
2584             goto int_case;
2585         case TARGET_SO_ACCEPTCONN:
2586             optname = SO_ACCEPTCONN;
2587             goto int_case;
2588         default:
2589             goto int_case;
2590         }
2591         break;
2592     case SOL_TCP:
2593         /* TCP options all take an 'int' value.  */
2594     int_case:
2595         if (get_user_u32(len, optlen))
2596             return -TARGET_EFAULT;
2597         if (len < 0)
2598             return -TARGET_EINVAL;
2599         lv = sizeof(lv);
2600         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2601         if (ret < 0)
2602             return ret;
2603         if (optname == SO_TYPE) {
2604             val = host_to_target_sock_type(val);
2605         }
2606         if (len > lv)
2607             len = lv;
2608         if (len == 4) {
2609             if (put_user_u32(val, optval_addr))
2610                 return -TARGET_EFAULT;
2611         } else {
2612             if (put_user_u8(val, optval_addr))
2613                 return -TARGET_EFAULT;
2614         }
2615         if (put_user_u32(len, optlen))
2616             return -TARGET_EFAULT;
2617         break;
2618     case SOL_IP:
2619         switch(optname) {
2620         case IP_TOS:
2621         case IP_TTL:
2622         case IP_HDRINCL:
2623         case IP_ROUTER_ALERT:
2624         case IP_RECVOPTS:
2625         case IP_RETOPTS:
2626         case IP_PKTINFO:
2627         case IP_MTU_DISCOVER:
2628         case IP_RECVERR:
2629         case IP_RECVTOS:
2630 #ifdef IP_FREEBIND
2631         case IP_FREEBIND:
2632 #endif
2633         case IP_MULTICAST_TTL:
2634         case IP_MULTICAST_LOOP:
2635             if (get_user_u32(len, optlen))
2636                 return -TARGET_EFAULT;
2637             if (len < 0)
2638                 return -TARGET_EINVAL;
2639             lv = sizeof(lv);
2640             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2641             if (ret < 0)
2642                 return ret;
2643             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2644                 len = 1;
2645                 if (put_user_u32(len, optlen)
2646                     || put_user_u8(val, optval_addr))
2647                     return -TARGET_EFAULT;
2648             } else {
2649                 if (len > sizeof(int))
2650                     len = sizeof(int);
2651                 if (put_user_u32(len, optlen)
2652                     || put_user_u32(val, optval_addr))
2653                     return -TARGET_EFAULT;
2654             }
2655             break;
2656         default:
2657             ret = -TARGET_ENOPROTOOPT;
2658             break;
2659         }
2660         break;
2661     case SOL_IPV6:
2662         switch (optname) {
2663         case IPV6_MTU_DISCOVER:
2664         case IPV6_MTU:
2665         case IPV6_V6ONLY:
2666         case IPV6_RECVPKTINFO:
2667         case IPV6_UNICAST_HOPS:
2668         case IPV6_MULTICAST_HOPS:
2669         case IPV6_MULTICAST_LOOP:
2670         case IPV6_RECVERR:
2671         case IPV6_RECVHOPLIMIT:
2672         case IPV6_2292HOPLIMIT:
2673         case IPV6_CHECKSUM:
2674         case IPV6_ADDRFORM:
2675         case IPV6_2292PKTINFO:
2676         case IPV6_RECVTCLASS:
2677         case IPV6_RECVRTHDR:
2678         case IPV6_2292RTHDR:
2679         case IPV6_RECVHOPOPTS:
2680         case IPV6_2292HOPOPTS:
2681         case IPV6_RECVDSTOPTS:
2682         case IPV6_2292DSTOPTS:
2683         case IPV6_TCLASS:
2684 #ifdef IPV6_RECVPATHMTU
2685         case IPV6_RECVPATHMTU:
2686 #endif
2687 #ifdef IPV6_TRANSPARENT
2688         case IPV6_TRANSPARENT:
2689 #endif
2690 #ifdef IPV6_FREEBIND
2691         case IPV6_FREEBIND:
2692 #endif
2693 #ifdef IPV6_RECVORIGDSTADDR
2694         case IPV6_RECVORIGDSTADDR:
2695 #endif
2696             if (get_user_u32(len, optlen))
2697                 return -TARGET_EFAULT;
2698             if (len < 0)
2699                 return -TARGET_EINVAL;
2700             lv = sizeof(lv);
2701             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2702             if (ret < 0)
2703                 return ret;
2704             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2705                 len = 1;
2706                 if (put_user_u32(len, optlen)
2707                     || put_user_u8(val, optval_addr))
2708                     return -TARGET_EFAULT;
2709             } else {
2710                 if (len > sizeof(int))
2711                     len = sizeof(int);
2712                 if (put_user_u32(len, optlen)
2713                     || put_user_u32(val, optval_addr))
2714                     return -TARGET_EFAULT;
2715             }
2716             break;
2717         default:
2718             ret = -TARGET_ENOPROTOOPT;
2719             break;
2720         }
2721         break;
2722 #ifdef SOL_NETLINK
2723     case SOL_NETLINK:
2724         switch (optname) {
2725         case NETLINK_PKTINFO:
2726         case NETLINK_BROADCAST_ERROR:
2727         case NETLINK_NO_ENOBUFS:
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2729         case NETLINK_LISTEN_ALL_NSID:
2730         case NETLINK_CAP_ACK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2733         case NETLINK_EXT_ACK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2736         case NETLINK_GET_STRICT_CHK:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2738             if (get_user_u32(len, optlen)) {
2739                 return -TARGET_EFAULT;
2740             }
2741             if (len != sizeof(val)) {
2742                 return -TARGET_EINVAL;
2743             }
2744             lv = len;
2745             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2746             if (ret < 0) {
2747                 return ret;
2748             }
2749             if (put_user_u32(lv, optlen)
2750                 || put_user_u32(val, optval_addr)) {
2751                 return -TARGET_EFAULT;
2752             }
2753             break;
2754 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2755         case NETLINK_LIST_MEMBERSHIPS:
2756         {
2757             uint32_t *results;
2758             int i;
2759             if (get_user_u32(len, optlen)) {
2760                 return -TARGET_EFAULT;
2761             }
2762             if (len < 0) {
2763                 return -TARGET_EINVAL;
2764             }
2765             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2766             if (!results) {
2767                 return -TARGET_EFAULT;
2768             }
2769             lv = len;
2770             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2771             if (ret < 0) {
2772                 unlock_user(results, optval_addr, 0);
2773                 return ret;
2774             }
2775             /* Swap host endianness to target endianness. */
2776             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2777                 results[i] = tswap32(results[i]);
2778             }
2779             if (put_user_u32(lv, optlen)) {
2780                 return -TARGET_EFAULT;
2781             }
2782             unlock_user(results, optval_addr, 0);
2783             break;
2784         }
2785 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2786         default:
2787             goto unimplemented;
2788         }
2789         break;
2790 #endif /* SOL_NETLINK */
2791     default:
2792     unimplemented:
2793         qemu_log_mask(LOG_UNIMP,
2794                       "getsockopt level=%d optname=%d not yet supported\n",
2795                       level, optname);
2796         ret = -TARGET_EOPNOTSUPP;
2797         break;
2798     }
2799     return ret;
2800 }
2801 
2802 /* Convert target low/high pair representing file offset into the host
2803  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2804  * as the kernel doesn't handle them either.
2805  */
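     /*
      * For example, with a 32-bit target on a 64-bit host, tlow == 0x1000
      * and thigh == 0x2 combine into off == 0x200001000, giving
      * *hlow == 0x200001000 and *hhigh == 0.
      */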
2806 static void target_to_host_low_high(abi_ulong tlow,
2807                                     abi_ulong thigh,
2808                                     unsigned long *hlow,
2809                                     unsigned long *hhigh)
2810 {
2811     uint64_t off = tlow |
2812         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2813         TARGET_LONG_BITS / 2;
2814 
2815     *hlow = off;
2816     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2817 }
2818 
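     /*
      * Lock a guest iovec array into host memory.  count must not exceed
      * IOV_MAX; a bad base pointer in the first element is a fault, while
      * bad pointers in later elements merely truncate the transfer (they
      * become zero-length entries).  On failure NULL is returned with
      * errno set; a count of zero yields NULL with errno cleared.
      */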
2819 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2820                                 abi_ulong count, int copy)
2821 {
2822     struct target_iovec *target_vec;
2823     struct iovec *vec;
2824     abi_ulong total_len, max_len;
2825     int i;
2826     int err = 0;
2827     bool bad_address = false;
2828 
2829     if (count == 0) {
2830         errno = 0;
2831         return NULL;
2832     }
2833     if (count > IOV_MAX) {
2834         errno = EINVAL;
2835         return NULL;
2836     }
2837 
2838     vec = g_try_new0(struct iovec, count);
2839     if (vec == NULL) {
2840         errno = ENOMEM;
2841         return NULL;
2842     }
2843 
2844     target_vec = lock_user(VERIFY_READ, target_addr,
2845                            count * sizeof(struct target_iovec), 1);
2846     if (target_vec == NULL) {
2847         err = EFAULT;
2848         goto fail2;
2849     }
2850 
2851     /* ??? If host page size > target page size, this will result in a
2852        value larger than what we can actually support.  */
2853     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2854     total_len = 0;
2855 
2856     for (i = 0; i < count; i++) {
2857         abi_ulong base = tswapal(target_vec[i].iov_base);
2858         abi_long len = tswapal(target_vec[i].iov_len);
2859 
2860         if (len < 0) {
2861             err = EINVAL;
2862             goto fail;
2863         } else if (len == 0) {
2864             /* A zero-length entry is ignored.  */
2865             vec[i].iov_base = 0;
2866         } else {
2867             vec[i].iov_base = lock_user(type, base, len, copy);
2868             /* If the first buffer pointer is bad, this is a fault.  But
2869              * subsequent bad buffers will result in a partial write; this
2870              * is realized by filling the vector with null pointers and
2871              * zero lengths. */
2872             if (!vec[i].iov_base) {
2873                 if (i == 0) {
2874                     err = EFAULT;
2875                     goto fail;
2876                 } else {
2877                     bad_address = true;
2878                 }
2879             }
2880             if (bad_address) {
2881                 len = 0;
2882             }
2883             if (len > max_len - total_len) {
2884                 len = max_len - total_len;
2885             }
2886         }
2887         vec[i].iov_len = len;
2888         total_len += len;
2889     }
2890 
2891     unlock_user(target_vec, target_addr, 0);
2892     return vec;
2893 
2894  fail:
2895     while (--i >= 0) {
2896         if (tswapal(target_vec[i].iov_len) > 0) {
2897             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2898         }
2899     }
2900     unlock_user(target_vec, target_addr, 0);
2901  fail2:
2902     g_free(vec);
2903     errno = err;
2904     return NULL;
2905 }
2906 
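     /*
      * Undo lock_iovec(): release every locked buffer, copying its contents
      * back to guest memory when copy is non-zero (i.e. after a read-style
      * operation).
      */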
2907 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2908                          abi_ulong count, int copy)
2909 {
2910     struct target_iovec *target_vec;
2911     int i;
2912 
2913     target_vec = lock_user(VERIFY_READ, target_addr,
2914                            count * sizeof(struct target_iovec), 1);
2915     if (target_vec) {
2916         for (i = 0; i < count; i++) {
2917             abi_ulong base = tswapal(target_vec[i].iov_base);
2918             abi_long len = tswapal(target_vec[i].iov_len);
2919             if (len < 0) {
2920                 break;
2921             }
2922             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2923         }
2924         unlock_user(target_vec, target_addr, 0);
2925     }
2926 
2927     g_free(vec);
2928 }
2929 
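     /*
      * Translate the guest socket type and its embedded SOCK_CLOEXEC /
      * SOCK_NONBLOCK flags into host values, failing with -TARGET_EINVAL
      * when the host cannot express a requested flag.
      */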
2930 static inline int target_to_host_sock_type(int *type)
2931 {
2932     int host_type = 0;
2933     int target_type = *type;
2934 
2935     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2936     case TARGET_SOCK_DGRAM:
2937         host_type = SOCK_DGRAM;
2938         break;
2939     case TARGET_SOCK_STREAM:
2940         host_type = SOCK_STREAM;
2941         break;
2942     default:
2943         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2944         break;
2945     }
2946     if (target_type & TARGET_SOCK_CLOEXEC) {
2947 #if defined(SOCK_CLOEXEC)
2948         host_type |= SOCK_CLOEXEC;
2949 #else
2950         return -TARGET_EINVAL;
2951 #endif
2952     }
2953     if (target_type & TARGET_SOCK_NONBLOCK) {
2954 #if defined(SOCK_NONBLOCK)
2955         host_type |= SOCK_NONBLOCK;
2956 #elif !defined(O_NONBLOCK)
2957         return -TARGET_EINVAL;
2958 #endif
2959     }
2960     *type = host_type;
2961     return 0;
2962 }
2963 
2964 /* Try to emulate socket type flags after socket creation.  */
2965 static int sock_flags_fixup(int fd, int target_type)
2966 {
2967 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2968     if (target_type & TARGET_SOCK_NONBLOCK) {
2969         int flags = fcntl(fd, F_GETFL);
2970         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2971             close(fd);
2972             return -TARGET_EINVAL;
2973         }
2974     }
2975 #endif
2976     return fd;
2977 }
2978 
2979 /* do_socket() Must return target values and target errnos. */
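     /*
      * Only the NETLINK_ROUTE (with CONFIG_RTNETLINK), NETLINK_KOBJECT_UEVENT
      * and NETLINK_AUDIT protocols are accepted for PF_NETLINK sockets.  The
      * protocol argument of AF_PACKET and obsolete AF_INET/SOCK_PACKET
      * sockets is byte-swapped with tswap16() before the host socket() call.
      */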
2980 static abi_long do_socket(int domain, int type, int protocol)
2981 {
2982     int target_type = type;
2983     int ret;
2984 
2985     ret = target_to_host_sock_type(&type);
2986     if (ret) {
2987         return ret;
2988     }
2989 
2990     if (domain == PF_NETLINK && !(
2991 #ifdef CONFIG_RTNETLINK
2992          protocol == NETLINK_ROUTE ||
2993 #endif
2994          protocol == NETLINK_KOBJECT_UEVENT ||
2995          protocol == NETLINK_AUDIT)) {
2996         return -TARGET_EPROTONOSUPPORT;
2997     }
2998 
2999     if (domain == AF_PACKET ||
3000         (domain == AF_INET && type == SOCK_PACKET)) {
3001         protocol = tswap16(protocol);
3002     }
3003 
3004     ret = get_errno(socket(domain, type, protocol));
3005     if (ret >= 0) {
3006         ret = sock_flags_fixup(ret, target_type);
3007         if (type == SOCK_PACKET) {
3008             /* Handle an obsolete case: if the socket type is
3009              * SOCK_PACKET, the socket is bound by name.
3010              */
3011             fd_trans_register(ret, &target_packet_trans);
3012         } else if (domain == PF_NETLINK) {
3013             switch (protocol) {
3014 #ifdef CONFIG_RTNETLINK
3015             case NETLINK_ROUTE:
3016                 fd_trans_register(ret, &target_netlink_route_trans);
3017                 break;
3018 #endif
3019             case NETLINK_KOBJECT_UEVENT:
3020                 /* nothing to do: messages are strings */
3021                 break;
3022             case NETLINK_AUDIT:
3023                 fd_trans_register(ret, &target_netlink_audit_trans);
3024                 break;
3025             default:
3026                 g_assert_not_reached();
3027             }
3028         }
3029     }
3030     return ret;
3031 }
3032 
3033 /* do_bind() Must return target values and target errnos. */
3034 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3035                         socklen_t addrlen)
3036 {
3037     void *addr;
3038     abi_long ret;
3039 
3040     if ((int)addrlen < 0) {
3041         return -TARGET_EINVAL;
3042     }
3043 
3044     addr = alloca(addrlen+1);
3045 
3046     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3047     if (ret)
3048         return ret;
3049 
3050     return get_errno(bind(sockfd, addr, addrlen));
3051 }
3052 
3053 /* do_connect() Must return target values and target errnos. */
3054 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3055                            socklen_t addrlen)
3056 {
3057     void *addr;
3058     abi_long ret;
3059 
3060     if ((int)addrlen < 0) {
3061         return -TARGET_EINVAL;
3062     }
3063 
3064     addr = alloca(addrlen+1);
3065 
3066     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3067     if (ret)
3068         return ret;
3069 
3070     return get_errno(safe_connect(sockfd, addr, addrlen));
3071 }
3072 
3073 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
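     /*
      * The host control buffer is allocated at twice the guest's
      * msg_controllen because control messages can grow when converted to
      * the host layout (see the allocation note in target_to_host_cmsg()).
      */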
3074 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3075                                       int flags, int send)
3076 {
3077     abi_long ret, len;
3078     struct msghdr msg;
3079     abi_ulong count;
3080     struct iovec *vec;
3081     abi_ulong target_vec;
3082 
3083     if (msgp->msg_name) {
3084         msg.msg_namelen = tswap32(msgp->msg_namelen);
3085         msg.msg_name = alloca(msg.msg_namelen+1);
3086         ret = target_to_host_sockaddr(fd, msg.msg_name,
3087                                       tswapal(msgp->msg_name),
3088                                       msg.msg_namelen);
3089         if (ret == -TARGET_EFAULT) {
3090             /* For connected sockets msg_name and msg_namelen must
3091              * be ignored, so returning EFAULT immediately is wrong.
3092              * Instead, pass a bad msg_name to the host kernel, and
3093              * let it decide whether to return EFAULT or not.
3094              */
3095             msg.msg_name = (void *)-1;
3096         } else if (ret) {
3097             goto out2;
3098         }
3099     } else {
3100         msg.msg_name = NULL;
3101         msg.msg_namelen = 0;
3102     }
3103     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3104     msg.msg_control = alloca(msg.msg_controllen);
3105     memset(msg.msg_control, 0, msg.msg_controllen);
3106 
3107     msg.msg_flags = tswap32(msgp->msg_flags);
3108 
3109     count = tswapal(msgp->msg_iovlen);
3110     target_vec = tswapal(msgp->msg_iov);
3111 
3112     if (count > IOV_MAX) {
3113         /* sendmsg/recvmsg return a different errno for this condition than
3114          * readv/writev, so we must catch it here before lock_iovec() does.
3115          */
3116         ret = -TARGET_EMSGSIZE;
3117         goto out2;
3118     }
3119 
3120     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3121                      target_vec, count, send);
3122     if (vec == NULL) {
3123         ret = -host_to_target_errno(errno);
3124         goto out2;
3125     }
3126     msg.msg_iovlen = count;
3127     msg.msg_iov = vec;
3128 
3129     if (send) {
3130         if (fd_trans_target_to_host_data(fd)) {
3131             void *host_msg;
3132 
3133             host_msg = g_malloc(msg.msg_iov->iov_len);
3134             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3135             ret = fd_trans_target_to_host_data(fd)(host_msg,
3136                                                    msg.msg_iov->iov_len);
3137             if (ret >= 0) {
3138                 msg.msg_iov->iov_base = host_msg;
3139                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3140             }
3141             g_free(host_msg);
3142         } else {
3143             ret = target_to_host_cmsg(&msg, msgp);
3144             if (ret == 0) {
3145                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3146             }
3147         }
3148     } else {
3149         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3150         if (!is_error(ret)) {
3151             len = ret;
3152             if (fd_trans_host_to_target_data(fd)) {
3153                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3154                                                MIN(msg.msg_iov->iov_len, len));
3155             } else {
3156                 ret = host_to_target_cmsg(msgp, &msg);
3157             }
3158             if (!is_error(ret)) {
3159                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3160                 msgp->msg_flags = tswap32(msg.msg_flags);
3161                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3162                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3163                                     msg.msg_name, msg.msg_namelen);
3164                     if (ret) {
3165                         goto out;
3166                     }
3167                 }
3168 
3169                 ret = len;
3170             }
3171         }
3172     }
3173 
3174 out:
3175     unlock_iovec(vec, target_vec, count, !send);
3176 out2:
3177     return ret;
3178 }
3179 
3180 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3181                                int flags, int send)
3182 {
3183     abi_long ret;
3184     struct target_msghdr *msgp;
3185 
3186     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3187                           msgp,
3188                           target_msg,
3189                           send ? 1 : 0)) {
3190         return -TARGET_EFAULT;
3191     }
3192     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3193     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3194     return ret;
3195 }
3196 
3197 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3198  * so it might not have this *mmsg-specific flag either.
3199  */
3200 #ifndef MSG_WAITFORONE
3201 #define MSG_WAITFORONE 0x10000
3202 #endif
3203 
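     /*
      * Emulate sendmmsg()/recvmmsg() with a loop of sendmsg()/recvmsg() over
      * the target's mmsghdr vector.  vlen is clamped to UIO_MAXIOV, each
      * successful transfer stores its byte count in msg_len, and
      * MSG_WAITFORONE turns on MSG_DONTWAIT after the first message, as the
      * real syscall does.  If at least one message was processed the count is
      * returned, otherwise the error from the first failing message.
      */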
3204 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3205                                 unsigned int vlen, unsigned int flags,
3206                                 int send)
3207 {
3208     struct target_mmsghdr *mmsgp;
3209     abi_long ret = 0;
3210     int i;
3211 
3212     if (vlen > UIO_MAXIOV) {
3213         vlen = UIO_MAXIOV;
3214     }
3215 
3216     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3217     if (!mmsgp) {
3218         return -TARGET_EFAULT;
3219     }
3220 
3221     for (i = 0; i < vlen; i++) {
3222         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3223         if (is_error(ret)) {
3224             break;
3225         }
3226         mmsgp[i].msg_len = tswap32(ret);
3227         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3228         if (flags & MSG_WAITFORONE) {
3229             flags |= MSG_DONTWAIT;
3230         }
3231     }
3232 
3233     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3234 
3235     /* Return number of datagrams sent if we sent any at all;
3236      * otherwise return the error.
3237      */
3238     if (i) {
3239         return i;
3240     }
3241     return ret;
3242 }
3243 
3244 /* do_accept4() must return target values and target errnos. */
3245 static abi_long do_accept4(int fd, abi_ulong target_addr,
3246                            abi_ulong target_addrlen_addr, int flags)
3247 {
3248     socklen_t addrlen, ret_addrlen;
3249     void *addr;
3250     abi_long ret;
3251     int host_flags;
3252 
3253     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3254 
3255     if (target_addr == 0) {
3256         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3257     }
3258 
3259     /* Linux returns EINVAL if the addrlen pointer is invalid */
3260     if (get_user_u32(addrlen, target_addrlen_addr))
3261         return -TARGET_EINVAL;
3262 
3263     if ((int)addrlen < 0) {
3264         return -TARGET_EINVAL;
3265     }
3266 
3267     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3268         return -TARGET_EINVAL;
3269 
3270     addr = alloca(addrlen);
3271 
3272     ret_addrlen = addrlen;
3273     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3274     if (!is_error(ret)) {
3275         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3276         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3277             ret = -TARGET_EFAULT;
3278         }
3279     }
3280     return ret;
3281 }
3282 
3283 /* do_getpeername() must return target values and target errnos. */
3284 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3285                                abi_ulong target_addrlen_addr)
3286 {
3287     socklen_t addrlen, ret_addrlen;
3288     void *addr;
3289     abi_long ret;
3290 
3291     if (get_user_u32(addrlen, target_addrlen_addr))
3292         return -TARGET_EFAULT;
3293 
3294     if ((int)addrlen < 0) {
3295         return -TARGET_EINVAL;
3296     }
3297 
3298     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3299         return -TARGET_EFAULT;
3300 
3301     addr = alloca(addrlen);
3302 
3303     ret_addrlen = addrlen;
3304     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3305     if (!is_error(ret)) {
3306         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3307         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3308             ret = -TARGET_EFAULT;
3309         }
3310     }
3311     return ret;
3312 }
3313 
3314 /* do_getsockname() must return target values and target errnos. */
3315 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3316                                abi_ulong target_addrlen_addr)
3317 {
3318     socklen_t addrlen, ret_addrlen;
3319     void *addr;
3320     abi_long ret;
3321 
3322     if (get_user_u32(addrlen, target_addrlen_addr))
3323         return -TARGET_EFAULT;
3324 
3325     if ((int)addrlen < 0) {
3326         return -TARGET_EINVAL;
3327     }
3328 
3329     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3330         return -TARGET_EFAULT;
3331 
3332     addr = alloca(addrlen);
3333 
3334     ret_addrlen = addrlen;
3335     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3336     if (!is_error(ret)) {
3337         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3338         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3339             ret = -TARGET_EFAULT;
3340         }
3341     }
3342     return ret;
3343 }
3344 
3345 /* do_socketpair() must return target values and target errnos. */
3346 static abi_long do_socketpair(int domain, int type, int protocol,
3347                               abi_ulong target_tab_addr)
3348 {
3349     int tab[2];
3350     abi_long ret;
3351 
3352     target_to_host_sock_type(&type);
3353 
3354     ret = get_errno(socketpair(domain, type, protocol, tab));
3355     if (!is_error(ret)) {
3356         if (put_user_s32(tab[0], target_tab_addr)
3357             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3358             ret = -TARGET_EFAULT;
3359     }
3360     return ret;
3361 }
3362 
3363 /* do_sendto() must return target values and target errnos. */
3364 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3365                           abi_ulong target_addr, socklen_t addrlen)
3366 {
3367     void *addr;
3368     void *host_msg;
3369     void *copy_msg = NULL;
3370     abi_long ret;
3371 
3372     if ((int)addrlen < 0) {
3373         return -TARGET_EINVAL;
3374     }
3375 
3376     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3377     if (!host_msg)
3378         return -TARGET_EFAULT;
3379     if (fd_trans_target_to_host_data(fd)) {
3380         copy_msg = host_msg;
3381         host_msg = g_malloc(len);
3382         memcpy(host_msg, copy_msg, len);
3383         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3384         if (ret < 0) {
3385             goto fail;
3386         }
3387     }
3388     if (target_addr) {
3389         addr = alloca(addrlen+1);
3390         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3391         if (ret) {
3392             goto fail;
3393         }
3394         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3395     } else {
3396         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3397     }
3398 fail:
3399     if (copy_msg) {
3400         g_free(host_msg);
3401         host_msg = copy_msg;
3402     }
3403     unlock_user(host_msg, msg, 0);
3404     return ret;
3405 }
3406 
3407 /* do_recvfrom() must return target values and target errnos. */
3408 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3409                             abi_ulong target_addr,
3410                             abi_ulong target_addrlen)
3411 {
3412     socklen_t addrlen, ret_addrlen;
3413     void *addr;
3414     void *host_msg;
3415     abi_long ret;
3416 
3417     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3418     if (!host_msg)
3419         return -TARGET_EFAULT;
3420     if (target_addr) {
3421         if (get_user_u32(addrlen, target_addrlen)) {
3422             ret = -TARGET_EFAULT;
3423             goto fail;
3424         }
3425         if ((int)addrlen < 0) {
3426             ret = -TARGET_EINVAL;
3427             goto fail;
3428         }
3429         addr = alloca(addrlen);
3430         ret_addrlen = addrlen;
3431         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3432                                       addr, &ret_addrlen));
3433     } else {
3434         addr = NULL; /* To keep compiler quiet.  */
3435         addrlen = 0; /* To keep compiler quiet.  */
3436         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3437     }
3438     if (!is_error(ret)) {
3439         if (fd_trans_host_to_target_data(fd)) {
3440             abi_long trans;
3441             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3442             if (is_error(trans)) {
3443                 ret = trans;
3444                 goto fail;
3445             }
3446         }
3447         if (target_addr) {
3448             host_to_target_sockaddr(target_addr, addr,
3449                                     MIN(addrlen, ret_addrlen));
3450             if (put_user_u32(ret_addrlen, target_addrlen)) {
3451                 ret = -TARGET_EFAULT;
3452                 goto fail;
3453             }
3454         }
3455         unlock_user(host_msg, msg, len);
3456     } else {
3457 fail:
3458         unlock_user(host_msg, msg, 0);
3459     }
3460     return ret;
3461 }
3462 
3463 #ifdef TARGET_NR_socketcall
3464 /* do_socketcall() must return target values and target errnos. */
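     /*
      * socketcall(2) multiplexes the socket API through a single syscall:
      * "num" selects the operation and "vptr" points to an array of abi_long
      * arguments in guest memory.  For example, a guest
      *     connect(sockfd, addr, addrlen)
      * arrives here as num == TARGET_SYS_CONNECT with vptr pointing at
      *     { sockfd, (abi_ulong)addr, addrlen }
      * and is forwarded to do_connect().  nargs[] gives the argument count of
      * each operation so we only read as many abi_longs as needed.
      */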
3465 static abi_long do_socketcall(int num, abi_ulong vptr)
3466 {
3467     static const unsigned nargs[] = { /* number of arguments per operation */
3468         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3469         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3470         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3471         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3472         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3473         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3474         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3475         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3476         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3477         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3478         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3479         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3480         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3481         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3482         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3483         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3484         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3485         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3486         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3487         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3488     };
3489     abi_long a[6]; /* max 6 args */
3490     unsigned i;
3491 
3492     /* check the range of the first argument num */
3493     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3494     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3495         return -TARGET_EINVAL;
3496     }
3497     /* ensure we have space for args */
3498     if (nargs[num] > ARRAY_SIZE(a)) {
3499         return -TARGET_EINVAL;
3500     }
3501     /* collect the arguments in a[] according to nargs[] */
3502     for (i = 0; i < nargs[num]; ++i) {
3503         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3504             return -TARGET_EFAULT;
3505         }
3506     }
3507     /* now when we have the args, invoke the appropriate underlying function */
3508     switch (num) {
3509     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3510         return do_socket(a[0], a[1], a[2]);
3511     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3512         return do_bind(a[0], a[1], a[2]);
3513     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3514         return do_connect(a[0], a[1], a[2]);
3515     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3516         return get_errno(listen(a[0], a[1]));
3517     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3518         return do_accept4(a[0], a[1], a[2], 0);
3519     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3520         return do_getsockname(a[0], a[1], a[2]);
3521     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3522         return do_getpeername(a[0], a[1], a[2]);
3523     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3524         return do_socketpair(a[0], a[1], a[2], a[3]);
3525     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3526         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3527     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3528         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3529     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3530         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3531     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3532         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3533     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3534         return get_errno(shutdown(a[0], a[1]));
3535     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3536         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3537     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3538         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3539     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3540         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3541     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3542         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3543     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3544         return do_accept4(a[0], a[1], a[2], a[3]);
3545     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3546         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3547     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3548         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3549     default:
3550         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3551         return -TARGET_EINVAL;
3552     }
3553 }
3554 #endif
3555 
3556 #define N_SHM_REGIONS	32
3557 
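     /*
      * Book-keeping for guest shmat() mappings: do_shmat() records the guest
      * address and size of each attached segment here so that do_shmdt() can
      * clear the corresponding page flags again.  The table has a fixed size,
      * so at most N_SHM_REGIONS segments can be tracked at once.
      */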
3558 static struct shm_region {
3559     abi_ulong start;
3560     abi_ulong size;
3561     bool in_use;
3562 } shm_regions[N_SHM_REGIONS];
3563 
3564 #ifndef TARGET_SEMID64_DS
3565 /* asm-generic version of this struct */
3566 struct target_semid64_ds
3567 {
3568   struct target_ipc_perm sem_perm;
3569   abi_ulong sem_otime;
3570 #if TARGET_ABI_BITS == 32
3571   abi_ulong __unused1;
3572 #endif
3573   abi_ulong sem_ctime;
3574 #if TARGET_ABI_BITS == 32
3575   abi_ulong __unused2;
3576 #endif
3577   abi_ulong sem_nsems;
3578   abi_ulong __unused3;
3579   abi_ulong __unused4;
3580 };
3581 #endif
3582 
3583 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3584                                                abi_ulong target_addr)
3585 {
3586     struct target_ipc_perm *target_ip;
3587     struct target_semid64_ds *target_sd;
3588 
3589     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3590         return -TARGET_EFAULT;
3591     target_ip = &(target_sd->sem_perm);
3592     host_ip->__key = tswap32(target_ip->__key);
3593     host_ip->uid = tswap32(target_ip->uid);
3594     host_ip->gid = tswap32(target_ip->gid);
3595     host_ip->cuid = tswap32(target_ip->cuid);
3596     host_ip->cgid = tswap32(target_ip->cgid);
3597 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3598     host_ip->mode = tswap32(target_ip->mode);
3599 #else
3600     host_ip->mode = tswap16(target_ip->mode);
3601 #endif
3602 #if defined(TARGET_PPC)
3603     host_ip->__seq = tswap32(target_ip->__seq);
3604 #else
3605     host_ip->__seq = tswap16(target_ip->__seq);
3606 #endif
3607     unlock_user_struct(target_sd, target_addr, 0);
3608     return 0;
3609 }
3610 
3611 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3612                                                struct ipc_perm *host_ip)
3613 {
3614     struct target_ipc_perm *target_ip;
3615     struct target_semid64_ds *target_sd;
3616 
3617     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3618         return -TARGET_EFAULT;
3619     target_ip = &(target_sd->sem_perm);
3620     target_ip->__key = tswap32(host_ip->__key);
3621     target_ip->uid = tswap32(host_ip->uid);
3622     target_ip->gid = tswap32(host_ip->gid);
3623     target_ip->cuid = tswap32(host_ip->cuid);
3624     target_ip->cgid = tswap32(host_ip->cgid);
3625 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3626     target_ip->mode = tswap32(host_ip->mode);
3627 #else
3628     target_ip->mode = tswap16(host_ip->mode);
3629 #endif
3630 #if defined(TARGET_PPC)
3631     target_ip->__seq = tswap32(host_ip->__seq);
3632 #else
3633     target_ip->__seq = tswap16(host_ip->__seq);
3634 #endif
3635     unlock_user_struct(target_sd, target_addr, 1);
3636     return 0;
3637 }
3638 
3639 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3640                                                abi_ulong target_addr)
3641 {
3642     struct target_semid64_ds *target_sd;
3643 
3644     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3645         return -TARGET_EFAULT;
3646     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3647         return -TARGET_EFAULT;
3648     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3649     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3650     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3651     unlock_user_struct(target_sd, target_addr, 0);
3652     return 0;
3653 }
3654 
3655 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3656                                                struct semid_ds *host_sd)
3657 {
3658     struct target_semid64_ds *target_sd;
3659 
3660     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3661         return -TARGET_EFAULT;
3662     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3663         return -TARGET_EFAULT;
3664     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3665     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3666     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3667     unlock_user_struct(target_sd, target_addr, 1);
3668     return 0;
3669 }
3670 
3671 struct target_seminfo {
3672     int semmap;
3673     int semmni;
3674     int semmns;
3675     int semmnu;
3676     int semmsl;
3677     int semopm;
3678     int semume;
3679     int semusz;
3680     int semvmx;
3681     int semaem;
3682 };
3683 
3684 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3685                                               struct seminfo *host_seminfo)
3686 {
3687     struct target_seminfo *target_seminfo;
3688     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3689         return -TARGET_EFAULT;
3690     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3691     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3692     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3693     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3694     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3695     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3696     __put_user(host_seminfo->semume, &target_seminfo->semume);
3697     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3698     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3699     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3700     unlock_user_struct(target_seminfo, target_addr, 1);
3701     return 0;
3702 }
3703 
3704 union semun {
3705     int val;
3706     struct semid_ds *buf;
3707     unsigned short *array;
3708     struct seminfo *__buf;
3709 };
3710 
3711 union target_semun {
3712     int val;
3713     abi_ulong buf;
3714     abi_ulong array;
3715     abi_ulong __buf;
3716 };
3717 
3718 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3719                                                abi_ulong target_addr)
3720 {
3721     int nsems;
3722     unsigned short *array;
3723     union semun semun;
3724     struct semid_ds semid_ds;
3725     int i, ret;
3726 
3727     semun.buf = &semid_ds;
3728 
3729     ret = semctl(semid, 0, IPC_STAT, semun);
3730     if (ret == -1)
3731         return get_errno(ret);
3732 
3733     nsems = semid_ds.sem_nsems;
3734 
3735     *host_array = g_try_new(unsigned short, nsems);
3736     if (!*host_array) {
3737         return -TARGET_ENOMEM;
3738     }
3739     array = lock_user(VERIFY_READ, target_addr,
3740                       nsems*sizeof(unsigned short), 1);
3741     if (!array) {
3742         g_free(*host_array);
3743         return -TARGET_EFAULT;
3744     }
3745 
3746     for (i = 0; i < nsems; i++) {
3747         __get_user((*host_array)[i], &array[i]);
3748     }
3749     unlock_user(array, target_addr, 0);
3750 
3751     return 0;
3752 }
3753 
3754 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3755                                                unsigned short **host_array)
3756 {
3757     int nsems;
3758     unsigned short *array;
3759     union semun semun;
3760     struct semid_ds semid_ds;
3761     int i, ret;
3762 
3763     semun.buf = &semid_ds;
3764 
3765     ret = semctl(semid, 0, IPC_STAT, semun);
3766     if (ret == -1)
3767         return get_errno(ret);
3768 
3769     nsems = semid_ds.sem_nsems;
3770 
3771     array = lock_user(VERIFY_WRITE, target_addr,
3772                       nsems*sizeof(unsigned short), 0);
3773     if (!array)
3774         return -TARGET_EFAULT;
3775 
3776     for (i = 0; i < nsems; i++) {
3777         __put_user((*host_array)[i], &array[i]);
3778     }
3779     g_free(*host_array);
3780     unlock_user(array, target_addr, 1);
3781 
3782     return 0;
3783 }
3784 
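     /*
      * semctl(): the optional fourth argument is a union semun, passed here
      * as the raw abi_ulong "target_arg".  Depending on "cmd" it is either an
      * immediate int (GETVAL/SETVAL), a guest pointer to an array of unsigned
      * short (GETALL/SETALL), a guest pointer to a semid_ds
      * (IPC_STAT/IPC_SET/SEM_STAT) or to a seminfo (IPC_INFO/SEM_INFO); each
      * case converts the referenced data in the appropriate direction around
      * the host semctl() call.
      */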
3785 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3786                                  abi_ulong target_arg)
3787 {
3788     union target_semun target_su = { .buf = target_arg };
3789     union semun arg;
3790     struct semid_ds dsarg;
3791     unsigned short *array = NULL;
3792     struct seminfo seminfo;
3793     abi_long ret = -TARGET_EINVAL;
3794     abi_long err;
3795     cmd &= 0xff;
3796 
3797     switch (cmd) {
3798     case GETVAL:
3799     case SETVAL:
3800         /* In 64 bit cross-endian situations, we will erroneously pick up
3801          * the wrong half of the union for the "val" element.  To rectify
3802          * this, the entire 8-byte structure is byteswapped, followed by
3803          * a swap of the 4 byte val field. In other cases, the data is
3804          * already in proper host byte order. */
3805         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3806             target_su.buf = tswapal(target_su.buf);
3807             arg.val = tswap32(target_su.val);
3808         } else {
3809             arg.val = target_su.val;
3810         }
3811         ret = get_errno(semctl(semid, semnum, cmd, arg));
3812         break;
3813     case GETALL:
3814     case SETALL:
3815         err = target_to_host_semarray(semid, &array, target_su.array);
3816         if (err)
3817             return err;
3818         arg.array = array;
3819         ret = get_errno(semctl(semid, semnum, cmd, arg));
3820         err = host_to_target_semarray(semid, target_su.array, &array);
3821         if (err)
3822             return err;
3823         break;
3824     case IPC_STAT:
3825     case IPC_SET:
3826     case SEM_STAT:
3827         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3828         if (err)
3829             return err;
3830         arg.buf = &dsarg;
3831         ret = get_errno(semctl(semid, semnum, cmd, arg));
3832         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3833         if (err)
3834             return err;
3835         break;
3836     case IPC_INFO:
3837     case SEM_INFO:
3838         arg.__buf = &seminfo;
3839         ret = get_errno(semctl(semid, semnum, cmd, arg));
3840         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3841         if (err)
3842             return err;
3843         break;
3844     case IPC_RMID:
3845     case GETPID:
3846     case GETNCNT:
3847     case GETZCNT:
3848         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3849         break;
3850     }
3851 
3852     return ret;
3853 }
3854 
3855 struct target_sembuf {
3856     unsigned short sem_num;
3857     short sem_op;
3858     short sem_flg;
3859 };
3860 
3861 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3862                                              abi_ulong target_addr,
3863                                              unsigned nsops)
3864 {
3865     struct target_sembuf *target_sembuf;
3866     int i;
3867 
3868     target_sembuf = lock_user(VERIFY_READ, target_addr,
3869                               nsops*sizeof(struct target_sembuf), 1);
3870     if (!target_sembuf)
3871         return -TARGET_EFAULT;
3872 
3873     for (i = 0; i < nsops; i++) {
3874         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3875         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3876         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3877     }
3878 
3879     unlock_user(target_sembuf, target_addr, 0);
3880 
3881     return 0;
3882 }
3883 
3884 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3885     defined(TARGET_NR_semtimedop)
3886 
3887 /*
3888  * This macro is required to handle the s390 variants, which pass the
3889  * arguments in a different order than the default.
3890  */
3891 #ifdef __s390x__
3892 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3893   (__nsops), (__timeout), (__sops)
3894 #else
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896   (__nsops), 0, (__sops), (__timeout)
3897 #endif
3898 
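     /*
      * For illustration, the do_semtimedop() fallback through sys_ipc below
      * expands to roughly
      *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)
      * on the generic ABI and to
      *     safe_ipc(IPCOP_semtimedop, semid, nsops, timeout, sops)
      * on s390x, matching the kernel's differing sys_ipc argument layouts.
      */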
3899 static inline abi_long do_semtimedop(int semid,
3900                                      abi_long ptr,
3901                                      unsigned nsops,
3902                                      abi_long timeout)
3903 {
3904     struct sembuf sops[nsops];
3905     struct timespec ts, *pts = NULL;
3906     abi_long ret;
3907 
3908     if (timeout) {
3909         pts = &ts;
3910         if (target_to_host_timespec(pts, timeout)) {
3911             return -TARGET_EFAULT;
3912         }
3913     }
3914 
3915     if (target_to_host_sembuf(sops, ptr, nsops))
3916         return -TARGET_EFAULT;
3917 
3918     ret = -TARGET_ENOSYS;
3919 #ifdef __NR_semtimedop
3920     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3921 #endif
3922 #ifdef __NR_ipc
3923     if (ret == -TARGET_ENOSYS) {
3924         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3925                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3926     }
3927 #endif
3928     return ret;
3929 }
3930 #endif
3931 
3932 struct target_msqid_ds
3933 {
3934     struct target_ipc_perm msg_perm;
3935     abi_ulong msg_stime;
3936 #if TARGET_ABI_BITS == 32
3937     abi_ulong __unused1;
3938 #endif
3939     abi_ulong msg_rtime;
3940 #if TARGET_ABI_BITS == 32
3941     abi_ulong __unused2;
3942 #endif
3943     abi_ulong msg_ctime;
3944 #if TARGET_ABI_BITS == 32
3945     abi_ulong __unused3;
3946 #endif
3947     abi_ulong __msg_cbytes;
3948     abi_ulong msg_qnum;
3949     abi_ulong msg_qbytes;
3950     abi_ulong msg_lspid;
3951     abi_ulong msg_lrpid;
3952     abi_ulong __unused4;
3953     abi_ulong __unused5;
3954 };
3955 
3956 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3957                                                abi_ulong target_addr)
3958 {
3959     struct target_msqid_ds *target_md;
3960 
3961     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3962         return -TARGET_EFAULT;
3963     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3964         return -TARGET_EFAULT;
3965     host_md->msg_stime = tswapal(target_md->msg_stime);
3966     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3967     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3968     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3969     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3970     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3971     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3972     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3973     unlock_user_struct(target_md, target_addr, 0);
3974     return 0;
3975 }
3976 
3977 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3978                                                struct msqid_ds *host_md)
3979 {
3980     struct target_msqid_ds *target_md;
3981 
3982     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3983         return -TARGET_EFAULT;
3984     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3985         return -TARGET_EFAULT;
3986     target_md->msg_stime = tswapal(host_md->msg_stime);
3987     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3988     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3989     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3990     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3991     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3992     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3993     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3994     unlock_user_struct(target_md, target_addr, 1);
3995     return 0;
3996 }
3997 
3998 struct target_msginfo {
3999     int msgpool;
4000     int msgmap;
4001     int msgmax;
4002     int msgmnb;
4003     int msgmni;
4004     int msgssz;
4005     int msgtql;
4006     unsigned short int msgseg;
4007 };
4008 
4009 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4010                                               struct msginfo *host_msginfo)
4011 {
4012     struct target_msginfo *target_msginfo;
4013     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4014         return -TARGET_EFAULT;
4015     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4016     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4017     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4018     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4019     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4020     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4021     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4022     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4023     unlock_user_struct(target_msginfo, target_addr, 1);
4024     return 0;
4025 }
4026 
4027 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4028 {
4029     struct msqid_ds dsarg;
4030     struct msginfo msginfo;
4031     abi_long ret = -TARGET_EINVAL;
4032 
4033     cmd &= 0xff;
4034 
4035     switch (cmd) {
4036     case IPC_STAT:
4037     case IPC_SET:
4038     case MSG_STAT:
4039         if (target_to_host_msqid_ds(&dsarg,ptr))
4040             return -TARGET_EFAULT;
4041         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4042         if (host_to_target_msqid_ds(ptr,&dsarg))
4043             return -TARGET_EFAULT;
4044         break;
4045     case IPC_RMID:
4046         ret = get_errno(msgctl(msgid, cmd, NULL));
4047         break;
4048     case IPC_INFO:
4049     case MSG_INFO:
4050         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4051         if (host_to_target_msginfo(ptr, &msginfo))
4052             return -TARGET_EFAULT;
4053         break;
4054     }
4055 
4056     return ret;
4057 }
4058 
4059 struct target_msgbuf {
4060     abi_long mtype;
4061     char	mtext[1];
4062 };
4063 
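     /*
      * msgsnd(): copy the guest message into a freshly allocated host msgbuf
      * (mtype byte-swapped, mtext copied verbatim), then try the native
      * msgsnd syscall and fall back to the multiplexed sys_ipc path where
      * only that is available.  The host buffer is msgsz + sizeof(long)
      * bytes, the long covering the host mtype field that precedes the text.
      */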
4064 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4065                                  ssize_t msgsz, int msgflg)
4066 {
4067     struct target_msgbuf *target_mb;
4068     struct msgbuf *host_mb;
4069     abi_long ret = 0;
4070 
4071     if (msgsz < 0) {
4072         return -TARGET_EINVAL;
4073     }
4074 
4075     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4076         return -TARGET_EFAULT;
4077     host_mb = g_try_malloc(msgsz + sizeof(long));
4078     if (!host_mb) {
4079         unlock_user_struct(target_mb, msgp, 0);
4080         return -TARGET_ENOMEM;
4081     }
4082     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4083     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4084     ret = -TARGET_ENOSYS;
4085 #ifdef __NR_msgsnd
4086     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4087 #endif
4088 #ifdef __NR_ipc
4089     if (ret == -TARGET_ENOSYS) {
4090 #ifdef __s390x__
4091         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4092                                  host_mb));
4093 #else
4094         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4095                                  host_mb, 0));
4096 #endif
4097     }
4098 #endif
4099     g_free(host_mb);
4100     unlock_user_struct(target_mb, msgp, 0);
4101 
4102     return ret;
4103 }
4104 
4105 #ifdef __NR_ipc
4106 #if defined(__sparc__)
4107 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4108 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4109 #elif defined(__s390x__)
4110 /* The s390 sys_ipc variant has only five parameters.  */
4111 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4112     ((long int[]){(long int)__msgp, __msgtyp})
4113 #else
4114 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4115     ((long int[]){(long int)__msgp, __msgtyp}), 0
4116 #endif
4117 #endif
4118 
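     /*
      * msgrcv(): receive into a host msgbuf first, then copy the data back
      * out to the guest.  On success "ret" is the number of mtext bytes
      * received; those bytes are written to the guest buffer at
      * msgp + sizeof(abi_ulong) (i.e. just past the target mtype field) and
      * the mtype itself is stored byte-swapped.  The __NR_ipc fallback uses
      * MSGRCV_ARGS() above to build the kludgy "pointer to {msgp, msgtyp}"
      * argument that the old ABI expects.
      */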
4119 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4120                                  ssize_t msgsz, abi_long msgtyp,
4121                                  int msgflg)
4122 {
4123     struct target_msgbuf *target_mb;
4124     char *target_mtext;
4125     struct msgbuf *host_mb;
4126     abi_long ret = 0;
4127 
4128     if (msgsz < 0) {
4129         return -TARGET_EINVAL;
4130     }
4131 
4132     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4133         return -TARGET_EFAULT;
4134 
4135     host_mb = g_try_malloc(msgsz + sizeof(long));
4136     if (!host_mb) {
4137         ret = -TARGET_ENOMEM;
4138         goto end;
4139     }
4140     ret = -TARGET_ENOSYS;
4141 #ifdef __NR_msgrcv
4142     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4143 #endif
4144 #ifdef __NR_ipc
4145     if (ret == -TARGET_ENOSYS) {
4146         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4147                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4148     }
4149 #endif
4150 
4151     if (ret > 0) {
4152         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4153         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4154         if (!target_mtext) {
4155             ret = -TARGET_EFAULT;
4156             goto end;
4157         }
4158         memcpy(target_mb->mtext, host_mb->mtext, ret);
4159         unlock_user(target_mtext, target_mtext_addr, ret);
4160     }
4161 
4162     target_mb->mtype = tswapal(host_mb->mtype);
4163 
4164 end:
4165     if (target_mb)
4166         unlock_user_struct(target_mb, msgp, 1);
4167     g_free(host_mb);
4168     return ret;
4169 }
4170 
4171 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4172                                                abi_ulong target_addr)
4173 {
4174     struct target_shmid_ds *target_sd;
4175 
4176     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4177         return -TARGET_EFAULT;
4178     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4179         return -TARGET_EFAULT;
4180     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4181     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4182     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4183     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4184     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4185     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4186     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4187     unlock_user_struct(target_sd, target_addr, 0);
4188     return 0;
4189 }
4190 
4191 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4192                                                struct shmid_ds *host_sd)
4193 {
4194     struct target_shmid_ds *target_sd;
4195 
4196     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4197         return -TARGET_EFAULT;
4198     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4199         return -TARGET_EFAULT;
4200     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4201     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4202     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4203     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4204     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4205     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4206     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4207     unlock_user_struct(target_sd, target_addr, 1);
4208     return 0;
4209 }
4210 
4211 struct  target_shminfo {
4212     abi_ulong shmmax;
4213     abi_ulong shmmin;
4214     abi_ulong shmmni;
4215     abi_ulong shmseg;
4216     abi_ulong shmall;
4217 };
4218 
4219 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4220                                               struct shminfo *host_shminfo)
4221 {
4222     struct target_shminfo *target_shminfo;
4223     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4224         return -TARGET_EFAULT;
4225     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4226     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4227     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4228     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4229     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4230     unlock_user_struct(target_shminfo, target_addr, 1);
4231     return 0;
4232 }
4233 
4234 struct target_shm_info {
4235     int used_ids;
4236     abi_ulong shm_tot;
4237     abi_ulong shm_rss;
4238     abi_ulong shm_swp;
4239     abi_ulong swap_attempts;
4240     abi_ulong swap_successes;
4241 };
4242 
4243 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4244                                                struct shm_info *host_shm_info)
4245 {
4246     struct target_shm_info *target_shm_info;
4247     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4248         return -TARGET_EFAULT;
4249     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4250     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4251     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4252     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4253     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4254     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4255     unlock_user_struct(target_shm_info, target_addr, 1);
4256     return 0;
4257 }
4258 
4259 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4260 {
4261     struct shmid_ds dsarg;
4262     struct shminfo shminfo;
4263     struct shm_info shm_info;
4264     abi_long ret = -TARGET_EINVAL;
4265 
4266     cmd &= 0xff;
4267 
4268     switch(cmd) {
4269     case IPC_STAT:
4270     case IPC_SET:
4271     case SHM_STAT:
4272         if (target_to_host_shmid_ds(&dsarg, buf))
4273             return -TARGET_EFAULT;
4274         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4275         if (host_to_target_shmid_ds(buf, &dsarg))
4276             return -TARGET_EFAULT;
4277         break;
4278     case IPC_INFO:
4279         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4280         if (host_to_target_shminfo(buf, &shminfo))
4281             return -TARGET_EFAULT;
4282         break;
4283     case SHM_INFO:
4284         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4285         if (host_to_target_shm_info(buf, &shm_info))
4286             return -TARGET_EFAULT;
4287         break;
4288     case IPC_RMID:
4289     case SHM_LOCK:
4290     case SHM_UNLOCK:
4291         ret = get_errno(shmctl(shmid, cmd, NULL));
4292         break;
4293     }
4294 
4295     return ret;
4296 }
4297 
4298 #ifndef TARGET_FORCE_SHMLBA
4299 /* For most architectures, SHMLBA is the same as the page size;
4300  * some architectures have larger values, in which case they should
4301  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4302  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4303  * and defining its own value for SHMLBA.
4304  *
4305  * The kernel also permits SHMLBA to be set by the architecture to a
4306  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4307  * this means that addresses are rounded to the large size if
4308  * SHM_RND is set but addresses not aligned to that size are not rejected
4309  * as long as they are at least page-aligned. Since the only architecture
4310  * which uses this is ia64 this code doesn't provide for that oddity.
4311  */
4312 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4313 {
4314     return TARGET_PAGE_SIZE;
4315 }
4316 #endif
4317 
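     /*
      * shmat(): attach the segment on the host and expose it to the guest.
      * If the guest supplied an address it must respect the target SHMLBA
      * (rounded down when SHM_RND is set, rejected otherwise); if it did
      * not, a suitable hole in the guest address space is found with
      * mmap_find_vma() using the larger of the host and target SHMLBA so
      * the host shmat() alignment requirement is honoured too.  On success
      * the guest page flags are updated and the mapping is remembered in
      * shm_regions[] for a later do_shmdt().
      */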
4318 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4319                                  int shmid, abi_ulong shmaddr, int shmflg)
4320 {
4321     abi_long raddr;
4322     void *host_raddr;
4323     struct shmid_ds shm_info;
4324     int i,ret;
4325     abi_ulong shmlba;
4326 
4327     /* find out the length of the shared memory segment */
4328     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4329     if (is_error(ret)) {
4330         /* can't get length, bail out */
4331         return ret;
4332     }
4333 
4334     shmlba = target_shmlba(cpu_env);
4335 
4336     if (shmaddr & (shmlba - 1)) {
4337         if (shmflg & SHM_RND) {
4338             shmaddr &= ~(shmlba - 1);
4339         } else {
4340             return -TARGET_EINVAL;
4341         }
4342     }
4343     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4344         return -TARGET_EINVAL;
4345     }
4346 
4347     mmap_lock();
4348 
4349     if (shmaddr)
4350         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4351     else {
4352         abi_ulong mmap_start;
4353 
4354         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4355         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4356 
4357         if (mmap_start == -1) {
4358             errno = ENOMEM;
4359             host_raddr = (void *)-1;
4360         } else
4361             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4362     }
4363 
4364     if (host_raddr == (void *)-1) {
4365         mmap_unlock();
4366         return get_errno((long)host_raddr);
4367     }
4368     raddr = h2g((unsigned long)host_raddr);
4369 
4370     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4371                    PAGE_VALID | PAGE_READ |
4372                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4373 
4374     for (i = 0; i < N_SHM_REGIONS; i++) {
4375         if (!shm_regions[i].in_use) {
4376             shm_regions[i].in_use = true;
4377             shm_regions[i].start = raddr;
4378             shm_regions[i].size = shm_info.shm_segsz;
4379             break;
4380         }
4381     }
4382 
4383     mmap_unlock();
4384     return raddr;
4385 
4386 }
4387 
4388 static inline abi_long do_shmdt(abi_ulong shmaddr)
4389 {
4390     int i;
4391     abi_long rv;
4392 
4393     mmap_lock();
4394 
4395     for (i = 0; i < N_SHM_REGIONS; ++i) {
4396         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4397             shm_regions[i].in_use = false;
4398             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4399             break;
4400         }
4401     }
4402     rv = get_errno(shmdt(g2h(shmaddr)));
4403 
4404     mmap_unlock();
4405 
4406     return rv;
4407 }
4408 
4409 #ifdef TARGET_NR_ipc
4410 /* ??? This only works with linear mappings.  */
4411 /* do_ipc() must return target values and target errnos. */
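     /*
      * ipc(2) is the older multiplexer for the SysV IPC calls: "call"
      * selects the operation in its low 16 bits while the top 16 bits carry
      * a version number used by some operations (e.g. version 0 of
      * IPCOP_msgrcv passes its msgp/msgtyp pair indirectly through a kludge
      * structure).  The individual operations are forwarded to the same
      * do_*() helpers that back the modern direct syscalls.
      */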
4412 static abi_long do_ipc(CPUArchState *cpu_env,
4413                        unsigned int call, abi_long first,
4414                        abi_long second, abi_long third,
4415                        abi_long ptr, abi_long fifth)
4416 {
4417     int version;
4418     abi_long ret = 0;
4419 
4420     version = call >> 16;
4421     call &= 0xffff;
4422 
4423     switch (call) {
4424     case IPCOP_semop:
4425         ret = do_semtimedop(first, ptr, second, 0);
4426         break;
4427     case IPCOP_semtimedop:
4428     /*
4429      * The s390 sys_ipc variant has only five parameters instead of six
4430      * (as in the default variant), and the only difference is the handling
4431      * of SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4432      * to a struct timespec where the generic variant uses the fifth parameter.
4433      */
4434 #if defined(TARGET_S390X)
4435         ret = do_semtimedop(first, ptr, second, third);
4436 #else
4437         ret = do_semtimedop(first, ptr, second, fifth);
4438 #endif
4439         break;
4440 
4441     case IPCOP_semget:
4442         ret = get_errno(semget(first, second, third));
4443         break;
4444 
4445     case IPCOP_semctl: {
4446         /* The semun argument to semctl is passed by value, so dereference the
4447          * ptr argument. */
4448         abi_ulong atptr;
4449         get_user_ual(atptr, ptr);
4450         ret = do_semctl(first, second, third, atptr);
4451         break;
4452     }
4453 
4454     case IPCOP_msgget:
4455         ret = get_errno(msgget(first, second));
4456         break;
4457 
4458     case IPCOP_msgsnd:
4459         ret = do_msgsnd(first, ptr, second, third);
4460         break;
4461 
4462     case IPCOP_msgctl:
4463         ret = do_msgctl(first, second, ptr);
4464         break;
4465 
4466     case IPCOP_msgrcv:
4467         switch (version) {
4468         case 0:
4469             {
4470                 struct target_ipc_kludge {
4471                     abi_long msgp;
4472                     abi_long msgtyp;
4473                 } *tmp;
4474 
4475                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4476                     ret = -TARGET_EFAULT;
4477                     break;
4478                 }
4479 
4480                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4481 
4482                 unlock_user_struct(tmp, ptr, 0);
4483                 break;
4484             }
4485         default:
4486             ret = do_msgrcv(first, ptr, second, fifth, third);
4487         }
4488         break;
4489 
4490     case IPCOP_shmat:
4491         switch (version) {
4492         default:
4493         {
4494             abi_ulong raddr;
4495             raddr = do_shmat(cpu_env, first, ptr, second);
4496             if (is_error(raddr))
4497                 return get_errno(raddr);
4498             if (put_user_ual(raddr, third))
4499                 return -TARGET_EFAULT;
4500             break;
4501         }
4502         case 1:
4503             ret = -TARGET_EINVAL;
4504             break;
4505         }
4506         break;
4507     case IPCOP_shmdt:
4508         ret = do_shmdt(ptr);
4509         break;
4510 
4511     case IPCOP_shmget:
4512         /* IPC_* flag values are the same on all linux platforms */
4513         ret = get_errno(shmget(first, second, third));
4514         break;
4515 
4516         /* IPC_* and SHM_* command values are the same on all linux platforms */
4517     case IPCOP_shmctl:
4518         ret = do_shmctl(first, second, ptr);
4519         break;
4520     default:
4521         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4522                       call, version);
4523         ret = -TARGET_ENOSYS;
4524         break;
4525     }
4526     return ret;
4527 }
4528 #endif
4529 
4530 /* kernel structure types definitions */
4531 
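     /*
      * syscall_types.h is included twice with different definitions of the
      * STRUCT()/STRUCT_SPECIAL() macros: the first pass turns every entry
      * into a STRUCT_<name> enumerator, the second into a
      * struct_<name>_def[] array of argtype codes terminated by TYPE_NULL,
      * which the thunk code uses to convert the structure between target and
      * host layouts.  For example, a hypothetical STRUCT(foo, TYPE_INT,
      * TYPE_LONG) entry would yield STRUCT_foo and
      * { TYPE_INT, TYPE_LONG, TYPE_NULL }.
      */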
4532 #define STRUCT(name, ...) STRUCT_ ## name,
4533 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4534 enum {
4535 #include "syscall_types.h"
4536 STRUCT_MAX
4537 };
4538 #undef STRUCT
4539 #undef STRUCT_SPECIAL
4540 
4541 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4542 #define STRUCT_SPECIAL(name)
4543 #include "syscall_types.h"
4544 #undef STRUCT
4545 #undef STRUCT_SPECIAL
4546 
4547 #define MAX_STRUCT_SIZE 4096
4548 
4549 #ifdef CONFIG_FIEMAP
4550 /* So fiemap access checks don't overflow on 32 bit systems.
4551  * This is very slightly smaller than the limit imposed by
4552  * the underlying kernel.
4553  */
4554 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4555                             / sizeof(struct fiemap_extent))
4556 
4557 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4558                                        int fd, int cmd, abi_long arg)
4559 {
4560     /* The parameter for this ioctl is a struct fiemap followed
4561      * by an array of struct fiemap_extent whose size is set
4562      * in fiemap->fm_extent_count. The array is filled in by the
4563      * ioctl.
4564      */
4565     int target_size_in, target_size_out;
4566     struct fiemap *fm;
4567     const argtype *arg_type = ie->arg_type;
4568     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4569     void *argptr, *p;
4570     abi_long ret;
4571     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4572     uint32_t outbufsz;
4573     int free_fm = 0;
4574 
4575     assert(arg_type[0] == TYPE_PTR);
4576     assert(ie->access == IOC_RW);
4577     arg_type++;
4578     target_size_in = thunk_type_size(arg_type, 0);
4579     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4580     if (!argptr) {
4581         return -TARGET_EFAULT;
4582     }
4583     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4584     unlock_user(argptr, arg, 0);
4585     fm = (struct fiemap *)buf_temp;
4586     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4587         return -TARGET_EINVAL;
4588     }
4589 
4590     outbufsz = sizeof (*fm) +
4591         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4592 
4593     if (outbufsz > MAX_STRUCT_SIZE) {
4594         /* We can't fit all the extents into the fixed size buffer.
4595          * Allocate one that is large enough and use it instead.
4596          */
4597         fm = g_try_malloc(outbufsz);
4598         if (!fm) {
4599             return -TARGET_ENOMEM;
4600         }
4601         memcpy(fm, buf_temp, sizeof(struct fiemap));
4602         free_fm = 1;
4603     }
4604     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4605     if (!is_error(ret)) {
4606         target_size_out = target_size_in;
4607         /* An extent_count of 0 means we were only counting the extents
4608          * so there are no structs to copy
4609          */
4610         if (fm->fm_extent_count != 0) {
4611             target_size_out += fm->fm_mapped_extents * extent_size;
4612         }
4613         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4614         if (!argptr) {
4615             ret = -TARGET_EFAULT;
4616         } else {
4617             /* Convert the struct fiemap */
4618             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4619             if (fm->fm_extent_count != 0) {
4620                 p = argptr + target_size_in;
4621                 /* ...and then all the struct fiemap_extents */
4622                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4623                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4624                                   THUNK_TARGET);
4625                     p += extent_size;
4626                 }
4627             }
4628             unlock_user(argptr, arg, target_size_out);
4629         }
4630     }
4631     if (free_fm) {
4632         g_free(fm);
4633     }
4634     return ret;
4635 }
4636 #endif
4637 
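     /*
      * SIOCGIFCONF: the target's struct ifreq may be laid out (and sized)
      * differently from the host's, so the buffer length has to be rescaled
      * in both directions: the target ifc_len is divided by the target ifreq
      * size to get a count, the host ioctl runs with
      * count * sizeof(struct ifreq) bytes of scratch space, and the results
      * are converted entry by entry back into target ifreq records.  With a
      * NULL ifc_buf (the "just return the needed length" form) only the
      * rescaled ifc_len is copied back.
      */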
4638 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4639                                 int fd, int cmd, abi_long arg)
4640 {
4641     const argtype *arg_type = ie->arg_type;
4642     int target_size;
4643     void *argptr;
4644     int ret;
4645     struct ifconf *host_ifconf;
4646     uint32_t outbufsz;
4647     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4648     int target_ifreq_size;
4649     int nb_ifreq;
4650     int free_buf = 0;
4651     int i;
4652     int target_ifc_len;
4653     abi_long target_ifc_buf;
4654     int host_ifc_len;
4655     char *host_ifc_buf;
4656 
4657     assert(arg_type[0] == TYPE_PTR);
4658     assert(ie->access == IOC_RW);
4659 
4660     arg_type++;
4661     target_size = thunk_type_size(arg_type, 0);
4662 
4663     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4664     if (!argptr)
4665         return -TARGET_EFAULT;
4666     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4667     unlock_user(argptr, arg, 0);
4668 
4669     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4670     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4671     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4672 
4673     if (target_ifc_buf != 0) {
4674         target_ifc_len = host_ifconf->ifc_len;
4675         nb_ifreq = target_ifc_len / target_ifreq_size;
4676         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4677 
4678         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4679         if (outbufsz > MAX_STRUCT_SIZE) {
4680             /*
4681              * We can't fit all the ifreq entries into the fixed size buffer.
4682              * Allocate one that is large enough and use it instead.
4683              */
4684             host_ifconf = g_try_malloc(outbufsz);
4685             if (!host_ifconf) {
4686                 return -TARGET_ENOMEM;
4687             }
4688             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4689             free_buf = 1;
4690         }
4691         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4692 
4693         host_ifconf->ifc_len = host_ifc_len;
4694     } else {
4695         host_ifc_buf = NULL;
4696     }
4697     host_ifconf->ifc_buf = host_ifc_buf;
4698 
4699     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4700     if (!is_error(ret)) {
4701         /* convert host ifc_len to target ifc_len */
4702 
4703         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4704         target_ifc_len = nb_ifreq * target_ifreq_size;
4705         host_ifconf->ifc_len = target_ifc_len;
4706 
4707         /* restore target ifc_buf */
4708 
4709         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4710 
4711         /* copy struct ifconf to target user */
4712 
4713         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4714         if (!argptr)
4715             return -TARGET_EFAULT;
4716         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4717         unlock_user(argptr, arg, target_size);
4718 
4719         if (target_ifc_buf != 0) {
4720             /* copy ifreq[] to target user */
4721             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                if (free_buf) {
                    g_free(host_ifconf);
                }
                return -TARGET_EFAULT;
            }
4722             for (i = 0; i < nb_ifreq ; i++) {
4723                 thunk_convert(argptr + i * target_ifreq_size,
4724                               host_ifc_buf + i * sizeof(struct ifreq),
4725                               ifreq_arg_type, THUNK_TARGET);
4726             }
4727             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4728         }
4729     }
4730 
4731     if (free_buf) {
4732         g_free(host_ifconf);
4733     }
4734 
4735     return ret;
4736 }
4737 
4738 #if defined(CONFIG_USBFS)
4739 #if HOST_LONG_BITS > 64
4740 #error USBDEVFS thunks do not support >64 bit hosts yet.
4741 #endif
4742 struct live_urb {
4743     uint64_t target_urb_adr;
4744     uint64_t target_buf_adr;
4745     char *target_buf_ptr;
4746     struct usbdevfs_urb host_urb;
4747 };
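/*
 * The kernel identifies a URB by the pointer that was submitted, so each
 * guest URB gets its own heap-allocated live_urb and &lurb->host_urb is
 * what we hand to the kernel.  On reap, the returned pointer is mapped
 * back to its live_urb by subtracting offsetof(struct live_urb, host_urb).
 * The hash table below is keyed on the 64-bit guest URB address, which is
 * why target_urb_adr must remain the first member (g_int64_hash hashes
 * the first eight bytes of the key).
 */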
4748 
4749 static GHashTable *usbdevfs_urb_hashtable(void)
4750 {
4751     static GHashTable *urb_hashtable;
4752 
4753     if (!urb_hashtable) {
4754         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4755     }
4756     return urb_hashtable;
4757 }
4758 
4759 static void urb_hashtable_insert(struct live_urb *urb)
4760 {
4761     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4762     g_hash_table_insert(urb_hashtable, urb, urb);
4763 }
4764 
4765 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4766 {
4767     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4768     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4769 }
4770 
4771 static void urb_hashtable_remove(struct live_urb *urb)
4772 {
4773     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4774     g_hash_table_remove(urb_hashtable, urb);
4775 }
4776 
4777 static abi_long
4778 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4779                           int fd, int cmd, abi_long arg)
4780 {
4781     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4782     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4783     struct live_urb *lurb;
4784     void *argptr;
4785     uint64_t hurb;
4786     int target_size;
4787     uintptr_t target_urb_adr;
4788     abi_long ret;
4789 
4790     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4791 
4792     memset(buf_temp, 0, sizeof(uint64_t));
4793     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4794     if (is_error(ret)) {
4795         return ret;
4796     }
4797 
4798     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4799     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4800     if (!lurb->target_urb_adr) {
4801         return -TARGET_EFAULT;
4802     }
4803     urb_hashtable_remove(lurb);
4804     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4805         lurb->host_urb.buffer_length);
4806     lurb->target_buf_ptr = NULL;
4807 
4808     /* restore the guest buffer pointer */
4809     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4810 
4811     /* update the guest urb struct */
4812     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4813     if (!argptr) {
4814         g_free(lurb);
4815         return -TARGET_EFAULT;
4816     }
4817     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4818     unlock_user(argptr, lurb->target_urb_adr, target_size);
4819 
4820     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4821     /* write back the urb handle */
4822     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4823     if (!argptr) {
4824         g_free(lurb);
4825         return -TARGET_EFAULT;
4826     }
4827 
4828     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4829     target_urb_adr = lurb->target_urb_adr;
4830     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4831     unlock_user(argptr, arg, target_size);
4832 
4833     g_free(lurb);
4834     return ret;
4835 }
4836 
4837 static abi_long
4838 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4839                              uint8_t *buf_temp __attribute__((unused)),
4840                              int fd, int cmd, abi_long arg)
4841 {
4842     struct live_urb *lurb;
4843 
4844     /* map target address back to host URB with metadata. */
4845     lurb = urb_hashtable_lookup(arg);
4846     if (!lurb) {
4847         return -TARGET_EFAULT;
4848     }
4849     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4850 }
4851 
4852 static abi_long
4853 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4854                             int fd, int cmd, abi_long arg)
4855 {
4856     const argtype *arg_type = ie->arg_type;
4857     int target_size;
4858     abi_long ret;
4859     void *argptr;
4860     int rw_dir;
4861     struct live_urb *lurb;
4862 
4863     /*
4864      * Each submitted URB needs to map to a unique ID for the
4865      * kernel, and that unique ID needs to be a pointer to
4866      * host memory.  Hence, we need to malloc for each URB.
4867      * Isochronous transfers have a variable length struct.
4868      */
4869     arg_type++;
4870     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4871 
4872     /* construct host copy of urb and metadata */
4873     lurb = g_try_malloc0(sizeof(struct live_urb));
4874     if (!lurb) {
4875         return -TARGET_ENOMEM;
4876     }
4877 
4878     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4879     if (!argptr) {
4880         g_free(lurb);
4881         return -TARGET_EFAULT;
4882     }
4883     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4884     unlock_user(argptr, arg, 0);
4885 
4886     lurb->target_urb_adr = arg;
4887     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4888 
4889     /* buffer space used depends on endpoint type so lock the entire buffer */
4890     /* control type urbs should check the buffer contents for true direction */
4891     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4892     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4893         lurb->host_urb.buffer_length, 1);
4894     if (lurb->target_buf_ptr == NULL) {
4895         g_free(lurb);
4896         return -TARGET_EFAULT;
4897     }
4898 
4899     /* update buffer pointer in host copy */
4900     lurb->host_urb.buffer = lurb->target_buf_ptr;
4901 
4902     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4903     if (is_error(ret)) {
4904         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4905         g_free(lurb);
4906     } else {
4907         urb_hashtable_insert(lurb);
4908     }
4909 
4910     return ret;
4911 }
4912 #endif /* CONFIG_USBFS */
4913 
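/*
 * Device-mapper ioctls take a struct dm_ioctl header followed by a
 * variable-sized payload: data_start is the offset of the payload from
 * the start of the header and data_size is the total size of the buffer.
 * What the payload contains (strings, dm_target_spec entries, name or
 * version lists, ...) depends on the individual DM_* command, so both the
 * inbound and the outbound conversions below have to switch on host_cmd.
 */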
4914 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4915                             int cmd, abi_long arg)
4916 {
4917     void *argptr;
4918     struct dm_ioctl *host_dm;
4919     abi_long guest_data;
4920     uint32_t guest_data_size;
4921     int target_size;
4922     const argtype *arg_type = ie->arg_type;
4923     abi_long ret;
4924     void *big_buf = NULL;
4925     char *host_data;
4926 
4927     arg_type++;
4928     target_size = thunk_type_size(arg_type, 0);
4929     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4930     if (!argptr) {
4931         ret = -TARGET_EFAULT;
4932         goto out;
4933     }
4934     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4935     unlock_user(argptr, arg, 0);
4936 
4937     /* buf_temp is too small, so fetch things into a bigger buffer */
4938     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4939     memcpy(big_buf, buf_temp, target_size);
4940     buf_temp = big_buf;
4941     host_dm = big_buf;
4942 
4943     guest_data = arg + host_dm->data_start;
4944     if ((guest_data - arg) < 0) {
4945         ret = -TARGET_EINVAL;
4946         goto out;
4947     }
4948     guest_data_size = host_dm->data_size - host_dm->data_start;
4949     host_data = (char*)host_dm + host_dm->data_start;
4950 
4951     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4952     if (!argptr) {
4953         ret = -TARGET_EFAULT;
4954         goto out;
4955     }
4956 
4957     switch (ie->host_cmd) {
4958     case DM_REMOVE_ALL:
4959     case DM_LIST_DEVICES:
4960     case DM_DEV_CREATE:
4961     case DM_DEV_REMOVE:
4962     case DM_DEV_SUSPEND:
4963     case DM_DEV_STATUS:
4964     case DM_DEV_WAIT:
4965     case DM_TABLE_STATUS:
4966     case DM_TABLE_CLEAR:
4967     case DM_TABLE_DEPS:
4968     case DM_LIST_VERSIONS:
4969         /* no input data */
4970         break;
4971     case DM_DEV_RENAME:
4972     case DM_DEV_SET_GEOMETRY:
4973         /* data contains only strings */
4974         memcpy(host_data, argptr, guest_data_size);
4975         break;
4976     case DM_TARGET_MSG:
4977         memcpy(host_data, argptr, guest_data_size);
4978         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4979         break;
4980     case DM_TABLE_LOAD:
4981     {
4982         void *gspec = argptr;
4983         void *cur_data = host_data;
4984         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4985         int spec_size = thunk_type_size(arg_type, 0);
4986         int i;
4987 
4988         for (i = 0; i < host_dm->target_count; i++) {
4989             struct dm_target_spec *spec = cur_data;
4990             uint32_t next;
4991             int slen;
4992 
4993             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4994             slen = strlen((char*)gspec + spec_size) + 1;
4995             next = spec->next;
4996             spec->next = sizeof(*spec) + slen;
4997             strcpy((char*)&spec[1], gspec + spec_size);
4998             gspec += next;
4999             cur_data += spec->next;
5000         }
5001         break;
5002     }
5003     default:
5004         ret = -TARGET_EINVAL;
5005         unlock_user(argptr, guest_data, 0);
5006         goto out;
5007     }
5008     unlock_user(argptr, guest_data, 0);
5009 
5010     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5011     if (!is_error(ret)) {
5012         guest_data = arg + host_dm->data_start;
5013         guest_data_size = host_dm->data_size - host_dm->data_start;
5014         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
5015         switch (ie->host_cmd) {
5016         case DM_REMOVE_ALL:
5017         case DM_DEV_CREATE:
5018         case DM_DEV_REMOVE:
5019         case DM_DEV_RENAME:
5020         case DM_DEV_SUSPEND:
5021         case DM_DEV_STATUS:
5022         case DM_TABLE_LOAD:
5023         case DM_TABLE_CLEAR:
5024         case DM_TARGET_MSG:
5025         case DM_DEV_SET_GEOMETRY:
5026             /* no return data */
5027             break;
5028         case DM_LIST_DEVICES:
5029         {
5030             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5031             uint32_t remaining_data = guest_data_size;
5032             void *cur_data = argptr;
5033             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5034             int nl_size = 12; /* can't use thunk_size due to alignment */
5035 
5036             while (1) {
5037                 uint32_t next = nl->next;
5038                 if (next) {
5039                     nl->next = nl_size + (strlen(nl->name) + 1);
5040                 }
5041                 if (remaining_data < nl->next) {
5042                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5043                     break;
5044                 }
5045                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5046                 strcpy(cur_data + nl_size, nl->name);
5047                 cur_data += nl->next;
5048                 remaining_data -= nl->next;
5049                 if (!next) {
5050                     break;
5051                 }
5052                 nl = (void*)nl + next;
5053             }
5054             break;
5055         }
5056         case DM_DEV_WAIT:
5057         case DM_TABLE_STATUS:
5058         {
5059             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5060             void *cur_data = argptr;
5061             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5062             int spec_size = thunk_type_size(arg_type, 0);
5063             int i;
5064 
5065             for (i = 0; i < host_dm->target_count; i++) {
5066                 uint32_t next = spec->next;
5067                 int slen = strlen((char*)&spec[1]) + 1;
5068                 spec->next = (cur_data - argptr) + spec_size + slen;
5069                 if (guest_data_size < spec->next) {
5070                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5071                     break;
5072                 }
5073                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5074                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5075                 cur_data = argptr + spec->next;
5076                 spec = (void*)host_dm + host_dm->data_start + next;
5077             }
5078             break;
5079         }
5080         case DM_TABLE_DEPS:
5081         {
5082             void *hdata = (void*)host_dm + host_dm->data_start;
5083             int count = *(uint32_t*)hdata;
5084             uint64_t *hdev = hdata + 8;
5085             uint64_t *gdev = argptr + 8;
5086             int i;
5087 
5088             *(uint32_t*)argptr = tswap32(count);
5089             for (i = 0; i < count; i++) {
5090                 *gdev = tswap64(*hdev);
5091                 gdev++;
5092                 hdev++;
5093             }
5094             break;
5095         }
5096         case DM_LIST_VERSIONS:
5097         {
5098             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5099             uint32_t remaining_data = guest_data_size;
5100             void *cur_data = argptr;
5101             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5102             int vers_size = thunk_type_size(arg_type, 0);
5103 
5104             while (1) {
5105                 uint32_t next = vers->next;
5106                 if (next) {
5107                     vers->next = vers_size + (strlen(vers->name) + 1);
5108                 }
5109                 if (remaining_data < vers->next) {
5110                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5111                     break;
5112                 }
5113                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5114                 strcpy(cur_data + vers_size, vers->name);
5115                 cur_data += vers->next;
5116                 remaining_data -= vers->next;
5117                 if (!next) {
5118                     break;
5119                 }
5120                 vers = (void*)vers + next;
5121             }
5122             break;
5123         }
5124         default:
5125             unlock_user(argptr, guest_data, 0);
5126             ret = -TARGET_EINVAL;
5127             goto out;
5128         }
5129         unlock_user(argptr, guest_data, guest_data_size);
5130 
5131         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5132         if (!argptr) {
5133             ret = -TARGET_EFAULT;
5134             goto out;
5135         }
5136         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5137         unlock_user(argptr, arg, target_size);
5138     }
5139 out:
5140     g_free(big_buf);
5141     return ret;
5142 }
5143 
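/*
 * BLKPG: struct blkpg_ioctl_arg carries an opcode plus a data pointer
 * which, for the partition operations handled here, refers to a struct
 * blkpg_partition in guest memory.  Both levels are converted and the
 * data pointer is repointed at the host-side copy before the ioctl.
 */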
5144 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5145                                int cmd, abi_long arg)
5146 {
5147     void *argptr;
5148     int target_size;
5149     const argtype *arg_type = ie->arg_type;
5150     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5151     abi_long ret;
5152 
5153     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5154     struct blkpg_partition host_part;
5155 
5156     /* Read and convert blkpg */
5157     arg_type++;
5158     target_size = thunk_type_size(arg_type, 0);
5159     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5160     if (!argptr) {
5161         ret = -TARGET_EFAULT;
5162         goto out;
5163     }
5164     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5165     unlock_user(argptr, arg, 0);
5166 
5167     switch (host_blkpg->op) {
5168     case BLKPG_ADD_PARTITION:
5169     case BLKPG_DEL_PARTITION:
5170         /* payload is struct blkpg_partition */
5171         break;
5172     default:
5173         /* Unknown opcode */
5174         ret = -TARGET_EINVAL;
5175         goto out;
5176     }
5177 
5178     /* Read and convert blkpg->data */
5179     arg = (abi_long)(uintptr_t)host_blkpg->data;
5180     target_size = thunk_type_size(part_arg_type, 0);
5181     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5182     if (!argptr) {
5183         ret = -TARGET_EFAULT;
5184         goto out;
5185     }
5186     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5187     unlock_user(argptr, arg, 0);
5188 
5189     /* Swizzle the data pointer to our local copy and call! */
5190     host_blkpg->data = &host_part;
5191     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5192 
5193 out:
5194     return ret;
5195 }
5196 
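/*
 * The route ioctls handled here pass a struct rtentry whose rt_dev field
 * is a pointer to a device name string in guest memory, which the generic
 * struct thunk cannot follow.  The struct is therefore converted field by
 * field, and the rt_dev string is locked into host memory separately and
 * unlocked again after the ioctl.
 */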
5197 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5198                                 int fd, int cmd, abi_long arg)
5199 {
5200     const argtype *arg_type = ie->arg_type;
5201     const StructEntry *se;
5202     const argtype *field_types;
5203     const int *dst_offsets, *src_offsets;
5204     int target_size;
5205     void *argptr;
5206     abi_ulong *target_rt_dev_ptr = NULL;
5207     unsigned long *host_rt_dev_ptr = NULL;
5208     abi_long ret;
5209     int i;
5210 
5211     assert(ie->access == IOC_W);
5212     assert(*arg_type == TYPE_PTR);
5213     arg_type++;
5214     assert(*arg_type == TYPE_STRUCT);
5215     target_size = thunk_type_size(arg_type, 0);
5216     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5217     if (!argptr) {
5218         return -TARGET_EFAULT;
5219     }
5220     arg_type++;
5221     assert(*arg_type == (int)STRUCT_rtentry);
5222     se = struct_entries + *arg_type++;
5223     assert(se->convert[0] == NULL);
5224     /* convert struct here to be able to catch rt_dev string */
5225     field_types = se->field_types;
5226     dst_offsets = se->field_offsets[THUNK_HOST];
5227     src_offsets = se->field_offsets[THUNK_TARGET];
5228     for (i = 0; i < se->nb_fields; i++) {
5229         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5230             assert(*field_types == TYPE_PTRVOID);
5231             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5232             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5233             if (*target_rt_dev_ptr != 0) {
5234                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5235                                                   tswapal(*target_rt_dev_ptr));
5236                 if (!*host_rt_dev_ptr) {
5237                     unlock_user(argptr, arg, 0);
5238                     return -TARGET_EFAULT;
5239                 }
5240             } else {
5241                 *host_rt_dev_ptr = 0;
5242             }
5243             field_types++;
5244             continue;
5245         }
5246         field_types = thunk_convert(buf_temp + dst_offsets[i],
5247                                     argptr + src_offsets[i],
5248                                     field_types, THUNK_HOST);
5249     }
5250     unlock_user(argptr, arg, 0);
5251 
5252     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5253 
5254     assert(host_rt_dev_ptr != NULL);
5255     assert(target_rt_dev_ptr != NULL);
5256     if (*host_rt_dev_ptr != 0) {
5257         unlock_user((void *)*host_rt_dev_ptr,
5258                     *target_rt_dev_ptr, 0);
5259     }
5260     return ret;
5261 }
5262 
5263 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5264                                      int fd, int cmd, abi_long arg)
5265 {
5266     int sig = target_to_host_signal(arg);
5267     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5268 }
5269 
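/*
 * SIOCGSTAMP/SIOCGSTAMPNS come in an "old" flavour and a 64-bit-time
 * flavour.  The host timestamp is fetched once and then copied out in
 * whichever of the two target formats matches the command the guest
 * actually issued.
 */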
5270 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5271                                     int fd, int cmd, abi_long arg)
5272 {
5273     struct timeval tv;
5274     abi_long ret;
5275 
5276     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5277     if (is_error(ret)) {
5278         return ret;
5279     }
5280 
5281     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5282         if (copy_to_user_timeval(arg, &tv)) {
5283             return -TARGET_EFAULT;
5284         }
5285     } else {
5286         if (copy_to_user_timeval64(arg, &tv)) {
5287             return -TARGET_EFAULT;
5288         }
5289     }
5290 
5291     return ret;
5292 }
5293 
5294 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5295                                       int fd, int cmd, abi_long arg)
5296 {
5297     struct timespec ts;
5298     abi_long ret;
5299 
5300     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5301     if (is_error(ret)) {
5302         return ret;
5303     }
5304 
5305     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5306         if (host_to_target_timespec(arg, &ts)) {
5307             return -TARGET_EFAULT;
5308         }
5309     } else {
5310         if (host_to_target_timespec64(arg, &ts)) {
5311             return -TARGET_EFAULT;
5312         }
5313     }
5314 
5315     return ret;
5316 }
5317 
5318 #ifdef TIOCGPTPEER
5319 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5320                                      int fd, int cmd, abi_long arg)
5321 {
5322     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5323     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5324 }
5325 #endif
5326 
5327 #ifdef HAVE_DRM_H
5328 
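/*
 * DRM_IOCTL_VERSION: struct drm_version holds three (length, pointer)
 * pairs for the name, date and desc strings.  The guest buffers are
 * locked for writing so the kernel fills them in place; on success the
 * version numbers and updated lengths are copied back, on failure the
 * buffers are unlocked without copying anything out.
 */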
5329 static void unlock_drm_version(struct drm_version *host_ver,
5330                                struct target_drm_version *target_ver,
5331                                bool copy)
5332 {
5333     unlock_user(host_ver->name, target_ver->name,
5334                                 copy ? host_ver->name_len : 0);
5335     unlock_user(host_ver->date, target_ver->date,
5336                                 copy ? host_ver->date_len : 0);
5337     unlock_user(host_ver->desc, target_ver->desc,
5338                                 copy ? host_ver->desc_len : 0);
5339 }
5340 
5341 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5342                                           struct target_drm_version *target_ver)
5343 {
5344     memset(host_ver, 0, sizeof(*host_ver));
5345 
5346     __get_user(host_ver->name_len, &target_ver->name_len);
5347     if (host_ver->name_len) {
5348         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5349                                    target_ver->name_len, 0);
5350         if (!host_ver->name) {
5351             return -EFAULT;
5352         }
5353     }
5354 
5355     __get_user(host_ver->date_len, &target_ver->date_len);
5356     if (host_ver->date_len) {
5357         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5358                                    target_ver->date_len, 0);
5359         if (!host_ver->date) {
5360             goto err;
5361         }
5362     }
5363 
5364     __get_user(host_ver->desc_len, &target_ver->desc_len);
5365     if (host_ver->desc_len) {
5366         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5367                                    target_ver->desc_len, 0);
5368         if (!host_ver->desc) {
5369             goto err;
5370         }
5371     }
5372 
5373     return 0;
5374 err:
5375     unlock_drm_version(host_ver, target_ver, false);
5376     return -EFAULT;
5377 }
5378 
5379 static inline void host_to_target_drmversion(
5380                                           struct target_drm_version *target_ver,
5381                                           struct drm_version *host_ver)
5382 {
5383     __put_user(host_ver->version_major, &target_ver->version_major);
5384     __put_user(host_ver->version_minor, &target_ver->version_minor);
5385     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5386     __put_user(host_ver->name_len, &target_ver->name_len);
5387     __put_user(host_ver->date_len, &target_ver->date_len);
5388     __put_user(host_ver->desc_len, &target_ver->desc_len);
5389     unlock_drm_version(host_ver, target_ver, true);
5390 }
5391 
5392 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5393                              int fd, int cmd, abi_long arg)
5394 {
5395     struct drm_version *ver;
5396     struct target_drm_version *target_ver;
5397     abi_long ret;
5398 
5399     switch (ie->host_cmd) {
5400     case DRM_IOCTL_VERSION:
5401         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5402             return -TARGET_EFAULT;
5403         }
5404         ver = (struct drm_version *)buf_temp;
5405         ret = target_to_host_drmversion(ver, target_ver);
5406         if (!is_error(ret)) {
5407             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5408             if (is_error(ret)) {
5409                 unlock_drm_version(ver, target_ver, false);
5410             } else {
5411                 host_to_target_drmversion(target_ver, ver);
5412             }
5413         }
5414         unlock_user_struct(target_ver, arg, 0);
5415         return ret;
5416     }
5417     return -TARGET_ENOSYS;
5418 }
5419 
5420 #endif
5421 
5422 IOCTLEntry ioctl_entries[] = {
5423 #define IOCTL(cmd, access, ...) \
5424     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5425 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5426     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5427 #define IOCTL_IGNORE(cmd) \
5428     { TARGET_ ## cmd, 0, #cmd },
5429 #include "ioctls.h"
5430     { 0, 0, },
5431 };
5432 
5433 /* ??? Implement proper locking for ioctls.  */
5434 /* do_ioctl() Must return target values and target errnos. */
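/*
 * Generic dispatch: look the command up in ioctl_entries, let a custom
 * do_ioctl handler take over if one is registered, and otherwise convert
 * the argument according to arg_type/access -- IOC_W converts guest to
 * host before the call, IOC_R converts host to guest afterwards, and
 * IOC_RW does both.
 */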
5435 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5436 {
5437     const IOCTLEntry *ie;
5438     const argtype *arg_type;
5439     abi_long ret;
5440     uint8_t buf_temp[MAX_STRUCT_SIZE];
5441     int target_size;
5442     void *argptr;
5443 
5444     ie = ioctl_entries;
5445     for(;;) {
5446         if (ie->target_cmd == 0) {
5447             qemu_log_mask(
5448                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5449             return -TARGET_ENOSYS;
5450         }
5451         if (ie->target_cmd == cmd)
5452             break;
5453         ie++;
5454     }
5455     arg_type = ie->arg_type;
5456     if (ie->do_ioctl) {
5457         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5458     } else if (!ie->host_cmd) {
5459         /* Some architectures define BSD ioctls in their headers
5460            that are not implemented in Linux.  */
5461         return -TARGET_ENOSYS;
5462     }
5463 
5464     switch(arg_type[0]) {
5465     case TYPE_NULL:
5466         /* no argument */
5467         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5468         break;
5469     case TYPE_PTRVOID:
5470     case TYPE_INT:
5471     case TYPE_LONG:
5472     case TYPE_ULONG:
5473         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5474         break;
5475     case TYPE_PTR:
5476         arg_type++;
5477         target_size = thunk_type_size(arg_type, 0);
5478         switch(ie->access) {
5479         case IOC_R:
5480             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5481             if (!is_error(ret)) {
5482                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5483                 if (!argptr)
5484                     return -TARGET_EFAULT;
5485                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5486                 unlock_user(argptr, arg, target_size);
5487             }
5488             break;
5489         case IOC_W:
5490             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5491             if (!argptr)
5492                 return -TARGET_EFAULT;
5493             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5494             unlock_user(argptr, arg, 0);
5495             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5496             break;
5497         default:
5498         case IOC_RW:
5499             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5500             if (!argptr)
5501                 return -TARGET_EFAULT;
5502             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5503             unlock_user(argptr, arg, 0);
5504             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5505             if (!is_error(ret)) {
5506                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5507                 if (!argptr)
5508                     return -TARGET_EFAULT;
5509                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5510                 unlock_user(argptr, arg, target_size);
5511             }
5512             break;
5513         }
5514         break;
5515     default:
5516         qemu_log_mask(LOG_UNIMP,
5517                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5518                       (long)cmd, arg_type[0]);
5519         ret = -TARGET_ENOSYS;
5520         break;
5521     }
5522     return ret;
5523 }
5524 
5525 static const bitmask_transtbl iflag_tbl[] = {
5526         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5527         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5528         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5529         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5530         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5531         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5532         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5533         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5534         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5535         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5536         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5537         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5538         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5539         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5540         { 0, 0, 0, 0 }
5541 };
5542 
5543 static const bitmask_transtbl oflag_tbl[] = {
5544 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5545 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5546 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5547 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5548 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5549 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5550 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5551 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5552 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5553 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5554 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5555 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5556 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5557 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5558 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5559 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5560 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5561 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5562 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5563 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5564 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5565 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5566 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5567 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5568 	{ 0, 0, 0, 0 }
5569 };
5570 
5571 static const bitmask_transtbl cflag_tbl[] = {
5572 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5573 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5574 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5575 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5576 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5577 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5578 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5579 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5580 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5581 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5582 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5583 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5584 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5585 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5586 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5587 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5588 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5589 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5590 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5591 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5592 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5593 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5594 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5595 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5596 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5597 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5598 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5599 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5600 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5601 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5602 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5603 	{ 0, 0, 0, 0 }
5604 };
5605 
5606 static const bitmask_transtbl lflag_tbl[] = {
5607 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5608 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5609 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5610 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5611 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5612 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5613 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5614 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5615 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5616 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5617 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5618 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5619 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5620 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5621 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5622 	{ 0, 0, 0, 0 }
5623 };
5624 
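/*
 * The tables above map each termios flag bit between its target and host
 * encodings; the two converters below translate the four flag words via
 * those tables and copy the control characters entry by entry.
 */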
5625 static void target_to_host_termios (void *dst, const void *src)
5626 {
5627     struct host_termios *host = dst;
5628     const struct target_termios *target = src;
5629 
5630     host->c_iflag =
5631         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5632     host->c_oflag =
5633         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5634     host->c_cflag =
5635         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5636     host->c_lflag =
5637         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5638     host->c_line = target->c_line;
5639 
5640     memset(host->c_cc, 0, sizeof(host->c_cc));
5641     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5642     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5643     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5644     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5645     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5646     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5647     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5648     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5649     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5650     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5651     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5652     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5653     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5654     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5655     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5656     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5657     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5658 }
5659 
5660 static void host_to_target_termios (void *dst, const void *src)
5661 {
5662     struct target_termios *target = dst;
5663     const struct host_termios *host = src;
5664 
5665     target->c_iflag =
5666         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5667     target->c_oflag =
5668         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5669     target->c_cflag =
5670         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5671     target->c_lflag =
5672         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5673     target->c_line = host->c_line;
5674 
5675     memset(target->c_cc, 0, sizeof(target->c_cc));
5676     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5677     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5678     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5679     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5680     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5681     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5682     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5683     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5684     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5685     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5686     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5687     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5688     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5689     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5690     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5691     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5692     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5693 }
5694 
5695 static const StructEntry struct_termios_def = {
5696     .convert = { host_to_target_termios, target_to_host_termios },
5697     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5698     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5699 };
5700 
5701 static bitmask_transtbl mmap_flags_tbl[] = {
5702     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5703     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5704     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5705     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5706       MAP_ANONYMOUS, MAP_ANONYMOUS },
5707     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5708       MAP_GROWSDOWN, MAP_GROWSDOWN },
5709     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5710       MAP_DENYWRITE, MAP_DENYWRITE },
5711     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5712       MAP_EXECUTABLE, MAP_EXECUTABLE },
5713     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5714     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5715       MAP_NORESERVE, MAP_NORESERVE },
5716     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5717     /* MAP_STACK had been ignored by the kernel for quite some time.
5718        Recognize it for the target insofar as we do not want to pass
5719        it through to the host.  */
5720     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5721     { 0, 0, 0, 0 }
5722 };
5723 
5724 /*
5725  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5726  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5727  */
5728 #if defined(TARGET_I386)
5729 
5730 /* NOTE: there is really one LDT for all the threads */
5731 static uint8_t *ldt_table;
5732 
5733 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5734 {
5735     int size;
5736     void *p;
5737 
5738     if (!ldt_table)
5739         return 0;
5740     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5741     if (size > bytecount)
5742         size = bytecount;
5743     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5744     if (!p)
5745         return -TARGET_EFAULT;
5746     /* ??? Should this be byteswapped?  */
5747     memcpy(p, ldt_table, size);
5748     unlock_user(p, ptr, size);
5749     return size;
5750 }
5751 
5752 /* XXX: add locking support */
5753 static abi_long write_ldt(CPUX86State *env,
5754                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5755 {
5756     struct target_modify_ldt_ldt_s ldt_info;
5757     struct target_modify_ldt_ldt_s *target_ldt_info;
5758     int seg_32bit, contents, read_exec_only, limit_in_pages;
5759     int seg_not_present, useable, lm;
5760     uint32_t *lp, entry_1, entry_2;
5761 
5762     if (bytecount != sizeof(ldt_info))
5763         return -TARGET_EINVAL;
5764     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5765         return -TARGET_EFAULT;
5766     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5767     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5768     ldt_info.limit = tswap32(target_ldt_info->limit);
5769     ldt_info.flags = tswap32(target_ldt_info->flags);
5770     unlock_user_struct(target_ldt_info, ptr, 0);
5771 
5772     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5773         return -TARGET_EINVAL;
5774     seg_32bit = ldt_info.flags & 1;
5775     contents = (ldt_info.flags >> 1) & 3;
5776     read_exec_only = (ldt_info.flags >> 3) & 1;
5777     limit_in_pages = (ldt_info.flags >> 4) & 1;
5778     seg_not_present = (ldt_info.flags >> 5) & 1;
5779     useable = (ldt_info.flags >> 6) & 1;
5780 #ifdef TARGET_ABI32
5781     lm = 0;
5782 #else
5783     lm = (ldt_info.flags >> 7) & 1;
5784 #endif
5785     if (contents == 3) {
5786         if (oldmode)
5787             return -TARGET_EINVAL;
5788         if (seg_not_present == 0)
5789             return -TARGET_EINVAL;
5790     }
5791     /* allocate the LDT */
5792     if (!ldt_table) {
5793         env->ldt.base = target_mmap(0,
5794                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5795                                     PROT_READ|PROT_WRITE,
5796                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5797         if (env->ldt.base == -1)
5798             return -TARGET_ENOMEM;
5799         memset(g2h(env->ldt.base), 0,
5800                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5801         env->ldt.limit = 0xffff;
5802         ldt_table = g2h(env->ldt.base);
5803     }
5804 
5805     /* NOTE: same code as Linux kernel */
5806     /* Allow LDTs to be cleared by the user. */
5807     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5808         if (oldmode ||
5809             (contents == 0		&&
5810              read_exec_only == 1	&&
5811              seg_32bit == 0		&&
5812              limit_in_pages == 0	&&
5813              seg_not_present == 1	&&
5814              useable == 0 )) {
5815             entry_1 = 0;
5816             entry_2 = 0;
5817             goto install;
5818         }
5819     }
5820 
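    /*
     * Pack the fields into the two 32-bit words of an x86 descriptor:
     * entry_1 holds limit[15:0] and base[15:0]; entry_2 holds base[23:16]
     * and base[31:24], limit[19:16], the type/present/size bits derived
     * from ldt_info.flags, and the constant 0x7000 (S=1, DPL=3).
     */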
5821     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5822         (ldt_info.limit & 0x0ffff);
5823     entry_2 = (ldt_info.base_addr & 0xff000000) |
5824         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5825         (ldt_info.limit & 0xf0000) |
5826         ((read_exec_only ^ 1) << 9) |
5827         (contents << 10) |
5828         ((seg_not_present ^ 1) << 15) |
5829         (seg_32bit << 22) |
5830         (limit_in_pages << 23) |
5831         (lm << 21) |
5832         0x7000;
5833     if (!oldmode)
5834         entry_2 |= (useable << 20);
5835 
5836     /* Install the new entry ...  */
5837 install:
5838     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5839     lp[0] = tswap32(entry_1);
5840     lp[1] = tswap32(entry_2);
5841     return 0;
5842 }
5843 
5844 /* specific and weird i386 syscalls */
5845 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5846                               unsigned long bytecount)
5847 {
5848     abi_long ret;
5849 
5850     switch (func) {
5851     case 0:
5852         ret = read_ldt(ptr, bytecount);
5853         break;
5854     case 1:
5855         ret = write_ldt(env, ptr, bytecount, 1);
5856         break;
5857     case 0x11:
5858         ret = write_ldt(env, ptr, bytecount, 0);
5859         break;
5860     default:
5861         ret = -TARGET_ENOSYS;
5862         break;
5863     }
5864     return ret;
5865 }
5866 
5867 #if defined(TARGET_ABI32)
5868 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5869 {
5870     uint64_t *gdt_table = g2h(env->gdt.base);
5871     struct target_modify_ldt_ldt_s ldt_info;
5872     struct target_modify_ldt_ldt_s *target_ldt_info;
5873     int seg_32bit, contents, read_exec_only, limit_in_pages;
5874     int seg_not_present, useable, lm;
5875     uint32_t *lp, entry_1, entry_2;
5876     int i;
5877 
5878     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5879     if (!target_ldt_info)
5880         return -TARGET_EFAULT;
5881     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5882     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5883     ldt_info.limit = tswap32(target_ldt_info->limit);
5884     ldt_info.flags = tswap32(target_ldt_info->flags);
5885     if (ldt_info.entry_number == -1) {
5886         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5887             if (gdt_table[i] == 0) {
5888                 ldt_info.entry_number = i;
5889                 target_ldt_info->entry_number = tswap32(i);
5890                 break;
5891             }
5892         }
5893     }
5894     unlock_user_struct(target_ldt_info, ptr, 1);
5895 
5896     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5897         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5898         return -TARGET_EINVAL;
5899     seg_32bit = ldt_info.flags & 1;
5900     contents = (ldt_info.flags >> 1) & 3;
5901     read_exec_only = (ldt_info.flags >> 3) & 1;
5902     limit_in_pages = (ldt_info.flags >> 4) & 1;
5903     seg_not_present = (ldt_info.flags >> 5) & 1;
5904     useable = (ldt_info.flags >> 6) & 1;
5905 #ifdef TARGET_ABI32
5906     lm = 0;
5907 #else
5908     lm = (ldt_info.flags >> 7) & 1;
5909 #endif
5910 
5911     if (contents == 3) {
5912         if (seg_not_present == 0)
5913             return -TARGET_EINVAL;
5914     }
5915 
5916     /* NOTE: same code as Linux kernel */
5917     /* Allow LDTs to be cleared by the user. */
5918     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5919         if ((contents == 0             &&
5920              read_exec_only == 1       &&
5921              seg_32bit == 0            &&
5922              limit_in_pages == 0       &&
5923              seg_not_present == 1      &&
5924              useable == 0 )) {
5925             entry_1 = 0;
5926             entry_2 = 0;
5927             goto install;
5928         }
5929     }
5930 
5931     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5932         (ldt_info.limit & 0x0ffff);
5933     entry_2 = (ldt_info.base_addr & 0xff000000) |
5934         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5935         (ldt_info.limit & 0xf0000) |
5936         ((read_exec_only ^ 1) << 9) |
5937         (contents << 10) |
5938         ((seg_not_present ^ 1) << 15) |
5939         (seg_32bit << 22) |
5940         (limit_in_pages << 23) |
5941         (useable << 20) |
5942         (lm << 21) |
5943         0x7000;
5944 
5945     /* Install the new entry ...  */
5946 install:
5947     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5948     lp[0] = tswap32(entry_1);
5949     lp[1] = tswap32(entry_2);
5950     return 0;
5951 }
5952 
5953 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5954 {
5955     struct target_modify_ldt_ldt_s *target_ldt_info;
5956     uint64_t *gdt_table = g2h(env->gdt.base);
5957     uint32_t base_addr, limit, flags;
5958     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5959     int seg_not_present, useable, lm;
5960     uint32_t *lp, entry_1, entry_2;
5961 
5962     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5963     if (!target_ldt_info)
5964         return -TARGET_EFAULT;
5965     idx = tswap32(target_ldt_info->entry_number);
5966     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5967         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5968         unlock_user_struct(target_ldt_info, ptr, 1);
5969         return -TARGET_EINVAL;
5970     }
5971     lp = (uint32_t *)(gdt_table + idx);
5972     entry_1 = tswap32(lp[0]);
5973     entry_2 = tswap32(lp[1]);
5974 
5975     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5976     contents = (entry_2 >> 10) & 3;
5977     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5978     seg_32bit = (entry_2 >> 22) & 1;
5979     limit_in_pages = (entry_2 >> 23) & 1;
5980     useable = (entry_2 >> 20) & 1;
5981 #ifdef TARGET_ABI32
5982     lm = 0;
5983 #else
5984     lm = (entry_2 >> 21) & 1;
5985 #endif
5986     flags = (seg_32bit << 0) | (contents << 1) |
5987         (read_exec_only << 3) | (limit_in_pages << 4) |
5988         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5989     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5990     base_addr = (entry_1 >> 16) |
5991         (entry_2 & 0xff000000) |
5992         ((entry_2 & 0xff) << 16);
5993     target_ldt_info->base_addr = tswapal(base_addr);
5994     target_ldt_info->limit = tswap32(limit);
5995     target_ldt_info->flags = tswap32(flags);
5996     unlock_user_struct(target_ldt_info, ptr, 1);
5997     return 0;
5998 }
5999 
6000 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6001 {
6002     return -TARGET_ENOSYS;
6003 }
6004 #else
6005 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6006 {
6007     abi_long ret = 0;
6008     abi_ulong val;
6009     int idx;
6010 
6011     switch(code) {
6012     case TARGET_ARCH_SET_GS:
6013     case TARGET_ARCH_SET_FS:
6014         if (code == TARGET_ARCH_SET_GS)
6015             idx = R_GS;
6016         else
6017             idx = R_FS;
6018         cpu_x86_load_seg(env, idx, 0);
6019         env->segs[idx].base = addr;
6020         break;
6021     case TARGET_ARCH_GET_GS:
6022     case TARGET_ARCH_GET_FS:
6023         if (code == TARGET_ARCH_GET_GS)
6024             idx = R_GS;
6025         else
6026             idx = R_FS;
6027         val = env->segs[idx].base;
6028         if (put_user(val, addr, abi_ulong))
6029             ret = -TARGET_EFAULT;
6030         break;
6031     default:
6032         ret = -TARGET_EINVAL;
6033         break;
6034     }
6035     return ret;
6036 }
6037 #endif /* defined(TARGET_ABI32) */
6038 
6039 #endif /* defined(TARGET_I386) */
6040 
6041 #define NEW_STACK_SIZE 0x40000
6042 
6043 
6044 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6045 typedef struct {
6046     CPUArchState *env;
6047     pthread_mutex_t mutex;
6048     pthread_cond_t cond;
6049     pthread_t thread;
6050     uint32_t tid;
6051     abi_ulong child_tidptr;
6052     abi_ulong parent_tidptr;
6053     sigset_t sigmask;
6054 } new_thread_info;
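/*
 * Handshake between do_fork() and clone_func(): the parent creates the
 * thread with all signals blocked and info.mutex held, the child fills in
 * info.tid and signals info.cond, and clone_lock keeps the child from
 * entering cpu_loop() until the parent has finished initializing the new
 * task's state.
 */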
6055 
6056 static void *clone_func(void *arg)
6057 {
6058     new_thread_info *info = arg;
6059     CPUArchState *env;
6060     CPUState *cpu;
6061     TaskState *ts;
6062 
6063     rcu_register_thread();
6064     tcg_register_thread();
6065     env = info->env;
6066     cpu = env_cpu(env);
6067     thread_cpu = cpu;
6068     ts = (TaskState *)cpu->opaque;
6069     info->tid = sys_gettid();
6070     task_settid(ts);
6071     if (info->child_tidptr)
6072         put_user_u32(info->tid, info->child_tidptr);
6073     if (info->parent_tidptr)
6074         put_user_u32(info->tid, info->parent_tidptr);
6075     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6076     /* Enable signals.  */
6077     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6078     /* Signal to the parent that we're ready.  */
6079     pthread_mutex_lock(&info->mutex);
6080     pthread_cond_broadcast(&info->cond);
6081     pthread_mutex_unlock(&info->mutex);
6082     /* Wait until the parent has finished initializing the tls state.  */
6083     pthread_mutex_lock(&clone_lock);
6084     pthread_mutex_unlock(&clone_lock);
6085     cpu_loop(env);
6086     /* never exits */
6087     return NULL;
6088 }
6089 
6090 /* do_fork() Must return host values and target errnos (unlike most
6091    do_*() functions). */
6092 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6093                    abi_ulong parent_tidptr, target_ulong newtls,
6094                    abi_ulong child_tidptr)
6095 {
6096     CPUState *cpu = env_cpu(env);
6097     int ret;
6098     TaskState *ts;
6099     CPUState *new_cpu;
6100     CPUArchState *new_env;
6101     sigset_t sigmask;
6102 
6103     flags &= ~CLONE_IGNORED_FLAGS;
6104 
6105     /* Emulate vfork() with fork() */
6106     if (flags & CLONE_VFORK)
6107         flags &= ~(CLONE_VFORK | CLONE_VM);
6108 
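    /*
     * Two cases: with CLONE_VM the new task shares our address space and
     * is created as another host thread running a fresh CPUArchState;
     * without CLONE_VM it is treated as a fork and implemented with a
     * real host fork() below.
     */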
6109     if (flags & CLONE_VM) {
6110         TaskState *parent_ts = (TaskState *)cpu->opaque;
6111         new_thread_info info;
6112         pthread_attr_t attr;
6113 
6114         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6115             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6116             return -TARGET_EINVAL;
6117         }
6118 
6119         ts = g_new0(TaskState, 1);
6120         init_task_state(ts);
6121 
6122         /* Grab a mutex so that thread setup appears atomic.  */
6123         pthread_mutex_lock(&clone_lock);
6124 
6125         /* we create a new CPU instance. */
6126         new_env = cpu_copy(env);
6127         /* Init regs that differ from the parent.  */
6128         cpu_clone_regs_child(new_env, newsp, flags);
6129         cpu_clone_regs_parent(env, flags);
6130         new_cpu = env_cpu(new_env);
6131         new_cpu->opaque = ts;
6132         ts->bprm = parent_ts->bprm;
6133         ts->info = parent_ts->info;
6134         ts->signal_mask = parent_ts->signal_mask;
6135 
6136         if (flags & CLONE_CHILD_CLEARTID) {
6137             ts->child_tidptr = child_tidptr;
6138         }
6139 
6140         if (flags & CLONE_SETTLS) {
6141             cpu_set_tls (new_env, newtls);
6142         }
6143 
6144         memset(&info, 0, sizeof(info));
6145         pthread_mutex_init(&info.mutex, NULL);
6146         pthread_mutex_lock(&info.mutex);
6147         pthread_cond_init(&info.cond, NULL);
6148         info.env = new_env;
6149         if (flags & CLONE_CHILD_SETTID) {
6150             info.child_tidptr = child_tidptr;
6151         }
6152         if (flags & CLONE_PARENT_SETTID) {
6153             info.parent_tidptr = parent_tidptr;
6154         }
6155 
6156         ret = pthread_attr_init(&attr);
6157         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6158         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6159         /* It is not safe to deliver signals until the child has finished
6160            initializing, so temporarily block all signals.  */
6161         sigfillset(&sigmask);
6162         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6163         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6164 
6165         /* If this is our first additional thread, we need to ensure we
6166          * generate code for parallel execution and flush old translations.
6167          */
6168         if (!parallel_cpus) {
6169             parallel_cpus = true;
6170             tb_flush(cpu);
6171         }
6172 
6173         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6174         /* TODO: Free new CPU state if thread creation failed.  */
6175 
6176         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6177         pthread_attr_destroy(&attr);
6178         if (ret == 0) {
6179             /* Wait for the child to initialize.  */
6180             pthread_cond_wait(&info.cond, &info.mutex);
6181             ret = info.tid;
6182         } else {
6183             ret = -1;
6184         }
6185         pthread_mutex_unlock(&info.mutex);
6186         pthread_cond_destroy(&info.cond);
6187         pthread_mutex_destroy(&info.mutex);
6188         pthread_mutex_unlock(&clone_lock);
6189     } else {
6190         /* if CLONE_VM is not set, we consider it a fork */
6191         if (flags & CLONE_INVALID_FORK_FLAGS) {
6192             return -TARGET_EINVAL;
6193         }
6194 
6195         /* We can't support custom termination signals */
6196         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6197             return -TARGET_EINVAL;
6198         }
6199 
6200         if (block_signals()) {
6201             return -TARGET_ERESTARTSYS;
6202         }
6203 
6204         fork_start();
6205         ret = fork();
6206         if (ret == 0) {
6207             /* Child Process.  */
6208             cpu_clone_regs_child(env, newsp, flags);
6209             fork_end(1);
6210             /* There is a race condition here.  The parent process could
6211                theoretically read the TID in the child process before the child
6212                tid is set.  This would require using either ptrace
6213                (not implemented) or having *_tidptr point at a shared memory
6214                mapping.  We can't repeat the spinlock hack used above because
6215                the child process gets its own copy of the lock.  */
6216             if (flags & CLONE_CHILD_SETTID)
6217                 put_user_u32(sys_gettid(), child_tidptr);
6218             if (flags & CLONE_PARENT_SETTID)
6219                 put_user_u32(sys_gettid(), parent_tidptr);
6220             ts = (TaskState *)cpu->opaque;
6221             if (flags & CLONE_SETTLS)
6222                 cpu_set_tls (env, newtls);
6223             if (flags & CLONE_CHILD_CLEARTID)
6224                 ts->child_tidptr = child_tidptr;
6225         } else {
6226             cpu_clone_regs_parent(env, flags);
6227             fork_end(0);
6228         }
6229     }
6230     return ret;
6231 }
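
/*
 * Illustrative example (not taken from any particular guest libc): a
 * guest pthread_create() typically issues clone() with flags along the
 * lines of CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 * CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID, which takes the pthread-based branch above,
 * whereas a plain fork() arrives with only SIGCHLD in CSIGNAL and is
 * handled with a host fork().
 */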
6232 
6233 /* warning: doesn't handle Linux-specific flags... */
6234 static int target_to_host_fcntl_cmd(int cmd)
6235 {
6236     int ret;
6237 
6238     switch(cmd) {
6239     case TARGET_F_DUPFD:
6240     case TARGET_F_GETFD:
6241     case TARGET_F_SETFD:
6242     case TARGET_F_GETFL:
6243     case TARGET_F_SETFL:
6244     case TARGET_F_OFD_GETLK:
6245     case TARGET_F_OFD_SETLK:
6246     case TARGET_F_OFD_SETLKW:
6247         ret = cmd;
6248         break;
6249     case TARGET_F_GETLK:
6250         ret = F_GETLK64;
6251         break;
6252     case TARGET_F_SETLK:
6253         ret = F_SETLK64;
6254         break;
6255     case TARGET_F_SETLKW:
6256         ret = F_SETLKW64;
6257         break;
6258     case TARGET_F_GETOWN:
6259         ret = F_GETOWN;
6260         break;
6261     case TARGET_F_SETOWN:
6262         ret = F_SETOWN;
6263         break;
6264     case TARGET_F_GETSIG:
6265         ret = F_GETSIG;
6266         break;
6267     case TARGET_F_SETSIG:
6268         ret = F_SETSIG;
6269         break;
6270 #if TARGET_ABI_BITS == 32
6271     case TARGET_F_GETLK64:
6272         ret = F_GETLK64;
6273         break;
6274     case TARGET_F_SETLK64:
6275         ret = F_SETLK64;
6276         break;
6277     case TARGET_F_SETLKW64:
6278         ret = F_SETLKW64;
6279         break;
6280 #endif
6281     case TARGET_F_SETLEASE:
6282         ret = F_SETLEASE;
6283         break;
6284     case TARGET_F_GETLEASE:
6285         ret = F_GETLEASE;
6286         break;
6287 #ifdef F_DUPFD_CLOEXEC
6288     case TARGET_F_DUPFD_CLOEXEC:
6289         ret = F_DUPFD_CLOEXEC;
6290         break;
6291 #endif
6292     case TARGET_F_NOTIFY:
6293         ret = F_NOTIFY;
6294         break;
6295 #ifdef F_GETOWN_EX
6296     case TARGET_F_GETOWN_EX:
6297         ret = F_GETOWN_EX;
6298         break;
6299 #endif
6300 #ifdef F_SETOWN_EX
6301     case TARGET_F_SETOWN_EX:
6302         ret = F_SETOWN_EX;
6303         break;
6304 #endif
6305 #ifdef F_SETPIPE_SZ
6306     case TARGET_F_SETPIPE_SZ:
6307         ret = F_SETPIPE_SZ;
6308         break;
6309     case TARGET_F_GETPIPE_SZ:
6310         ret = F_GETPIPE_SZ;
6311         break;
6312 #endif
6313     default:
6314         ret = -TARGET_EINVAL;
6315         break;
6316     }
6317 
6318 #if defined(__powerpc64__)
6319     /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, which
6320      * the kernel does not support. The glibc fcntl call actually adjusts
6321      * them to 5, 6 and 7 before making the syscall. Since we make the
6322      * syscall directly, adjust to what the kernel supports.
6323      */
6324     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6325         ret -= F_GETLK64 - 5;
6326     }
6327 #endif
6328 
6329     return ret;
6330 }
6331 
6332 #define FLOCK_TRANSTBL \
6333     switch (type) { \
6334     TRANSTBL_CONVERT(F_RDLCK); \
6335     TRANSTBL_CONVERT(F_WRLCK); \
6336     TRANSTBL_CONVERT(F_UNLCK); \
6337     TRANSTBL_CONVERT(F_EXLCK); \
6338     TRANSTBL_CONVERT(F_SHLCK); \
6339     }
6340 
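/*
 * FLOCK_TRANSTBL above expands to a switch over the lock type; the two
 * functions below instantiate it in opposite directions by redefining
 * TRANSTBL_CONVERT to map TARGET_* constants to host constants and back.
 */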
6341 static int target_to_host_flock(int type)
6342 {
6343 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6344     FLOCK_TRANSTBL
6345 #undef  TRANSTBL_CONVERT
6346     return -TARGET_EINVAL;
6347 }
6348 
6349 static int host_to_target_flock(int type)
6350 {
6351 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6352     FLOCK_TRANSTBL
6353 #undef  TRANSTBL_CONVERT
6354     /* if we don't know how to convert the value coming
6355      * from the host, copy it to the target field as-is
6356      */
6357     return type;
6358 }
6359 
6360 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6361                                             abi_ulong target_flock_addr)
6362 {
6363     struct target_flock *target_fl;
6364     int l_type;
6365 
6366     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6367         return -TARGET_EFAULT;
6368     }
6369 
6370     __get_user(l_type, &target_fl->l_type);
6371     l_type = target_to_host_flock(l_type);
6372     if (l_type < 0) {
6373         return l_type;
6374     }
6375     fl->l_type = l_type;
6376     __get_user(fl->l_whence, &target_fl->l_whence);
6377     __get_user(fl->l_start, &target_fl->l_start);
6378     __get_user(fl->l_len, &target_fl->l_len);
6379     __get_user(fl->l_pid, &target_fl->l_pid);
6380     unlock_user_struct(target_fl, target_flock_addr, 0);
6381     return 0;
6382 }
6383 
6384 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6385                                           const struct flock64 *fl)
6386 {
6387     struct target_flock *target_fl;
6388     short l_type;
6389 
6390     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6391         return -TARGET_EFAULT;
6392     }
6393 
6394     l_type = host_to_target_flock(fl->l_type);
6395     __put_user(l_type, &target_fl->l_type);
6396     __put_user(fl->l_whence, &target_fl->l_whence);
6397     __put_user(fl->l_start, &target_fl->l_start);
6398     __put_user(fl->l_len, &target_fl->l_len);
6399     __put_user(fl->l_pid, &target_fl->l_pid);
6400     unlock_user_struct(target_fl, target_flock_addr, 1);
6401     return 0;
6402 }
6403 
6404 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6405 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6406 
6407 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6408 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6409                                                    abi_ulong target_flock_addr)
6410 {
6411     struct target_oabi_flock64 *target_fl;
6412     int l_type;
6413 
6414     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6415         return -TARGET_EFAULT;
6416     }
6417 
6418     __get_user(l_type, &target_fl->l_type);
6419     l_type = target_to_host_flock(l_type);
6420     if (l_type < 0) {
6421         return l_type;
6422     }
6423     fl->l_type = l_type;
6424     __get_user(fl->l_whence, &target_fl->l_whence);
6425     __get_user(fl->l_start, &target_fl->l_start);
6426     __get_user(fl->l_len, &target_fl->l_len);
6427     __get_user(fl->l_pid, &target_fl->l_pid);
6428     unlock_user_struct(target_fl, target_flock_addr, 0);
6429     return 0;
6430 }
6431 
6432 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6433                                                  const struct flock64 *fl)
6434 {
6435     struct target_oabi_flock64 *target_fl;
6436     short l_type;
6437 
6438     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6439         return -TARGET_EFAULT;
6440     }
6441 
6442     l_type = host_to_target_flock(fl->l_type);
6443     __put_user(l_type, &target_fl->l_type);
6444     __put_user(fl->l_whence, &target_fl->l_whence);
6445     __put_user(fl->l_start, &target_fl->l_start);
6446     __put_user(fl->l_len, &target_fl->l_len);
6447     __put_user(fl->l_pid, &target_fl->l_pid);
6448     unlock_user_struct(target_fl, target_flock_addr, 1);
6449     return 0;
6450 }
6451 #endif
6452 
6453 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6454                                               abi_ulong target_flock_addr)
6455 {
6456     struct target_flock64 *target_fl;
6457     int l_type;
6458 
6459     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6460         return -TARGET_EFAULT;
6461     }
6462 
6463     __get_user(l_type, &target_fl->l_type);
6464     l_type = target_to_host_flock(l_type);
6465     if (l_type < 0) {
6466         return l_type;
6467     }
6468     fl->l_type = l_type;
6469     __get_user(fl->l_whence, &target_fl->l_whence);
6470     __get_user(fl->l_start, &target_fl->l_start);
6471     __get_user(fl->l_len, &target_fl->l_len);
6472     __get_user(fl->l_pid, &target_fl->l_pid);
6473     unlock_user_struct(target_fl, target_flock_addr, 0);
6474     return 0;
6475 }
6476 
6477 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6478                                             const struct flock64 *fl)
6479 {
6480     struct target_flock64 *target_fl;
6481     short l_type;
6482 
6483     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6484         return -TARGET_EFAULT;
6485     }
6486 
6487     l_type = host_to_target_flock(fl->l_type);
6488     __put_user(l_type, &target_fl->l_type);
6489     __put_user(fl->l_whence, &target_fl->l_whence);
6490     __put_user(fl->l_start, &target_fl->l_start);
6491     __put_user(fl->l_len, &target_fl->l_len);
6492     __put_user(fl->l_pid, &target_fl->l_pid);
6493     unlock_user_struct(target_fl, target_flock_addr, 1);
6494     return 0;
6495 }
6496 
6497 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6498 {
6499     struct flock64 fl64;
6500 #ifdef F_GETOWN_EX
6501     struct f_owner_ex fox;
6502     struct target_f_owner_ex *target_fox;
6503 #endif
6504     abi_long ret;
6505     int host_cmd = target_to_host_fcntl_cmd(cmd);
6506 
6507     if (host_cmd == -TARGET_EINVAL)
6508         return host_cmd;
6509 
6510     switch(cmd) {
6511     case TARGET_F_GETLK:
6512         ret = copy_from_user_flock(&fl64, arg);
6513         if (ret) {
6514             return ret;
6515         }
6516         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6517         if (ret == 0) {
6518             ret = copy_to_user_flock(arg, &fl64);
6519         }
6520         break;
6521 
6522     case TARGET_F_SETLK:
6523     case TARGET_F_SETLKW:
6524         ret = copy_from_user_flock(&fl64, arg);
6525         if (ret) {
6526             return ret;
6527         }
6528         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6529         break;
6530 
6531     case TARGET_F_GETLK64:
6532     case TARGET_F_OFD_GETLK:
6533         ret = copy_from_user_flock64(&fl64, arg);
6534         if (ret) {
6535             return ret;
6536         }
6537         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6538         if (ret == 0) {
6539             ret = copy_to_user_flock64(arg, &fl64);
6540         }
6541         break;
6542     case TARGET_F_SETLK64:
6543     case TARGET_F_SETLKW64:
6544     case TARGET_F_OFD_SETLK:
6545     case TARGET_F_OFD_SETLKW:
6546         ret = copy_from_user_flock64(&fl64, arg);
6547         if (ret) {
6548             return ret;
6549         }
6550         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6551         break;
6552 
6553     case TARGET_F_GETFL:
6554         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6555         if (ret >= 0) {
6556             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6557         }
6558         break;
6559 
6560     case TARGET_F_SETFL:
6561         ret = get_errno(safe_fcntl(fd, host_cmd,
6562                                    target_to_host_bitmask(arg,
6563                                                           fcntl_flags_tbl)));
6564         break;
6565 
6566 #ifdef F_GETOWN_EX
6567     case TARGET_F_GETOWN_EX:
6568         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6569         if (ret >= 0) {
6570             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6571                 return -TARGET_EFAULT;
6572             target_fox->type = tswap32(fox.type);
6573             target_fox->pid = tswap32(fox.pid);
6574             unlock_user_struct(target_fox, arg, 1);
6575         }
6576         break;
6577 #endif
6578 
6579 #ifdef F_SETOWN_EX
6580     case TARGET_F_SETOWN_EX:
6581         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6582             return -TARGET_EFAULT;
6583         fox.type = tswap32(target_fox->type);
6584         fox.pid = tswap32(target_fox->pid);
6585         unlock_user_struct(target_fox, arg, 0);
6586         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6587         break;
6588 #endif
6589 
6590     case TARGET_F_SETOWN:
6591     case TARGET_F_GETOWN:
6592     case TARGET_F_SETSIG:
6593     case TARGET_F_GETSIG:
6594     case TARGET_F_SETLEASE:
6595     case TARGET_F_GETLEASE:
6596     case TARGET_F_SETPIPE_SZ:
6597     case TARGET_F_GETPIPE_SZ:
6598         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6599         break;
6600 
6601     default:
6602         ret = get_errno(safe_fcntl(fd, cmd, arg));
6603         break;
6604     }
6605     return ret;
6606 }
6607 
6608 #ifdef USE_UID16
6609 
6610 static inline int high2lowuid(int uid)
6611 {
6612     if (uid > 65535)
6613         return 65534;
6614     else
6615         return uid;
6616 }
6617 
6618 static inline int high2lowgid(int gid)
6619 {
6620     if (gid > 65535)
6621         return 65534;
6622     else
6623         return gid;
6624 }
6625 
6626 static inline int low2highuid(int uid)
6627 {
6628     if ((int16_t)uid == -1)
6629         return -1;
6630     else
6631         return uid;
6632 }
6633 
6634 static inline int low2highgid(int gid)
6635 {
6636     if ((int16_t)gid == -1)
6637         return -1;
6638     else
6639         return gid;
6640 }
6641 static inline int tswapid(int id)
6642 {
6643     return tswap16(id);
6644 }
6645 
6646 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6647 
6648 #else /* !USE_UID16 */
6649 static inline int high2lowuid(int uid)
6650 {
6651     return uid;
6652 }
6653 static inline int high2lowgid(int gid)
6654 {
6655     return gid;
6656 }
6657 static inline int low2highuid(int uid)
6658 {
6659     return uid;
6660 }
6661 static inline int low2highgid(int gid)
6662 {
6663     return gid;
6664 }
6665 static inline int tswapid(int id)
6666 {
6667     return tswap32(id);
6668 }
6669 
6670 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6671 
6672 #endif /* USE_UID16 */
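
/*
 * Illustrative example of the 16-bit mapping above: with USE_UID16, a
 * host uid such as 100000 does not fit in 16 bits, so high2lowuid()
 * reports it to the guest as 65534 (the traditional overflow id), while
 * low2highuid() preserves the special value -1 that chown() and friends
 * use to mean "leave unchanged".
 */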
6673 
6674 /* We must do direct syscalls for setting UID/GID, because we want to
6675  * implement the Linux system call semantics of "change only for this thread",
6676  * not the libc/POSIX semantics of "change for all threads in process".
6677  * (See http://ewontfix.com/17/ for more details.)
6678  * We use the 32-bit version of the syscalls if present; if it is not,
6679  * then either the host architecture supports 32-bit UIDs natively with
6680  * the standard syscall, or the 16-bit UID is the best we can do.
6681  */
6682 #ifdef __NR_setuid32
6683 #define __NR_sys_setuid __NR_setuid32
6684 #else
6685 #define __NR_sys_setuid __NR_setuid
6686 #endif
6687 #ifdef __NR_setgid32
6688 #define __NR_sys_setgid __NR_setgid32
6689 #else
6690 #define __NR_sys_setgid __NR_setgid
6691 #endif
6692 #ifdef __NR_setresuid32
6693 #define __NR_sys_setresuid __NR_setresuid32
6694 #else
6695 #define __NR_sys_setresuid __NR_setresuid
6696 #endif
6697 #ifdef __NR_setresgid32
6698 #define __NR_sys_setresgid __NR_setresgid32
6699 #else
6700 #define __NR_sys_setresgid __NR_setresgid
6701 #endif
6702 
6703 _syscall1(int, sys_setuid, uid_t, uid)
6704 _syscall1(int, sys_setgid, gid_t, gid)
6705 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6706 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6707 
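/*
 * One-time initialisation for this file: register the structure layouts
 * used by the ioctl thunking code, build the reverse errno translation
 * table from host_to_target_errno_table[], and patch the size field of
 * any ioctl table entry that was declared with all size bits set, using
 * the size of the thunked argument type instead.
 */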
6708 void syscall_init(void)
6709 {
6710     IOCTLEntry *ie;
6711     const argtype *arg_type;
6712     int size;
6713     int i;
6714 
6715     thunk_init(STRUCT_MAX);
6716 
6717 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6718 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6719 #include "syscall_types.h"
6720 #undef STRUCT
6721 #undef STRUCT_SPECIAL
6722 
6723     /* Build the target_to_host_errno_table[] from
6724      * host_to_target_errno_table[]. */
6725     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6726         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6727     }
6728 
6729     /* We patch the ioctl size if necessary. We rely on the fact that
6730        no ioctl has all bits of its size field set to '1'. */
6731     ie = ioctl_entries;
6732     while (ie->target_cmd != 0) {
6733         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6734             TARGET_IOC_SIZEMASK) {
6735             arg_type = ie->arg_type;
6736             if (arg_type[0] != TYPE_PTR) {
6737                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6738                         ie->target_cmd);
6739                 exit(1);
6740             }
6741             arg_type++;
6742             size = thunk_type_size(arg_type, 0);
6743             ie->target_cmd = (ie->target_cmd &
6744                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6745                 (size << TARGET_IOC_SIZESHIFT);
6746         }
6747 
6748         /* automatic consistency check if same arch */
6749 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6750     (defined(__x86_64__) && defined(TARGET_X86_64))
6751         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6752             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6753                     ie->name, ie->target_cmd, ie->host_cmd);
6754         }
6755 #endif
6756         ie++;
6757     }
6758 }
6759 
6760 #ifdef TARGET_NR_truncate64
6761 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6762                                          abi_long arg2,
6763                                          abi_long arg3,
6764                                          abi_long arg4)
6765 {
6766     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6767         arg2 = arg3;
6768         arg3 = arg4;
6769     }
6770     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6771 }
6772 #endif
6773 
6774 #ifdef TARGET_NR_ftruncate64
6775 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6776                                           abi_long arg2,
6777                                           abi_long arg3,
6778                                           abi_long arg4)
6779 {
6780     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6781         arg2 = arg3;
6782         arg3 = arg4;
6783     }
6784     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6785 }
6786 #endif
6787 
6788 #if defined(TARGET_NR_timer_settime) || \
6789     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6790 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6791                                                  abi_ulong target_addr)
6792 {
6793     if (target_to_host_timespec(&host_its->it_interval, target_addr +
6794                                 offsetof(struct target_itimerspec,
6795                                          it_interval)) ||
6796         target_to_host_timespec(&host_its->it_value, target_addr +
6797                                 offsetof(struct target_itimerspec,
6798                                          it_value))) {
6799         return -TARGET_EFAULT;
6800     }
6801 
6802     return 0;
6803 }
6804 #endif
6805 
6806 #if defined(TARGET_NR_timer_settime64) || \
6807     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6808 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6809                                                    abi_ulong target_addr)
6810 {
6811     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6812                                   offsetof(struct target__kernel_itimerspec,
6813                                            it_interval)) ||
6814         target_to_host_timespec64(&host_its->it_value, target_addr +
6815                                   offsetof(struct target__kernel_itimerspec,
6816                                            it_value))) {
6817         return -TARGET_EFAULT;
6818     }
6819 
6820     return 0;
6821 }
6822 #endif
6823 
6824 #if ((defined(TARGET_NR_timerfd_gettime) || \
6825       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6826       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6827 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6828                                                  struct itimerspec *host_its)
6829 {
6830     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6831                                                        it_interval),
6832                                 &host_its->it_interval) ||
6833         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6834                                                        it_value),
6835                                 &host_its->it_value)) {
6836         return -TARGET_EFAULT;
6837     }
6838     return 0;
6839 }
6840 #endif
6841 
6842 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6843       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6844       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6845 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6846                                                    struct itimerspec *host_its)
6847 {
6848     if (host_to_target_timespec64(target_addr +
6849                                   offsetof(struct target__kernel_itimerspec,
6850                                            it_interval),
6851                                   &host_its->it_interval) ||
6852         host_to_target_timespec64(target_addr +
6853                                   offsetof(struct target__kernel_itimerspec,
6854                                            it_value),
6855                                   &host_its->it_value)) {
6856         return -TARGET_EFAULT;
6857     }
6858     return 0;
6859 }
6860 #endif
6861 
6862 #if defined(TARGET_NR_adjtimex) || \
6863     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6864 static inline abi_long target_to_host_timex(struct timex *host_tx,
6865                                             abi_long target_addr)
6866 {
6867     struct target_timex *target_tx;
6868 
6869     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6870         return -TARGET_EFAULT;
6871     }
6872 
6873     __get_user(host_tx->modes, &target_tx->modes);
6874     __get_user(host_tx->offset, &target_tx->offset);
6875     __get_user(host_tx->freq, &target_tx->freq);
6876     __get_user(host_tx->maxerror, &target_tx->maxerror);
6877     __get_user(host_tx->esterror, &target_tx->esterror);
6878     __get_user(host_tx->status, &target_tx->status);
6879     __get_user(host_tx->constant, &target_tx->constant);
6880     __get_user(host_tx->precision, &target_tx->precision);
6881     __get_user(host_tx->tolerance, &target_tx->tolerance);
6882     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6883     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6884     __get_user(host_tx->tick, &target_tx->tick);
6885     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6886     __get_user(host_tx->jitter, &target_tx->jitter);
6887     __get_user(host_tx->shift, &target_tx->shift);
6888     __get_user(host_tx->stabil, &target_tx->stabil);
6889     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6890     __get_user(host_tx->calcnt, &target_tx->calcnt);
6891     __get_user(host_tx->errcnt, &target_tx->errcnt);
6892     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6893     __get_user(host_tx->tai, &target_tx->tai);
6894 
6895     unlock_user_struct(target_tx, target_addr, 0);
6896     return 0;
6897 }
6898 
6899 static inline abi_long host_to_target_timex(abi_long target_addr,
6900                                             struct timex *host_tx)
6901 {
6902     struct target_timex *target_tx;
6903 
6904     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6905         return -TARGET_EFAULT;
6906     }
6907 
6908     __put_user(host_tx->modes, &target_tx->modes);
6909     __put_user(host_tx->offset, &target_tx->offset);
6910     __put_user(host_tx->freq, &target_tx->freq);
6911     __put_user(host_tx->maxerror, &target_tx->maxerror);
6912     __put_user(host_tx->esterror, &target_tx->esterror);
6913     __put_user(host_tx->status, &target_tx->status);
6914     __put_user(host_tx->constant, &target_tx->constant);
6915     __put_user(host_tx->precision, &target_tx->precision);
6916     __put_user(host_tx->tolerance, &target_tx->tolerance);
6917     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6918     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6919     __put_user(host_tx->tick, &target_tx->tick);
6920     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6921     __put_user(host_tx->jitter, &target_tx->jitter);
6922     __put_user(host_tx->shift, &target_tx->shift);
6923     __put_user(host_tx->stabil, &target_tx->stabil);
6924     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6925     __put_user(host_tx->calcnt, &target_tx->calcnt);
6926     __put_user(host_tx->errcnt, &target_tx->errcnt);
6927     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6928     __put_user(host_tx->tai, &target_tx->tai);
6929 
6930     unlock_user_struct(target_tx, target_addr, 1);
6931     return 0;
6932 }
6933 #endif
6934 
6935 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6936                                                abi_ulong target_addr)
6937 {
6938     struct target_sigevent *target_sevp;
6939 
6940     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6941         return -TARGET_EFAULT;
6942     }
6943 
6944     /* This union is awkward on 64 bit systems because it has a 32 bit
6945      * integer and a pointer in it; we follow the conversion approach
6946      * used for handling sigval types in signal.c so the guest should get
6947      * the correct value back even if we did a 64 bit byteswap and it's
6948      * using the 32 bit integer.
6949      */
6950     host_sevp->sigev_value.sival_ptr =
6951         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6952     host_sevp->sigev_signo =
6953         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6954     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6955     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6956 
6957     unlock_user_struct(target_sevp, target_addr, 1);
6958     return 0;
6959 }
6960 
6961 #if defined(TARGET_NR_mlockall)
6962 static inline int target_to_host_mlockall_arg(int arg)
6963 {
6964     int result = 0;
6965 
6966     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6967         result |= MCL_CURRENT;
6968     }
6969     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6970         result |= MCL_FUTURE;
6971     }
6972     return result;
6973 }
6974 #endif
6975 
6976 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6977      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6978      defined(TARGET_NR_newfstatat))
6979 static inline abi_long host_to_target_stat64(void *cpu_env,
6980                                              abi_ulong target_addr,
6981                                              struct stat *host_st)
6982 {
6983 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6984     if (((CPUARMState *)cpu_env)->eabi) {
6985         struct target_eabi_stat64 *target_st;
6986 
6987         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6988             return -TARGET_EFAULT;
6989         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6990         __put_user(host_st->st_dev, &target_st->st_dev);
6991         __put_user(host_st->st_ino, &target_st->st_ino);
6992 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6993         __put_user(host_st->st_ino, &target_st->__st_ino);
6994 #endif
6995         __put_user(host_st->st_mode, &target_st->st_mode);
6996         __put_user(host_st->st_nlink, &target_st->st_nlink);
6997         __put_user(host_st->st_uid, &target_st->st_uid);
6998         __put_user(host_st->st_gid, &target_st->st_gid);
6999         __put_user(host_st->st_rdev, &target_st->st_rdev);
7000         __put_user(host_st->st_size, &target_st->st_size);
7001         __put_user(host_st->st_blksize, &target_st->st_blksize);
7002         __put_user(host_st->st_blocks, &target_st->st_blocks);
7003         __put_user(host_st->st_atime, &target_st->target_st_atime);
7004         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7005         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7006 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7007         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7008         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7009         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7010 #endif
7011         unlock_user_struct(target_st, target_addr, 1);
7012     } else
7013 #endif
7014     {
7015 #if defined(TARGET_HAS_STRUCT_STAT64)
7016         struct target_stat64 *target_st;
7017 #else
7018         struct target_stat *target_st;
7019 #endif
7020 
7021         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7022             return -TARGET_EFAULT;
7023         memset(target_st, 0, sizeof(*target_st));
7024         __put_user(host_st->st_dev, &target_st->st_dev);
7025         __put_user(host_st->st_ino, &target_st->st_ino);
7026 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7027         __put_user(host_st->st_ino, &target_st->__st_ino);
7028 #endif
7029         __put_user(host_st->st_mode, &target_st->st_mode);
7030         __put_user(host_st->st_nlink, &target_st->st_nlink);
7031         __put_user(host_st->st_uid, &target_st->st_uid);
7032         __put_user(host_st->st_gid, &target_st->st_gid);
7033         __put_user(host_st->st_rdev, &target_st->st_rdev);
7034         /* XXX: better use of kernel struct */
7035         __put_user(host_st->st_size, &target_st->st_size);
7036         __put_user(host_st->st_blksize, &target_st->st_blksize);
7037         __put_user(host_st->st_blocks, &target_st->st_blocks);
7038         __put_user(host_st->st_atime, &target_st->target_st_atime);
7039         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7040         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7041 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7042         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7043         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7044         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7045 #endif
7046         unlock_user_struct(target_st, target_addr, 1);
7047     }
7048 
7049     return 0;
7050 }
7051 #endif
7052 
7053 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7054 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7055                                             abi_ulong target_addr)
7056 {
7057     struct target_statx *target_stx;
7058 
7059     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7060         return -TARGET_EFAULT;
7061     }
7062     memset(target_stx, 0, sizeof(*target_stx));
7063 
7064     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7065     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7066     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7067     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7068     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7069     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7070     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7071     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7072     __put_user(host_stx->stx_size, &target_stx->stx_size);
7073     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7074     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7075     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7076     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7077     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7078     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7079     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7080     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7081     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7082     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7083     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7084     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7085     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7086     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7087 
7088     unlock_user_struct(target_stx, target_addr, 1);
7089 
7090     return 0;
7091 }
7092 #endif
7093 
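/*
 * Pick the host futex syscall that matches the host's time_t width: on a
 * 64-bit host, plain __NR_futex already takes a 64-bit timespec; on a
 * 32-bit host, __NR_futex_time64 is used when the host libc timespec has
 * a 64-bit tv_sec, falling back to the legacy __NR_futex otherwise.
 */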
7094 static int do_sys_futex(int *uaddr, int op, int val,
7095                          const struct timespec *timeout, int *uaddr2,
7096                          int val3)
7097 {
7098 #if HOST_LONG_BITS == 64
7099 #if defined(__NR_futex)
7100     /* the host always has a 64-bit time_t and defines no _time64 variant */
7101     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7102 
7103 #endif
7104 #else /* HOST_LONG_BITS == 64 */
7105 #if defined(__NR_futex_time64)
7106     if (sizeof(timeout->tv_sec) == 8) {
7107         /* _time64 function on 32bit arch */
7108         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7109     }
7110 #endif
7111 #if defined(__NR_futex)
7112     /* old function on 32bit arch */
7113     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7114 #endif
7115 #endif /* HOST_LONG_BITS == 64 */
7116     g_assert_not_reached();
7117 }
7118 
7119 static int do_safe_futex(int *uaddr, int op, int val,
7120                          const struct timespec *timeout, int *uaddr2,
7121                          int val3)
7122 {
7123 #if HOST_LONG_BITS == 64
7124 #if defined(__NR_futex)
7125     /* the host always has a 64-bit time_t and defines no _time64 variant */
7126     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7127 #endif
7128 #else /* HOST_LONG_BITS == 64 */
7129 #if defined(__NR_futex_time64)
7130     if (sizeof(timeout->tv_sec) == 8) {
7131         /* _time64 function on 32bit arch */
7132         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7133                                            val3));
7134     }
7135 #endif
7136 #if defined(__NR_futex)
7137     /* old function on 32bit arch */
7138     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7139 #endif
7140 #endif /* HOST_LONG_BITS == 64 */
7141     return -TARGET_ENOSYS;
7142 }
7143 
7144 /* ??? Using host futex calls even when target atomic operations
7145    are not really atomic probably breaks things.  However, implementing
7146    futexes locally would make futexes shared between multiple processes
7147    tricky.  On the other hand, such futexes are probably useless anyway,
7148    because guest atomic operations won't work either.  */
7149 #if defined(TARGET_NR_futex)
7150 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7151                     target_ulong uaddr2, int val3)
7152 {
7153     struct timespec ts, *pts;
7154     int base_op;
7155 
7156     /* ??? We assume FUTEX_* constants are the same on both host
7157        and target.  */
7158 #ifdef FUTEX_CMD_MASK
7159     base_op = op & FUTEX_CMD_MASK;
7160 #else
7161     base_op = op;
7162 #endif
7163     switch (base_op) {
7164     case FUTEX_WAIT:
7165     case FUTEX_WAIT_BITSET:
7166         if (timeout) {
7167             pts = &ts;
7168             target_to_host_timespec(pts, timeout);
7169         } else {
7170             pts = NULL;
7171         }
7172         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7173     case FUTEX_WAKE:
7174         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7175     case FUTEX_FD:
7176         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7177     case FUTEX_REQUEUE:
7178     case FUTEX_CMP_REQUEUE:
7179     case FUTEX_WAKE_OP:
7180         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7181            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7182            But the prototype takes a `struct timespec *'; insert casts
7183            to satisfy the compiler.  We do not need to tswap TIMEOUT
7184            since it's not compared to guest memory.  */
7185         pts = (struct timespec *)(uintptr_t) timeout;
7186         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7187                              (base_op == FUTEX_CMP_REQUEUE
7188                                       ? tswap32(val3)
7189                                       : val3));
7190     default:
7191         return -TARGET_ENOSYS;
7192     }
7193 }
7194 #endif
7195 
7196 #if defined(TARGET_NR_futex_time64)
7197 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7198                            target_ulong uaddr2, int val3)
7199 {
7200     struct timespec ts, *pts;
7201     int base_op;
7202 
7203     /* ??? We assume FUTEX_* constants are the same on both host
7204        and target.  */
7205 #ifdef FUTEX_CMD_MASK
7206     base_op = op & FUTEX_CMD_MASK;
7207 #else
7208     base_op = op;
7209 #endif
7210     switch (base_op) {
7211     case FUTEX_WAIT:
7212     case FUTEX_WAIT_BITSET:
7213         if (timeout) {
7214             pts = &ts;
7215             target_to_host_timespec64(pts, timeout);
7216         } else {
7217             pts = NULL;
7218         }
7219         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7220     case FUTEX_WAKE:
7221         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7222     case FUTEX_FD:
7223         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7224     case FUTEX_REQUEUE:
7225     case FUTEX_CMP_REQUEUE:
7226     case FUTEX_WAKE_OP:
7227         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7228            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7229            But the prototype takes a `struct timespec *'; insert casts
7230            to satisfy the compiler.  We do not need to tswap TIMEOUT
7231            since it's not compared to guest memory.  */
7232         pts = (struct timespec *)(uintptr_t) timeout;
7233         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7234                              (base_op == FUTEX_CMP_REQUEUE
7235                                       ? tswap32(val3)
7236                                       : val3));
7237     default:
7238         return -TARGET_ENOSYS;
7239     }
7240 }
7241 #endif
7242 
7243 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7244 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7245                                      abi_long handle, abi_long mount_id,
7246                                      abi_long flags)
7247 {
7248     struct file_handle *target_fh;
7249     struct file_handle *fh;
7250     int mid = 0;
7251     abi_long ret;
7252     char *name;
7253     unsigned int size, total_size;
7254 
7255     if (get_user_s32(size, handle)) {
7256         return -TARGET_EFAULT;
7257     }
7258 
7259     name = lock_user_string(pathname);
7260     if (!name) {
7261         return -TARGET_EFAULT;
7262     }
7263 
7264     total_size = sizeof(struct file_handle) + size;
7265     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7266     if (!target_fh) {
7267         unlock_user(name, pathname, 0);
7268         return -TARGET_EFAULT;
7269     }
7270 
7271     fh = g_malloc0(total_size);
7272     fh->handle_bytes = size;
7273 
7274     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7275     unlock_user(name, pathname, 0);
7276 
7277     /* man name_to_handle_at(2):
7278      * Other than the use of the handle_bytes field, the caller should treat
7279      * the file_handle structure as an opaque data type
7280      */
7281 
7282     memcpy(target_fh, fh, total_size);
7283     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7284     target_fh->handle_type = tswap32(fh->handle_type);
7285     g_free(fh);
7286     unlock_user(target_fh, handle, total_size);
7287 
7288     if (put_user_s32(mid, mount_id)) {
7289         return -TARGET_EFAULT;
7290     }
7291 
7292     return ret;
7293 
7294 }
7295 #endif
7296 
7297 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7298 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7299                                      abi_long flags)
7300 {
7301     struct file_handle *target_fh;
7302     struct file_handle *fh;
7303     unsigned int size, total_size;
7304     abi_long ret;
7305 
7306     if (get_user_s32(size, handle)) {
7307         return -TARGET_EFAULT;
7308     }
7309 
7310     total_size = sizeof(struct file_handle) + size;
7311     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7312     if (!target_fh) {
7313         return -TARGET_EFAULT;
7314     }
7315 
7316     fh = g_memdup(target_fh, total_size);
7317     fh->handle_bytes = size;
7318     fh->handle_type = tswap32(target_fh->handle_type);
7319 
7320     ret = get_errno(open_by_handle_at(mount_fd, fh,
7321                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7322 
7323     g_free(fh);
7324 
7325     unlock_user(target_fh, handle, total_size);
7326 
7327     return ret;
7328 }
7329 #endif
7330 
7331 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7332 
7333 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7334 {
7335     int host_flags;
7336     target_sigset_t *target_mask;
7337     sigset_t host_mask;
7338     abi_long ret;
7339 
7340     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7341         return -TARGET_EINVAL;
7342     }
7343     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7344         return -TARGET_EFAULT;
7345     }
7346 
7347     target_to_host_sigset(&host_mask, target_mask);
7348 
7349     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7350 
7351     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7352     if (ret >= 0) {
7353         fd_trans_register(ret, &target_signalfd_trans);
7354     }
7355 
7356     unlock_user_struct(target_mask, mask, 0);
7357 
7358     return ret;
7359 }
7360 #endif
7361 
7362 /* Map host to target signal numbers for the wait family of syscalls.
7363    Assume all other status bits are the same.  */
7364 int host_to_target_waitstatus(int status)
7365 {
7366     if (WIFSIGNALED(status)) {
7367         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7368     }
7369     if (WIFSTOPPED(status)) {
7370         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7371                | (status & 0xff);
7372     }
7373     return status;
7374 }
7375 
7376 static int open_self_cmdline(void *cpu_env, int fd)
7377 {
7378     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7379     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7380     int i;
7381 
7382     for (i = 0; i < bprm->argc; i++) {
7383         size_t len = strlen(bprm->argv[i]) + 1;
7384 
7385         if (write(fd, bprm->argv[i], len) != len) {
7386             return -1;
7387         }
7388     }
7389 
7390     return 0;
7391 }
7392 
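/*
 * Synthesize /proc/self/maps for the guest: walk the host's own mappings,
 * keep only ranges that are valid in the guest address space, and print
 * them with guest (h2g-translated) addresses, labelling the region that
 * holds the guest stack as "[stack]".
 */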
7393 static int open_self_maps(void *cpu_env, int fd)
7394 {
7395     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7396     TaskState *ts = cpu->opaque;
7397     GSList *map_info = read_self_maps();
7398     GSList *s;
7399     int count;
7400 
7401     for (s = map_info; s; s = g_slist_next(s)) {
7402         MapInfo *e = (MapInfo *) s->data;
7403 
7404         if (h2g_valid(e->start)) {
7405             unsigned long min = e->start;
7406             unsigned long max = e->end;
7407             int flags = page_get_flags(h2g(min));
7408             const char *path;
7409 
7410             max = h2g_valid(max - 1) ?
7411                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7412 
7413             if (page_check_range(h2g(min), max - min, flags) == -1) {
7414                 continue;
7415             }
7416 
7417             if (h2g(min) == ts->info->stack_limit) {
7418                 path = "[stack]";
7419             } else {
7420                 path = e->path;
7421             }
7422 
7423             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7424                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7425                             h2g(min), h2g(max - 1) + 1,
7426                             e->is_read ? 'r' : '-',
7427                             e->is_write ? 'w' : '-',
7428                             e->is_exec ? 'x' : '-',
7429                             e->is_priv ? 'p' : '-',
7430                             (uint64_t) e->offset, e->dev, e->inode);
7431             if (path) {
7432                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7433             } else {
7434                 dprintf(fd, "\n");
7435             }
7436         }
7437     }
7438 
7439     free_self_maps(map_info);
7440 
7441 #ifdef TARGET_VSYSCALL_PAGE
7442     /*
7443      * We only support execution from the vsyscall page.
7444      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7445      */
7446     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7447                     " --xp 00000000 00:00 0",
7448                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7449     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7450 #endif
7451 
7452     return 0;
7453 }
7454 
7455 static int open_self_stat(void *cpu_env, int fd)
7456 {
7457     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7458     TaskState *ts = cpu->opaque;
7459     g_autoptr(GString) buf = g_string_new(NULL);
7460     int i;
7461 
7462     for (i = 0; i < 44; i++) {
7463         if (i == 0) {
7464             /* pid */
7465             g_string_printf(buf, FMT_pid " ", getpid());
7466         } else if (i == 1) {
7467             /* app name */
7468             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7469             bin = bin ? bin + 1 : ts->bprm->argv[0];
7470             g_string_printf(buf, "(%.15s) ", bin);
7471         } else if (i == 27) {
7472             /* stack bottom */
7473             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7474         } else {
7475             /* the remaining fields are not emulated; write them as zero */
7476             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7477         }
7478 
7479         if (write(fd, buf->str, buf->len) != buf->len) {
7480             return -1;
7481         }
7482     }
7483 
7484     return 0;
7485 }
7486 
7487 static int open_self_auxv(void *cpu_env, int fd)
7488 {
7489     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7490     TaskState *ts = cpu->opaque;
7491     abi_ulong auxv = ts->info->saved_auxv;
7492     abi_ulong len = ts->info->auxv_len;
7493     char *ptr;
7494 
7495     /*
7496      * The auxiliary vector is stored on the target process's stack;
7497      * read the whole auxv vector and copy it out to the file.
7498      */
7499     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7500     if (ptr != NULL) {
7501         while (len > 0) {
7502             ssize_t r;
7503             r = write(fd, ptr, len);
7504             if (r <= 0) {
7505                 break;
7506             }
7507             len -= r;
7508             ptr += r;
7509         }
7510         lseek(fd, 0, SEEK_SET);
7511         unlock_user(ptr, auxv, len);
7512     }
7513 
7514     return 0;
7515 }
7516 
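/*
 * Return nonzero when filename names the given entry of this process's
 * own /proc directory, i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * with the emulator's own pid.
 */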
7517 static int is_proc_myself(const char *filename, const char *entry)
7518 {
7519     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7520         filename += strlen("/proc/");
7521         if (!strncmp(filename, "self/", strlen("self/"))) {
7522             filename += strlen("self/");
7523         } else if (*filename >= '1' && *filename <= '9') {
7524             char myself[80];
7525             snprintf(myself, sizeof(myself), "%d/", getpid());
7526             if (!strncmp(filename, myself, strlen(myself))) {
7527                 filename += strlen(myself);
7528             } else {
7529                 return 0;
7530             }
7531         } else {
7532             return 0;
7533         }
7534         if (!strcmp(filename, entry)) {
7535             return 1;
7536         }
7537     }
7538     return 0;
7539 }
7540 
7541 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7542     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7543 static int is_proc(const char *filename, const char *entry)
7544 {
7545     return strcmp(filename, entry) == 0;
7546 }
7547 #endif
7548 
7549 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7550 static int open_net_route(void *cpu_env, int fd)
7551 {
7552     FILE *fp;
7553     char *line = NULL;
7554     size_t len = 0;
7555     ssize_t read;
7556 
7557     fp = fopen("/proc/net/route", "r");
7558     if (fp == NULL) {
7559         return -1;
7560     }
7561 
7562     /* read header */
7563 
7564     read = getline(&line, &len, fp);
7565     dprintf(fd, "%s", line);
7566 
7567     /* read routes */
7568 
7569     while ((read = getline(&line, &len, fp)) != -1) {
7570         char iface[16];
7571         uint32_t dest, gw, mask;
7572         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7573         int fields;
7574 
7575         fields = sscanf(line,
7576                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7577                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7578                         &mask, &mtu, &window, &irtt);
7579         if (fields != 11) {
7580             continue;
7581         }
7582         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7583                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7584                 metric, tswap32(mask), mtu, window, irtt);
7585     }
7586 
7587     free(line);
7588     fclose(fp);
7589 
7590     return 0;
7591 }
7592 #endif
7593 
7594 #if defined(TARGET_SPARC)
7595 static int open_cpuinfo(void *cpu_env, int fd)
7596 {
7597     dprintf(fd, "type\t\t: sun4u\n");
7598     return 0;
7599 }
7600 #endif
7601 
7602 #if defined(TARGET_HPPA)
7603 static int open_cpuinfo(void *cpu_env, int fd)
7604 {
7605     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7606     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7607     dprintf(fd, "capabilities\t: os32\n");
7608     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7609     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7610     return 0;
7611 }
7612 #endif
7613 
7614 #if defined(TARGET_M68K)
7615 static int open_hardware(void *cpu_env, int fd)
7616 {
7617     dprintf(fd, "Model:\t\tqemu-m68k\n");
7618     return 0;
7619 }
7620 #endif
7621 
7622 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
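/*
 * openat() with special handling for the /proc entries listed in fakes[]
 * whose host contents would be wrong for the guest: those are generated
 * into an unlinked temporary file by the matching fill() callback and the
 * resulting descriptor is returned instead of opening the real file.
 */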
7623 {
7624     struct fake_open {
7625         const char *filename;
7626         int (*fill)(void *cpu_env, int fd);
7627         int (*cmp)(const char *s1, const char *s2);
7628     };
7629     const struct fake_open *fake_open;
7630     static const struct fake_open fakes[] = {
7631         { "maps", open_self_maps, is_proc_myself },
7632         { "stat", open_self_stat, is_proc_myself },
7633         { "auxv", open_self_auxv, is_proc_myself },
7634         { "cmdline", open_self_cmdline, is_proc_myself },
7635 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7636         { "/proc/net/route", open_net_route, is_proc },
7637 #endif
7638 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7639         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7640 #endif
7641 #if defined(TARGET_M68K)
7642         { "/proc/hardware", open_hardware, is_proc },
7643 #endif
7644         { NULL, NULL, NULL }
7645     };
7646 
7647     if (is_proc_myself(pathname, "exe")) {
7648         int execfd = qemu_getauxval(AT_EXECFD);
7649         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7650     }
7651 
7652     for (fake_open = fakes; fake_open->filename; fake_open++) {
7653         if (fake_open->cmp(pathname, fake_open->filename)) {
7654             break;
7655         }
7656     }
7657 
7658     if (fake_open->filename) {
7659         const char *tmpdir;
7660         char filename[PATH_MAX];
7661         int fd, r;
7662 
7663         /* create a temporary file to hold the synthesized contents */
7664         tmpdir = getenv("TMPDIR");
7665         if (!tmpdir)
7666             tmpdir = "/tmp";
7667         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7668         fd = mkstemp(filename);
7669         if (fd < 0) {
7670             return fd;
7671         }
7672         unlink(filename);
7673 
7674         if ((r = fake_open->fill(cpu_env, fd))) {
7675             int e = errno;
7676             close(fd);
7677             errno = e;
7678             return r;
7679         }
7680         lseek(fd, 0, SEEK_SET);
7681 
7682         return fd;
7683     }
7684 
7685     return safe_openat(dirfd, path(pathname), flags, mode);
7686 }
7687 
7688 #define TIMER_MAGIC 0x0caf0000
7689 #define TIMER_MAGIC_MASK 0xffff0000
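/*
 * get_timer_id() below expects guest timer IDs to carry TIMER_MAGIC in the
 * upper 16 bits and the g_posix_timers index in the lower 16 bits
 * (e.g. 0x0caf0003 for index 3); anything else is rejected as -TARGET_EINVAL.
 */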
7690 
7691 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7692 static target_timer_t get_timer_id(abi_long arg)
7693 {
7694     target_timer_t timerid = arg;
7695 
7696     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7697         return -TARGET_EINVAL;
7698     }
7699 
7700     timerid &= 0xffff;
7701 
7702     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7703         return -TARGET_EINVAL;
7704     }
7705 
7706     return timerid;
7707 }
7708 
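/*
 * Copy a guest CPU-affinity bitmap into a host bitmap of unsigned longs,
 * bit by bit, so that differing abi_ulong and host long widths are handled.
 * For example, with a 32-bit abi_ulong and 64-bit host longs, bit 0 of
 * guest word 1 lands in bit 32 of host word 0.
 */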
7709 static int target_to_host_cpu_mask(unsigned long *host_mask,
7710                                    size_t host_size,
7711                                    abi_ulong target_addr,
7712                                    size_t target_size)
7713 {
7714     unsigned target_bits = sizeof(abi_ulong) * 8;
7715     unsigned host_bits = sizeof(*host_mask) * 8;
7716     abi_ulong *target_mask;
7717     unsigned i, j;
7718 
7719     assert(host_size >= target_size);
7720 
7721     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7722     if (!target_mask) {
7723         return -TARGET_EFAULT;
7724     }
7725     memset(host_mask, 0, host_size);
7726 
7727     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7728         unsigned bit = i * target_bits;
7729         abi_ulong val;
7730 
7731         __get_user(val, &target_mask[i]);
7732         for (j = 0; j < target_bits; j++, bit++) {
7733             if (val & (1UL << j)) {
7734                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7735             }
7736         }
7737     }
7738 
7739     unlock_user(target_mask, target_addr, 0);
7740     return 0;
7741 }
7742 
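/*
 * The reverse of target_to_host_cpu_mask(): repack a host CPU-affinity
 * bitmap into guest abi_ulong words and store it at target_addr.
 */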
7743 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7744                                    size_t host_size,
7745                                    abi_ulong target_addr,
7746                                    size_t target_size)
7747 {
7748     unsigned target_bits = sizeof(abi_ulong) * 8;
7749     unsigned host_bits = sizeof(*host_mask) * 8;
7750     abi_ulong *target_mask;
7751     unsigned i, j;
7752 
7753     assert(host_size >= target_size);
7754 
7755     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7756     if (!target_mask) {
7757         return -TARGET_EFAULT;
7758     }
7759 
7760     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7761         unsigned bit = i * target_bits;
7762         abi_ulong val = 0;
7763 
7764         for (j = 0; j < target_bits; j++, bit++) {
7765             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7766                 val |= 1UL << j;
7767             }
7768         }
7769         __put_user(val, &target_mask[i]);
7770     }
7771 
7772     unlock_user(target_mask, target_addr, target_size);
7773     return 0;
7774 }
7775 
7776 /* This is an internal helper for do_syscall so that there is a single
7777  * return point, which allows actions such as logging of syscall results
7778  * to be performed.
7779  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7780  */
7781 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7782                             abi_long arg2, abi_long arg3, abi_long arg4,
7783                             abi_long arg5, abi_long arg6, abi_long arg7,
7784                             abi_long arg8)
7785 {
7786     CPUState *cpu = env_cpu(cpu_env);
7787     abi_long ret;
7788 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7789     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7790     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7791     || defined(TARGET_NR_statx)
7792     struct stat st;
7793 #endif
7794 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7795     || defined(TARGET_NR_fstatfs)
7796     struct statfs stfs;
7797 #endif
7798     void *p;
7799 
7800     switch(num) {
7801     case TARGET_NR_exit:
7802         /* In old applications this may be used to implement _exit(2).
7803            However in threaded applications it is used for thread termination,
7804            and _exit_group is used for application termination.
7805            Do thread termination if we have more than one thread.  */
7806 
7807         if (block_signals()) {
7808             return -TARGET_ERESTARTSYS;
7809         }
7810 
7811         pthread_mutex_lock(&clone_lock);
7812 
7813         if (CPU_NEXT(first_cpu)) {
7814             TaskState *ts = cpu->opaque;
7815 
7816             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7817             object_unref(OBJECT(cpu));
7818             /*
7819              * At this point the CPU should be unrealized and removed
7820              * from cpu lists. We can clean-up the rest of the thread
7821              * data without the lock held.
7822              */
7823 
7824             pthread_mutex_unlock(&clone_lock);
7825 
7826             if (ts->child_tidptr) {
7827                 put_user_u32(0, ts->child_tidptr);
7828                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7829                           NULL, NULL, 0);
7830             }
7831             thread_cpu = NULL;
7832             g_free(ts);
7833             rcu_unregister_thread();
7834             pthread_exit(NULL);
7835         }
7836 
7837         pthread_mutex_unlock(&clone_lock);
7838         preexit_cleanup(cpu_env, arg1);
7839         _exit(arg1);
7840         return 0; /* avoid warning */
7841     case TARGET_NR_read:
7842         if (arg2 == 0 && arg3 == 0) {
7843             return get_errno(safe_read(arg1, 0, 0));
7844         } else {
7845             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7846                 return -TARGET_EFAULT;
7847             ret = get_errno(safe_read(arg1, p, arg3));
7848             if (ret >= 0 &&
7849                 fd_trans_host_to_target_data(arg1)) {
7850                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7851             }
7852             unlock_user(p, arg2, ret);
7853         }
7854         return ret;
7855     case TARGET_NR_write:
7856         if (arg2 == 0 && arg3 == 0) {
7857             return get_errno(safe_write(arg1, 0, 0));
7858         }
7859         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7860             return -TARGET_EFAULT;
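        /*
         * A registered fd translator may rewrite the data in place, so work
         * on a copy rather than the guest buffer that was locked read-only.
         */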
7861         if (fd_trans_target_to_host_data(arg1)) {
7862             void *copy = g_malloc(arg3);
7863             memcpy(copy, p, arg3);
7864             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7865             if (ret >= 0) {
7866                 ret = get_errno(safe_write(arg1, copy, ret));
7867             }
7868             g_free(copy);
7869         } else {
7870             ret = get_errno(safe_write(arg1, p, arg3));
7871         }
7872         unlock_user(p, arg2, 0);
7873         return ret;
7874 
7875 #ifdef TARGET_NR_open
7876     case TARGET_NR_open:
7877         if (!(p = lock_user_string(arg1)))
7878             return -TARGET_EFAULT;
7879         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7880                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7881                                   arg3));
7882         fd_trans_unregister(ret);
7883         unlock_user(p, arg1, 0);
7884         return ret;
7885 #endif
7886     case TARGET_NR_openat:
7887         if (!(p = lock_user_string(arg2)))
7888             return -TARGET_EFAULT;
7889         ret = get_errno(do_openat(cpu_env, arg1, p,
7890                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7891                                   arg4));
7892         fd_trans_unregister(ret);
7893         unlock_user(p, arg2, 0);
7894         return ret;
7895 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7896     case TARGET_NR_name_to_handle_at:
7897         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7898         return ret;
7899 #endif
7900 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7901     case TARGET_NR_open_by_handle_at:
7902         ret = do_open_by_handle_at(arg1, arg2, arg3);
7903         fd_trans_unregister(ret);
7904         return ret;
7905 #endif
7906     case TARGET_NR_close:
7907         fd_trans_unregister(arg1);
7908         return get_errno(close(arg1));
7909 
7910     case TARGET_NR_brk:
7911         return do_brk(arg1);
7912 #ifdef TARGET_NR_fork
7913     case TARGET_NR_fork:
7914         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7915 #endif
7916 #ifdef TARGET_NR_waitpid
7917     case TARGET_NR_waitpid:
7918         {
7919             int status;
7920             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7921             if (!is_error(ret) && arg2 && ret
7922                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7923                 return -TARGET_EFAULT;
7924         }
7925         return ret;
7926 #endif
7927 #ifdef TARGET_NR_waitid
7928     case TARGET_NR_waitid:
7929         {
7930             siginfo_t info;
7931             info.si_pid = 0;
7932             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7933             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7934                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7935                     return -TARGET_EFAULT;
7936                 host_to_target_siginfo(p, &info);
7937                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7938             }
7939         }
7940         return ret;
7941 #endif
7942 #ifdef TARGET_NR_creat /* not on alpha */
7943     case TARGET_NR_creat:
7944         if (!(p = lock_user_string(arg1)))
7945             return -TARGET_EFAULT;
7946         ret = get_errno(creat(p, arg2));
7947         fd_trans_unregister(ret);
7948         unlock_user(p, arg1, 0);
7949         return ret;
7950 #endif
7951 #ifdef TARGET_NR_link
7952     case TARGET_NR_link:
7953         {
7954             void * p2;
7955             p = lock_user_string(arg1);
7956             p2 = lock_user_string(arg2);
7957             if (!p || !p2)
7958                 ret = -TARGET_EFAULT;
7959             else
7960                 ret = get_errno(link(p, p2));
7961             unlock_user(p2, arg2, 0);
7962             unlock_user(p, arg1, 0);
7963         }
7964         return ret;
7965 #endif
7966 #if defined(TARGET_NR_linkat)
7967     case TARGET_NR_linkat:
7968         {
7969             void * p2 = NULL;
7970             if (!arg2 || !arg4)
7971                 return -TARGET_EFAULT;
7972             p  = lock_user_string(arg2);
7973             p2 = lock_user_string(arg4);
7974             if (!p || !p2)
7975                 ret = -TARGET_EFAULT;
7976             else
7977                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7978             unlock_user(p, arg2, 0);
7979             unlock_user(p2, arg4, 0);
7980         }
7981         return ret;
7982 #endif
7983 #ifdef TARGET_NR_unlink
7984     case TARGET_NR_unlink:
7985         if (!(p = lock_user_string(arg1)))
7986             return -TARGET_EFAULT;
7987         ret = get_errno(unlink(p));
7988         unlock_user(p, arg1, 0);
7989         return ret;
7990 #endif
7991 #if defined(TARGET_NR_unlinkat)
7992     case TARGET_NR_unlinkat:
7993         if (!(p = lock_user_string(arg2)))
7994             return -TARGET_EFAULT;
7995         ret = get_errno(unlinkat(arg1, p, arg3));
7996         unlock_user(p, arg2, 0);
7997         return ret;
7998 #endif
7999     case TARGET_NR_execve:
8000         {
8001             char **argp, **envp;
8002             int argc, envc;
8003             abi_ulong gp;
8004             abi_ulong guest_argp;
8005             abi_ulong guest_envp;
8006             abi_ulong addr;
8007             char **q;
8008             int total_size = 0;
8009 
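            /*
             * First pass: count the argv and envp entries so that host-side
             * arrays of the right size can be allocated before the strings
             * are locked in a second pass.
             */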
8010             argc = 0;
8011             guest_argp = arg2;
8012             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8013                 if (get_user_ual(addr, gp))
8014                     return -TARGET_EFAULT;
8015                 if (!addr)
8016                     break;
8017                 argc++;
8018             }
8019             envc = 0;
8020             guest_envp = arg3;
8021             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8022                 if (get_user_ual(addr, gp))
8023                     return -TARGET_EFAULT;
8024                 if (!addr)
8025                     break;
8026                 envc++;
8027             }
8028 
8029             argp = g_new0(char *, argc + 1);
8030             envp = g_new0(char *, envc + 1);
8031 
8032             for (gp = guest_argp, q = argp; gp;
8033                   gp += sizeof(abi_ulong), q++) {
8034                 if (get_user_ual(addr, gp))
8035                     goto execve_efault;
8036                 if (!addr)
8037                     break;
8038                 if (!(*q = lock_user_string(addr)))
8039                     goto execve_efault;
8040                 total_size += strlen(*q) + 1;
8041             }
8042             *q = NULL;
8043 
8044             for (gp = guest_envp, q = envp; gp;
8045                   gp += sizeof(abi_ulong), q++) {
8046                 if (get_user_ual(addr, gp))
8047                     goto execve_efault;
8048                 if (!addr)
8049                     break;
8050                 if (!(*q = lock_user_string(addr)))
8051                     goto execve_efault;
8052                 total_size += strlen(*q) + 1;
8053             }
8054             *q = NULL;
8055 
8056             if (!(p = lock_user_string(arg1)))
8057                 goto execve_efault;
8058             /* Although execve() is not an interruptible syscall it is
8059              * a special case where we must use the safe_syscall wrapper:
8060              * if we allow a signal to happen before we make the host
8061              * syscall then we will 'lose' it, because at the point of
8062              * execve the process leaves QEMU's control. So we use the
8063              * safe syscall wrapper to ensure that we either take the
8064              * signal as a guest signal, or else it does not happen
8065              * before the execve completes and makes it the other
8066              * program's problem.
8067              */
8068             ret = get_errno(safe_execve(p, argp, envp));
8069             unlock_user(p, arg1, 0);
8070 
8071             goto execve_end;
8072 
8073         execve_efault:
8074             ret = -TARGET_EFAULT;
8075 
8076         execve_end:
8077             for (gp = guest_argp, q = argp; *q;
8078                   gp += sizeof(abi_ulong), q++) {
8079                 if (get_user_ual(addr, gp)
8080                     || !addr)
8081                     break;
8082                 unlock_user(*q, addr, 0);
8083             }
8084             for (gp = guest_envp, q = envp; *q;
8085                   gp += sizeof(abi_ulong), q++) {
8086                 if (get_user_ual(addr, gp)
8087                     || !addr)
8088                     break;
8089                 unlock_user(*q, addr, 0);
8090             }
8091 
8092             g_free(argp);
8093             g_free(envp);
8094         }
8095         return ret;
8096     case TARGET_NR_chdir:
8097         if (!(p = lock_user_string(arg1)))
8098             return -TARGET_EFAULT;
8099         ret = get_errno(chdir(p));
8100         unlock_user(p, arg1, 0);
8101         return ret;
8102 #ifdef TARGET_NR_time
8103     case TARGET_NR_time:
8104         {
8105             time_t host_time;
8106             ret = get_errno(time(&host_time));
8107             if (!is_error(ret)
8108                 && arg1
8109                 && put_user_sal(host_time, arg1))
8110                 return -TARGET_EFAULT;
8111         }
8112         return ret;
8113 #endif
8114 #ifdef TARGET_NR_mknod
8115     case TARGET_NR_mknod:
8116         if (!(p = lock_user_string(arg1)))
8117             return -TARGET_EFAULT;
8118         ret = get_errno(mknod(p, arg2, arg3));
8119         unlock_user(p, arg1, 0);
8120         return ret;
8121 #endif
8122 #if defined(TARGET_NR_mknodat)
8123     case TARGET_NR_mknodat:
8124         if (!(p = lock_user_string(arg2)))
8125             return -TARGET_EFAULT;
8126         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8127         unlock_user(p, arg2, 0);
8128         return ret;
8129 #endif
8130 #ifdef TARGET_NR_chmod
8131     case TARGET_NR_chmod:
8132         if (!(p = lock_user_string(arg1)))
8133             return -TARGET_EFAULT;
8134         ret = get_errno(chmod(p, arg2));
8135         unlock_user(p, arg1, 0);
8136         return ret;
8137 #endif
8138 #ifdef TARGET_NR_lseek
8139     case TARGET_NR_lseek:
8140         return get_errno(lseek(arg1, arg2, arg3));
8141 #endif
8142 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8143     /* Alpha specific */
8144     case TARGET_NR_getxpid:
8145         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8146         return get_errno(getpid());
8147 #endif
8148 #ifdef TARGET_NR_getpid
8149     case TARGET_NR_getpid:
8150         return get_errno(getpid());
8151 #endif
8152     case TARGET_NR_mount:
8153         {
8154             /* need to look at the data field */
8155             void *p2, *p3;
8156 
8157             if (arg1) {
8158                 p = lock_user_string(arg1);
8159                 if (!p) {
8160                     return -TARGET_EFAULT;
8161                 }
8162             } else {
8163                 p = NULL;
8164             }
8165 
8166             p2 = lock_user_string(arg2);
8167             if (!p2) {
8168                 if (arg1) {
8169                     unlock_user(p, arg1, 0);
8170                 }
8171                 return -TARGET_EFAULT;
8172             }
8173 
8174             if (arg3) {
8175                 p3 = lock_user_string(arg3);
8176                 if (!p3) {
8177                     if (arg1) {
8178                         unlock_user(p, arg1, 0);
8179                     }
8180                     unlock_user(p2, arg2, 0);
8181                     return -TARGET_EFAULT;
8182                 }
8183             } else {
8184                 p3 = NULL;
8185             }
8186 
8187             /* FIXME - arg5 should be locked, but it isn't clear how to
8188              * do that since it's not guaranteed to be a NULL-terminated
8189              * string.
8190              */
8191             if (!arg5) {
8192                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8193             } else {
8194                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8195             }
8196             ret = get_errno(ret);
8197 
8198             if (arg1) {
8199                 unlock_user(p, arg1, 0);
8200             }
8201             unlock_user(p2, arg2, 0);
8202             if (arg3) {
8203                 unlock_user(p3, arg3, 0);
8204             }
8205         }
8206         return ret;
8207 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8208 #if defined(TARGET_NR_umount)
8209     case TARGET_NR_umount:
8210 #endif
8211 #if defined(TARGET_NR_oldumount)
8212     case TARGET_NR_oldumount:
8213 #endif
8214         if (!(p = lock_user_string(arg1)))
8215             return -TARGET_EFAULT;
8216         ret = get_errno(umount(p));
8217         unlock_user(p, arg1, 0);
8218         return ret;
8219 #endif
8220 #ifdef TARGET_NR_stime /* not on alpha */
8221     case TARGET_NR_stime:
8222         {
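            /*
             * stime() is implemented via clock_settime(CLOCK_REALTIME); only
             * the seconds value is read from the guest, tv_nsec stays 0.
             */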
8223             struct timespec ts;
8224             ts.tv_nsec = 0;
8225             if (get_user_sal(ts.tv_sec, arg1)) {
8226                 return -TARGET_EFAULT;
8227             }
8228             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8229         }
8230 #endif
8231 #ifdef TARGET_NR_alarm /* not on alpha */
8232     case TARGET_NR_alarm:
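        /*
         * alarm() returns the number of seconds remaining on any previous
         * alarm and cannot fail, so the value is passed through without
         * errno conversion.
         */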
8233         return alarm(arg1);
8234 #endif
8235 #ifdef TARGET_NR_pause /* not on alpha */
8236     case TARGET_NR_pause:
8237         if (!block_signals()) {
8238             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8239         }
8240         return -TARGET_EINTR;
8241 #endif
8242 #ifdef TARGET_NR_utime
8243     case TARGET_NR_utime:
8244         {
8245             struct utimbuf tbuf, *host_tbuf;
8246             struct target_utimbuf *target_tbuf;
8247             if (arg2) {
8248                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8249                     return -TARGET_EFAULT;
8250                 tbuf.actime = tswapal(target_tbuf->actime);
8251                 tbuf.modtime = tswapal(target_tbuf->modtime);
8252                 unlock_user_struct(target_tbuf, arg2, 0);
8253                 host_tbuf = &tbuf;
8254             } else {
8255                 host_tbuf = NULL;
8256             }
8257             if (!(p = lock_user_string(arg1)))
8258                 return -TARGET_EFAULT;
8259             ret = get_errno(utime(p, host_tbuf));
8260             unlock_user(p, arg1, 0);
8261         }
8262         return ret;
8263 #endif
8264 #ifdef TARGET_NR_utimes
8265     case TARGET_NR_utimes:
8266         {
8267             struct timeval *tvp, tv[2];
8268             if (arg2) {
8269                 if (copy_from_user_timeval(&tv[0], arg2)
8270                     || copy_from_user_timeval(&tv[1],
8271                                               arg2 + sizeof(struct target_timeval)))
8272                     return -TARGET_EFAULT;
8273                 tvp = tv;
8274             } else {
8275                 tvp = NULL;
8276             }
8277             if (!(p = lock_user_string(arg1)))
8278                 return -TARGET_EFAULT;
8279             ret = get_errno(utimes(p, tvp));
8280             unlock_user(p, arg1, 0);
8281         }
8282         return ret;
8283 #endif
8284 #if defined(TARGET_NR_futimesat)
8285     case TARGET_NR_futimesat:
8286         {
8287             struct timeval *tvp, tv[2];
8288             if (arg3) {
8289                 if (copy_from_user_timeval(&tv[0], arg3)
8290                     || copy_from_user_timeval(&tv[1],
8291                                               arg3 + sizeof(struct target_timeval)))
8292                     return -TARGET_EFAULT;
8293                 tvp = tv;
8294             } else {
8295                 tvp = NULL;
8296             }
8297             if (!(p = lock_user_string(arg2))) {
8298                 return -TARGET_EFAULT;
8299             }
8300             ret = get_errno(futimesat(arg1, path(p), tvp));
8301             unlock_user(p, arg2, 0);
8302         }
8303         return ret;
8304 #endif
8305 #ifdef TARGET_NR_access
8306     case TARGET_NR_access:
8307         if (!(p = lock_user_string(arg1))) {
8308             return -TARGET_EFAULT;
8309         }
8310         ret = get_errno(access(path(p), arg2));
8311         unlock_user(p, arg1, 0);
8312         return ret;
8313 #endif
8314 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8315     case TARGET_NR_faccessat:
8316         if (!(p = lock_user_string(arg2))) {
8317             return -TARGET_EFAULT;
8318         }
8319         ret = get_errno(faccessat(arg1, p, arg3, 0));
8320         unlock_user(p, arg2, 0);
8321         return ret;
8322 #endif
8323 #ifdef TARGET_NR_nice /* not on alpha */
8324     case TARGET_NR_nice:
8325         return get_errno(nice(arg1));
8326 #endif
8327     case TARGET_NR_sync:
8328         sync();
8329         return 0;
8330 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8331     case TARGET_NR_syncfs:
8332         return get_errno(syncfs(arg1));
8333 #endif
8334     case TARGET_NR_kill:
8335         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8336 #ifdef TARGET_NR_rename
8337     case TARGET_NR_rename:
8338         {
8339             void *p2;
8340             p = lock_user_string(arg1);
8341             p2 = lock_user_string(arg2);
8342             if (!p || !p2)
8343                 ret = -TARGET_EFAULT;
8344             else
8345                 ret = get_errno(rename(p, p2));
8346             unlock_user(p2, arg2, 0);
8347             unlock_user(p, arg1, 0);
8348         }
8349         return ret;
8350 #endif
8351 #if defined(TARGET_NR_renameat)
8352     case TARGET_NR_renameat:
8353         {
8354             void *p2;
8355             p  = lock_user_string(arg2);
8356             p2 = lock_user_string(arg4);
8357             if (!p || !p2)
8358                 ret = -TARGET_EFAULT;
8359             else
8360                 ret = get_errno(renameat(arg1, p, arg3, p2));
8361             unlock_user(p2, arg4, 0);
8362             unlock_user(p, arg2, 0);
8363         }
8364         return ret;
8365 #endif
8366 #if defined(TARGET_NR_renameat2)
8367     case TARGET_NR_renameat2:
8368         {
8369             void *p2;
8370             p  = lock_user_string(arg2);
8371             p2 = lock_user_string(arg4);
8372             if (!p || !p2) {
8373                 ret = -TARGET_EFAULT;
8374             } else {
8375                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8376             }
8377             unlock_user(p2, arg4, 0);
8378             unlock_user(p, arg2, 0);
8379         }
8380         return ret;
8381 #endif
8382 #ifdef TARGET_NR_mkdir
8383     case TARGET_NR_mkdir:
8384         if (!(p = lock_user_string(arg1)))
8385             return -TARGET_EFAULT;
8386         ret = get_errno(mkdir(p, arg2));
8387         unlock_user(p, arg1, 0);
8388         return ret;
8389 #endif
8390 #if defined(TARGET_NR_mkdirat)
8391     case TARGET_NR_mkdirat:
8392         if (!(p = lock_user_string(arg2)))
8393             return -TARGET_EFAULT;
8394         ret = get_errno(mkdirat(arg1, p, arg3));
8395         unlock_user(p, arg2, 0);
8396         return ret;
8397 #endif
8398 #ifdef TARGET_NR_rmdir
8399     case TARGET_NR_rmdir:
8400         if (!(p = lock_user_string(arg1)))
8401             return -TARGET_EFAULT;
8402         ret = get_errno(rmdir(p));
8403         unlock_user(p, arg1, 0);
8404         return ret;
8405 #endif
8406     case TARGET_NR_dup:
8407         ret = get_errno(dup(arg1));
8408         if (ret >= 0) {
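            /* The new descriptor inherits any fd translator registered on arg1. */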
8409             fd_trans_dup(arg1, ret);
8410         }
8411         return ret;
8412 #ifdef TARGET_NR_pipe
8413     case TARGET_NR_pipe:
8414         return do_pipe(cpu_env, arg1, 0, 0);
8415 #endif
8416 #ifdef TARGET_NR_pipe2
8417     case TARGET_NR_pipe2:
8418         return do_pipe(cpu_env, arg1,
8419                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8420 #endif
8421     case TARGET_NR_times:
8422         {
8423             struct target_tms *tmsp;
8424             struct tms tms;
8425             ret = get_errno(times(&tms));
8426             if (arg1) {
8427                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8428                 if (!tmsp)
8429                     return -TARGET_EFAULT;
8430                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8431                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8432                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8433                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8434             }
8435             if (!is_error(ret))
8436                 ret = host_to_target_clock_t(ret);
8437         }
8438         return ret;
8439     case TARGET_NR_acct:
8440         if (arg1 == 0) {
8441             ret = get_errno(acct(NULL));
8442         } else {
8443             if (!(p = lock_user_string(arg1))) {
8444                 return -TARGET_EFAULT;
8445             }
8446             ret = get_errno(acct(path(p)));
8447             unlock_user(p, arg1, 0);
8448         }
8449         return ret;
8450 #ifdef TARGET_NR_umount2
8451     case TARGET_NR_umount2:
8452         if (!(p = lock_user_string(arg1)))
8453             return -TARGET_EFAULT;
8454         ret = get_errno(umount2(p, arg2));
8455         unlock_user(p, arg1, 0);
8456         return ret;
8457 #endif
8458     case TARGET_NR_ioctl:
8459         return do_ioctl(arg1, arg2, arg3);
8460 #ifdef TARGET_NR_fcntl
8461     case TARGET_NR_fcntl:
8462         return do_fcntl(arg1, arg2, arg3);
8463 #endif
8464     case TARGET_NR_setpgid:
8465         return get_errno(setpgid(arg1, arg2));
8466     case TARGET_NR_umask:
8467         return get_errno(umask(arg1));
8468     case TARGET_NR_chroot:
8469         if (!(p = lock_user_string(arg1)))
8470             return -TARGET_EFAULT;
8471         ret = get_errno(chroot(p));
8472         unlock_user(p, arg1, 0);
8473         return ret;
8474 #ifdef TARGET_NR_dup2
8475     case TARGET_NR_dup2:
8476         ret = get_errno(dup2(arg1, arg2));
8477         if (ret >= 0) {
8478             fd_trans_dup(arg1, arg2);
8479         }
8480         return ret;
8481 #endif
8482 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8483     case TARGET_NR_dup3:
8484     {
8485         int host_flags;
8486 
8487         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8488             return -TARGET_EINVAL;
8489         }
8490         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8491         ret = get_errno(dup3(arg1, arg2, host_flags));
8492         if (ret >= 0) {
8493             fd_trans_dup(arg1, arg2);
8494         }
8495         return ret;
8496     }
8497 #endif
8498 #ifdef TARGET_NR_getppid /* not on alpha */
8499     case TARGET_NR_getppid:
8500         return get_errno(getppid());
8501 #endif
8502 #ifdef TARGET_NR_getpgrp
8503     case TARGET_NR_getpgrp:
8504         return get_errno(getpgrp());
8505 #endif
8506     case TARGET_NR_setsid:
8507         return get_errno(setsid());
8508 #ifdef TARGET_NR_sigaction
8509     case TARGET_NR_sigaction:
8510         {
8511 #if defined(TARGET_ALPHA)
8512             struct target_sigaction act, oact, *pact = 0;
8513             struct target_old_sigaction *old_act;
8514             if (arg2) {
8515                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8516                     return -TARGET_EFAULT;
8517                 act._sa_handler = old_act->_sa_handler;
8518                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8519                 act.sa_flags = old_act->sa_flags;
8520                 act.sa_restorer = 0;
8521                 unlock_user_struct(old_act, arg2, 0);
8522                 pact = &act;
8523             }
8524             ret = get_errno(do_sigaction(arg1, pact, &oact));
8525             if (!is_error(ret) && arg3) {
8526                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8527                     return -TARGET_EFAULT;
8528                 old_act->_sa_handler = oact._sa_handler;
8529                 old_act->sa_mask = oact.sa_mask.sig[0];
8530                 old_act->sa_flags = oact.sa_flags;
8531                 unlock_user_struct(old_act, arg3, 1);
8532             }
8533 #elif defined(TARGET_MIPS)
8534             struct target_sigaction act, oact, *pact, *old_act;
8535 
8536             if (arg2) {
8537                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8538                     return -TARGET_EFAULT;
8539                 act._sa_handler = old_act->_sa_handler;
8540                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8541                 act.sa_flags = old_act->sa_flags;
8542                 unlock_user_struct(old_act, arg2, 0);
8543                 pact = &act;
8544             } else {
8545                 pact = NULL;
8546             }
8547 
8548             ret = get_errno(do_sigaction(arg1, pact, &oact));
8549 
8550             if (!is_error(ret) && arg3) {
8551                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8552                     return -TARGET_EFAULT;
8553                 old_act->_sa_handler = oact._sa_handler;
8554                 old_act->sa_flags = oact.sa_flags;
8555                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8556                 old_act->sa_mask.sig[1] = 0;
8557                 old_act->sa_mask.sig[2] = 0;
8558                 old_act->sa_mask.sig[3] = 0;
8559                 unlock_user_struct(old_act, arg3, 1);
8560             }
8561 #else
8562             struct target_old_sigaction *old_act;
8563             struct target_sigaction act, oact, *pact;
8564             if (arg2) {
8565                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8566                     return -TARGET_EFAULT;
8567                 act._sa_handler = old_act->_sa_handler;
8568                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8569                 act.sa_flags = old_act->sa_flags;
8570                 act.sa_restorer = old_act->sa_restorer;
8571 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8572                 act.ka_restorer = 0;
8573 #endif
8574                 unlock_user_struct(old_act, arg2, 0);
8575                 pact = &act;
8576             } else {
8577                 pact = NULL;
8578             }
8579             ret = get_errno(do_sigaction(arg1, pact, &oact));
8580             if (!is_error(ret) && arg3) {
8581                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8582                     return -TARGET_EFAULT;
8583                 old_act->_sa_handler = oact._sa_handler;
8584                 old_act->sa_mask = oact.sa_mask.sig[0];
8585                 old_act->sa_flags = oact.sa_flags;
8586                 old_act->sa_restorer = oact.sa_restorer;
8587                 unlock_user_struct(old_act, arg3, 1);
8588             }
8589 #endif
8590         }
8591         return ret;
8592 #endif
8593     case TARGET_NR_rt_sigaction:
8594         {
8595 #if defined(TARGET_ALPHA)
8596             /* For Alpha and SPARC this is a 5 argument syscall, with
8597              * a 'restorer' parameter which must be copied into the
8598              * sa_restorer field of the sigaction struct.
8599              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8600              * and arg5 is the sigsetsize.
8601              * Alpha also has a separate rt_sigaction struct that it uses
8602              * here; SPARC uses the usual sigaction struct.
8603              */
8604             struct target_rt_sigaction *rt_act;
8605             struct target_sigaction act, oact, *pact = 0;
8606 
8607             if (arg4 != sizeof(target_sigset_t)) {
8608                 return -TARGET_EINVAL;
8609             }
8610             if (arg2) {
8611                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8612                     return -TARGET_EFAULT;
8613                 act._sa_handler = rt_act->_sa_handler;
8614                 act.sa_mask = rt_act->sa_mask;
8615                 act.sa_flags = rt_act->sa_flags;
8616                 act.sa_restorer = arg5;
8617                 unlock_user_struct(rt_act, arg2, 0);
8618                 pact = &act;
8619             }
8620             ret = get_errno(do_sigaction(arg1, pact, &oact));
8621             if (!is_error(ret) && arg3) {
8622                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8623                     return -TARGET_EFAULT;
8624                 rt_act->_sa_handler = oact._sa_handler;
8625                 rt_act->sa_mask = oact.sa_mask;
8626                 rt_act->sa_flags = oact.sa_flags;
8627                 unlock_user_struct(rt_act, arg3, 1);
8628             }
8629 #else
8630 #ifdef TARGET_SPARC
8631             target_ulong restorer = arg4;
8632             target_ulong sigsetsize = arg5;
8633 #else
8634             target_ulong sigsetsize = arg4;
8635 #endif
8636             struct target_sigaction *act;
8637             struct target_sigaction *oact;
8638 
8639             if (sigsetsize != sizeof(target_sigset_t)) {
8640                 return -TARGET_EINVAL;
8641             }
8642             if (arg2) {
8643                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8644                     return -TARGET_EFAULT;
8645                 }
8646 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8647                 act->ka_restorer = restorer;
8648 #endif
8649             } else {
8650                 act = NULL;
8651             }
8652             if (arg3) {
8653                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8654                     ret = -TARGET_EFAULT;
8655                     goto rt_sigaction_fail;
8656                 }
8657             } else
8658                 oact = NULL;
8659             ret = get_errno(do_sigaction(arg1, act, oact));
8660         rt_sigaction_fail:
8661             if (act)
8662                 unlock_user_struct(act, arg2, 0);
8663             if (oact)
8664                 unlock_user_struct(oact, arg3, 1);
8665 #endif
8666         }
8667         return ret;
8668 #ifdef TARGET_NR_sgetmask /* not on alpha */
8669     case TARGET_NR_sgetmask:
8670         {
8671             sigset_t cur_set;
8672             abi_ulong target_set;
8673             ret = do_sigprocmask(0, NULL, &cur_set);
8674             if (!ret) {
8675                 host_to_target_old_sigset(&target_set, &cur_set);
8676                 ret = target_set;
8677             }
8678         }
8679         return ret;
8680 #endif
8681 #ifdef TARGET_NR_ssetmask /* not on alpha */
8682     case TARGET_NR_ssetmask:
8683         {
8684             sigset_t set, oset;
8685             abi_ulong target_set = arg1;
8686             target_to_host_old_sigset(&set, &target_set);
8687             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8688             if (!ret) {
8689                 host_to_target_old_sigset(&target_set, &oset);
8690                 ret = target_set;
8691             }
8692         }
8693         return ret;
8694 #endif
8695 #ifdef TARGET_NR_sigprocmask
8696     case TARGET_NR_sigprocmask:
8697         {
8698 #if defined(TARGET_ALPHA)
8699             sigset_t set, oldset;
8700             abi_ulong mask;
8701             int how;
8702 
8703             switch (arg1) {
8704             case TARGET_SIG_BLOCK:
8705                 how = SIG_BLOCK;
8706                 break;
8707             case TARGET_SIG_UNBLOCK:
8708                 how = SIG_UNBLOCK;
8709                 break;
8710             case TARGET_SIG_SETMASK:
8711                 how = SIG_SETMASK;
8712                 break;
8713             default:
8714                 return -TARGET_EINVAL;
8715             }
8716             mask = arg2;
8717             target_to_host_old_sigset(&set, &mask);
8718 
8719             ret = do_sigprocmask(how, &set, &oldset);
8720             if (!is_error(ret)) {
8721                 host_to_target_old_sigset(&mask, &oldset);
8722                 ret = mask;
8723                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8724             }
8725 #else
8726             sigset_t set, oldset, *set_ptr;
8727             int how;
8728 
8729             if (arg2) {
8730                 switch (arg1) {
8731                 case TARGET_SIG_BLOCK:
8732                     how = SIG_BLOCK;
8733                     break;
8734                 case TARGET_SIG_UNBLOCK:
8735                     how = SIG_UNBLOCK;
8736                     break;
8737                 case TARGET_SIG_SETMASK:
8738                     how = SIG_SETMASK;
8739                     break;
8740                 default:
8741                     return -TARGET_EINVAL;
8742                 }
8743                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8744                     return -TARGET_EFAULT;
8745                 target_to_host_old_sigset(&set, p);
8746                 unlock_user(p, arg2, 0);
8747                 set_ptr = &set;
8748             } else {
8749                 how = 0;
8750                 set_ptr = NULL;
8751             }
8752             ret = do_sigprocmask(how, set_ptr, &oldset);
8753             if (!is_error(ret) && arg3) {
8754                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8755                     return -TARGET_EFAULT;
8756                 host_to_target_old_sigset(p, &oldset);
8757                 unlock_user(p, arg3, sizeof(target_sigset_t));
8758             }
8759 #endif
8760         }
8761         return ret;
8762 #endif
8763     case TARGET_NR_rt_sigprocmask:
8764         {
8765             int how = arg1;
8766             sigset_t set, oldset, *set_ptr;
8767 
8768             if (arg4 != sizeof(target_sigset_t)) {
8769                 return -TARGET_EINVAL;
8770             }
8771 
8772             if (arg2) {
8773                 switch(how) {
8774                 case TARGET_SIG_BLOCK:
8775                     how = SIG_BLOCK;
8776                     break;
8777                 case TARGET_SIG_UNBLOCK:
8778                     how = SIG_UNBLOCK;
8779                     break;
8780                 case TARGET_SIG_SETMASK:
8781                     how = SIG_SETMASK;
8782                     break;
8783                 default:
8784                     return -TARGET_EINVAL;
8785                 }
8786                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8787                     return -TARGET_EFAULT;
8788                 target_to_host_sigset(&set, p);
8789                 unlock_user(p, arg2, 0);
8790                 set_ptr = &set;
8791             } else {
8792                 how = 0;
8793                 set_ptr = NULL;
8794             }
8795             ret = do_sigprocmask(how, set_ptr, &oldset);
8796             if (!is_error(ret) && arg3) {
8797                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8798                     return -TARGET_EFAULT;
8799                 host_to_target_sigset(p, &oldset);
8800                 unlock_user(p, arg3, sizeof(target_sigset_t));
8801             }
8802         }
8803         return ret;
8804 #ifdef TARGET_NR_sigpending
8805     case TARGET_NR_sigpending:
8806         {
8807             sigset_t set;
8808             ret = get_errno(sigpending(&set));
8809             if (!is_error(ret)) {
8810                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8811                     return -TARGET_EFAULT;
8812                 host_to_target_old_sigset(p, &set);
8813                 unlock_user(p, arg1, sizeof(target_sigset_t));
8814             }
8815         }
8816         return ret;
8817 #endif
8818     case TARGET_NR_rt_sigpending:
8819         {
8820             sigset_t set;
8821 
8822             /* Yes, this check is >, not != like most. We follow the kernel's
8823              * logic, which uses > because it implements NR_sigpending through
8824              * the same code path, and in that case the old_sigset_t is
8825              * smaller in size.
8826              */
8827             if (arg2 > sizeof(target_sigset_t)) {
8828                 return -TARGET_EINVAL;
8829             }
8830 
8831             ret = get_errno(sigpending(&set));
8832             if (!is_error(ret)) {
8833                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8834                     return -TARGET_EFAULT;
8835                 host_to_target_sigset(p, &set);
8836                 unlock_user(p, arg1, sizeof(target_sigset_t));
8837             }
8838         }
8839         return ret;
8840 #ifdef TARGET_NR_sigsuspend
8841     case TARGET_NR_sigsuspend:
8842         {
8843             TaskState *ts = cpu->opaque;
8844 #if defined(TARGET_ALPHA)
8845             abi_ulong mask = arg1;
8846             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8847 #else
8848             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8849                 return -TARGET_EFAULT;
8850             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8851             unlock_user(p, arg1, 0);
8852 #endif
8853             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8854                                                SIGSET_T_SIZE));
8855             if (ret != -TARGET_ERESTARTSYS) {
8856                 ts->in_sigsuspend = 1;
8857             }
8858         }
8859         return ret;
8860 #endif
8861     case TARGET_NR_rt_sigsuspend:
8862         {
8863             TaskState *ts = cpu->opaque;
8864 
8865             if (arg2 != sizeof(target_sigset_t)) {
8866                 return -TARGET_EINVAL;
8867             }
8868             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8869                 return -TARGET_EFAULT;
8870             target_to_host_sigset(&ts->sigsuspend_mask, p);
8871             unlock_user(p, arg1, 0);
8872             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8873                                                SIGSET_T_SIZE));
8874             if (ret != -TARGET_ERESTARTSYS) {
8875                 ts->in_sigsuspend = 1;
8876             }
8877         }
8878         return ret;
8879 #ifdef TARGET_NR_rt_sigtimedwait
8880     case TARGET_NR_rt_sigtimedwait:
8881         {
8882             sigset_t set;
8883             struct timespec uts, *puts;
8884             siginfo_t uinfo;
8885 
8886             if (arg4 != sizeof(target_sigset_t)) {
8887                 return -TARGET_EINVAL;
8888             }
8889 
8890             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8891                 return -TARGET_EFAULT;
8892             target_to_host_sigset(&set, p);
8893             unlock_user(p, arg1, 0);
8894             if (arg3) {
8895                 puts = &uts;
8896                 if (target_to_host_timespec(puts, arg3)) {
8897                     return -TARGET_EFAULT;
8898                 }
8899             } else {
8900                 puts = NULL;
8901             }
8902             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8903                                                  SIGSET_T_SIZE));
8904             if (!is_error(ret)) {
8905                 if (arg2) {
8906                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8907                                   0);
8908                     if (!p) {
8909                         return -TARGET_EFAULT;
8910                     }
8911                     host_to_target_siginfo(p, &uinfo);
8912                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8913                 }
8914                 ret = host_to_target_signal(ret);
8915             }
8916         }
8917         return ret;
8918 #endif
8919     case TARGET_NR_rt_sigqueueinfo:
8920         {
8921             siginfo_t uinfo;
8922 
8923             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8924             if (!p) {
8925                 return -TARGET_EFAULT;
8926             }
8927             target_to_host_siginfo(&uinfo, p);
8928             unlock_user(p, arg3, 0);
8929             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8930         }
8931         return ret;
8932     case TARGET_NR_rt_tgsigqueueinfo:
8933         {
8934             siginfo_t uinfo;
8935 
8936             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8937             if (!p) {
8938                 return -TARGET_EFAULT;
8939             }
8940             target_to_host_siginfo(&uinfo, p);
8941             unlock_user(p, arg4, 0);
8942             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8943         }
8944         return ret;
8945 #ifdef TARGET_NR_sigreturn
8946     case TARGET_NR_sigreturn:
8947         if (block_signals()) {
8948             return -TARGET_ERESTARTSYS;
8949         }
8950         return do_sigreturn(cpu_env);
8951 #endif
8952     case TARGET_NR_rt_sigreturn:
8953         if (block_signals()) {
8954             return -TARGET_ERESTARTSYS;
8955         }
8956         return do_rt_sigreturn(cpu_env);
8957     case TARGET_NR_sethostname:
8958         if (!(p = lock_user_string(arg1)))
8959             return -TARGET_EFAULT;
8960         ret = get_errno(sethostname(p, arg2));
8961         unlock_user(p, arg1, 0);
8962         return ret;
8963 #ifdef TARGET_NR_setrlimit
8964     case TARGET_NR_setrlimit:
8965         {
8966             int resource = target_to_host_resource(arg1);
8967             struct target_rlimit *target_rlim;
8968             struct rlimit rlim;
8969             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8970                 return -TARGET_EFAULT;
8971             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8972             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8973             unlock_user_struct(target_rlim, arg2, 0);
8974             /*
8975              * If we just passed through resource limit settings for memory then
8976              * they would also apply to QEMU's own allocations, and QEMU will
8977              * crash or hang or die if its allocations fail. Ideally we would
8978              * track the guest allocations in QEMU and apply the limits ourselves.
8979              * For now, just tell the guest the call succeeded but don't actually
8980              * limit anything.
8981              */
8982             if (resource != RLIMIT_AS &&
8983                 resource != RLIMIT_DATA &&
8984                 resource != RLIMIT_STACK) {
8985                 return get_errno(setrlimit(resource, &rlim));
8986             } else {
8987                 return 0;
8988             }
8989         }
8990 #endif
8991 #ifdef TARGET_NR_getrlimit
8992     case TARGET_NR_getrlimit:
8993         {
8994             int resource = target_to_host_resource(arg1);
8995             struct target_rlimit *target_rlim;
8996             struct rlimit rlim;
8997 
8998             ret = get_errno(getrlimit(resource, &rlim));
8999             if (!is_error(ret)) {
9000                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9001                     return -TARGET_EFAULT;
9002                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9003                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9004                 unlock_user_struct(target_rlim, arg2, 1);
9005             }
9006         }
9007         return ret;
9008 #endif
9009     case TARGET_NR_getrusage:
9010         {
9011             struct rusage rusage;
9012             ret = get_errno(getrusage(arg1, &rusage));
9013             if (!is_error(ret)) {
9014                 ret = host_to_target_rusage(arg2, &rusage);
9015             }
9016         }
9017         return ret;
9018 #if defined(TARGET_NR_gettimeofday)
9019     case TARGET_NR_gettimeofday:
9020         {
9021             struct timeval tv;
9022             struct timezone tz;
9023 
9024             ret = get_errno(gettimeofday(&tv, &tz));
9025             if (!is_error(ret)) {
9026                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9027                     return -TARGET_EFAULT;
9028                 }
9029                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9030                     return -TARGET_EFAULT;
9031                 }
9032             }
9033         }
9034         return ret;
9035 #endif
9036 #if defined(TARGET_NR_settimeofday)
9037     case TARGET_NR_settimeofday:
9038         {
9039             struct timeval tv, *ptv = NULL;
9040             struct timezone tz, *ptz = NULL;
9041 
9042             if (arg1) {
9043                 if (copy_from_user_timeval(&tv, arg1)) {
9044                     return -TARGET_EFAULT;
9045                 }
9046                 ptv = &tv;
9047             }
9048 
9049             if (arg2) {
9050                 if (copy_from_user_timezone(&tz, arg2)) {
9051                     return -TARGET_EFAULT;
9052                 }
9053                 ptz = &tz;
9054             }
9055 
9056             return get_errno(settimeofday(ptv, ptz));
9057         }
9058 #endif
9059 #if defined(TARGET_NR_select)
9060     case TARGET_NR_select:
9061 #if defined(TARGET_WANT_NI_OLD_SELECT)
9062         /* some architectures used to have old_select here
9063          * but now return ENOSYS for it.
9064          */
9065         ret = -TARGET_ENOSYS;
9066 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9067         ret = do_old_select(arg1);
9068 #else
9069         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9070 #endif
9071         return ret;
9072 #endif
9073 #ifdef TARGET_NR_pselect6
9074     case TARGET_NR_pselect6:
9075         {
9076             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9077             fd_set rfds, wfds, efds;
9078             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9079             struct timespec ts, *ts_ptr;
9080 
9081             /*
9082              * The 6th arg is actually two args smashed together,
9083              * so we cannot use the C library.
9084              */
9085             sigset_t set;
9086             struct {
9087                 sigset_t *set;
9088                 size_t size;
9089             } sig, *sig_ptr;
9090 
9091             abi_ulong arg_sigset, arg_sigsize, *arg7;
9092             target_sigset_t *target_sigset;
9093 
9094             n = arg1;
9095             rfd_addr = arg2;
9096             wfd_addr = arg3;
9097             efd_addr = arg4;
9098             ts_addr = arg5;
9099 
9100             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9101             if (ret) {
9102                 return ret;
9103             }
9104             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9105             if (ret) {
9106                 return ret;
9107             }
9108             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9109             if (ret) {
9110                 return ret;
9111             }
9112 
9113             /*
9114              * This takes a timespec, and not a timeval, so we cannot
9115              * use the do_select() helper ...
9116              */
9117             if (ts_addr) {
9118                 if (target_to_host_timespec(&ts, ts_addr)) {
9119                     return -TARGET_EFAULT;
9120                 }
9121                 ts_ptr = &ts;
9122             } else {
9123                 ts_ptr = NULL;
9124             }
9125 
9126             /* Extract the two packed args for the sigset */
9127             if (arg6) {
9128                 sig_ptr = &sig;
9129                 sig.size = SIGSET_T_SIZE;
9130 
9131                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9132                 if (!arg7) {
9133                     return -TARGET_EFAULT;
9134                 }
9135                 arg_sigset = tswapal(arg7[0]);
9136                 arg_sigsize = tswapal(arg7[1]);
9137                 unlock_user(arg7, arg6, 0);
9138 
9139                 if (arg_sigset) {
9140                     sig.set = &set;
9141                     if (arg_sigsize != sizeof(*target_sigset)) {
9142                         /* Like the kernel, we enforce correct size sigsets */
9143                         return -TARGET_EINVAL;
9144                     }
9145                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9146                                               sizeof(*target_sigset), 1);
9147                     if (!target_sigset) {
9148                         return -TARGET_EFAULT;
9149                     }
9150                     target_to_host_sigset(&set, target_sigset);
9151                     unlock_user(target_sigset, arg_sigset, 0);
9152                 } else {
9153                     sig.set = NULL;
9154                 }
9155             } else {
9156                 sig_ptr = NULL;
9157             }
9158 
9159             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9160                                           ts_ptr, sig_ptr));
9161 
9162             if (!is_error(ret)) {
9163                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9164                     return -TARGET_EFAULT;
9165                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9166                     return -TARGET_EFAULT;
9167                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9168                     return -TARGET_EFAULT;
9169 
9170                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9171                     return -TARGET_EFAULT;
9172             }
9173         }
9174         return ret;
9175 #endif
9176 #ifdef TARGET_NR_symlink
9177     case TARGET_NR_symlink:
9178         {
9179             void *p2;
9180             p = lock_user_string(arg1);
9181             p2 = lock_user_string(arg2);
9182             if (!p || !p2)
9183                 ret = -TARGET_EFAULT;
9184             else
9185                 ret = get_errno(symlink(p, p2));
9186             unlock_user(p2, arg2, 0);
9187             unlock_user(p, arg1, 0);
9188         }
9189         return ret;
9190 #endif
9191 #if defined(TARGET_NR_symlinkat)
9192     case TARGET_NR_symlinkat:
9193         {
9194             void *p2;
9195             p  = lock_user_string(arg1);
9196             p2 = lock_user_string(arg3);
9197             if (!p || !p2)
9198                 ret = -TARGET_EFAULT;
9199             else
9200                 ret = get_errno(symlinkat(p, arg2, p2));
9201             unlock_user(p2, arg3, 0);
9202             unlock_user(p, arg1, 0);
9203         }
9204         return ret;
9205 #endif
9206 #ifdef TARGET_NR_readlink
9207     case TARGET_NR_readlink:
9208         {
9209             void *p2;
9210             p = lock_user_string(arg1);
9211             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9212             if (!p || !p2) {
9213                 ret = -TARGET_EFAULT;
9214             } else if (!arg3) {
9215                 /* Reject a zero-length buffer before the magic exe check. */
9216                 ret = -TARGET_EINVAL;
9217             } else if (is_proc_myself((const char *)p, "exe")) {
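                     /*
                      * The guest is reading its own /proc/.../exe link:
                      * report the binary being emulated, not the QEMU
                      * executable itself.
                      */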
9218                 char real[PATH_MAX], *temp;
9219                 temp = realpath(exec_path, real);
9220                 /* Return value is # of bytes that we wrote to the buffer. */
9221                 if (temp == NULL) {
9222                     ret = get_errno(-1);
9223                 } else {
9224                     /* Don't worry about sign mismatch as earlier mapping
9225                      * logic would have thrown a bad address error. */
9226                     ret = MIN(strlen(real), arg3);
9227                     /* We cannot NUL terminate the string. */
9228                     memcpy(p2, real, ret);
9229                 }
9230             } else {
9231                 ret = get_errno(readlink(path(p), p2, arg3));
9232             }
9233             unlock_user(p2, arg2, ret);
9234             unlock_user(p, arg1, 0);
9235         }
9236         return ret;
9237 #endif
9238 #if defined(TARGET_NR_readlinkat)
9239     case TARGET_NR_readlinkat:
9240         {
9241             void *p2;
9242             p  = lock_user_string(arg2);
9243             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9244             if (!p || !p2) {
9245                 ret = -TARGET_EFAULT;
9246             } else if (is_proc_myself((const char *)p, "exe")) {
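                     /* As for readlink: report the emulated binary, not QEMU. */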
9247                 char real[PATH_MAX], *temp;
9248                 temp = realpath(exec_path, real);
9249                     ret = temp == NULL ? get_errno(-1) : strlen(real);
9250                 snprintf((char *)p2, arg4, "%s", real);
9251             } else {
9252                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9253             }
9254             unlock_user(p2, arg3, ret);
9255             unlock_user(p, arg2, 0);
9256         }
9257         return ret;
9258 #endif
9259 #ifdef TARGET_NR_swapon
9260     case TARGET_NR_swapon:
9261         if (!(p = lock_user_string(arg1)))
9262             return -TARGET_EFAULT;
9263         ret = get_errno(swapon(p, arg2));
9264         unlock_user(p, arg1, 0);
9265         return ret;
9266 #endif
9267     case TARGET_NR_reboot:
9268         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9269             /* arg4 (the command string) is only used for RESTART2 */
9270             p = lock_user_string(arg4);
9271             if (!p) {
9272                 return -TARGET_EFAULT;
9273             }
9274             ret = get_errno(reboot(arg1, arg2, arg3, p));
9275             unlock_user(p, arg4, 0);
9276         } else {
9277             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9278         }
9279         return ret;
9280 #ifdef TARGET_NR_mmap
9281     case TARGET_NR_mmap:
9282 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9283     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9284     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9285     || defined(TARGET_S390X)
9286         {
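                 /*
                  * These targets use the old mmap calling convention: a
                  * single pointer to a block of six arguments in guest
                  * memory.
                  */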
9287             abi_ulong *v;
9288             abi_ulong v1, v2, v3, v4, v5, v6;
9289             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9290                 return -TARGET_EFAULT;
9291             v1 = tswapal(v[0]);
9292             v2 = tswapal(v[1]);
9293             v3 = tswapal(v[2]);
9294             v4 = tswapal(v[3]);
9295             v5 = tswapal(v[4]);
9296             v6 = tswapal(v[5]);
9297             unlock_user(v, arg1, 0);
9298             ret = get_errno(target_mmap(v1, v2, v3,
9299                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9300                                         v5, v6));
9301         }
9302 #else
9303         ret = get_errno(target_mmap(arg1, arg2, arg3,
9304                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9305                                     arg5,
9306                                     arg6));
9307 #endif
9308         return ret;
9309 #endif
9310 #ifdef TARGET_NR_mmap2
9311     case TARGET_NR_mmap2:
9312 #ifndef MMAP_SHIFT
9313 #define MMAP_SHIFT 12
9314 #endif
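             /*
              * mmap2 passes the file offset in units of (1 << MMAP_SHIFT)
              * bytes, i.e. 4096 unless the target overrides MMAP_SHIFT.
              */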
9315         ret = target_mmap(arg1, arg2, arg3,
9316                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9317                           arg5, arg6 << MMAP_SHIFT);
9318         return get_errno(ret);
9319 #endif
9320     case TARGET_NR_munmap:
9321         return get_errno(target_munmap(arg1, arg2));
9322     case TARGET_NR_mprotect:
9323         {
9324             TaskState *ts = cpu->opaque;
9325             /* Special hack to detect libc making the stack executable.  */
9326             if ((arg3 & PROT_GROWSDOWN)
9327                 && arg1 >= ts->info->stack_limit
9328                 && arg1 <= ts->info->start_stack) {
9329                 arg3 &= ~PROT_GROWSDOWN;
9330                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9331                 arg1 = ts->info->stack_limit;
9332             }
9333         }
9334         return get_errno(target_mprotect(arg1, arg2, arg3));
9335 #ifdef TARGET_NR_mremap
9336     case TARGET_NR_mremap:
9337         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9338 #endif
9339         /* ??? msync/mlock/munlock are broken for softmmu.  */
9340 #ifdef TARGET_NR_msync
9341     case TARGET_NR_msync:
9342         return get_errno(msync(g2h(arg1), arg2, arg3));
9343 #endif
9344 #ifdef TARGET_NR_mlock
9345     case TARGET_NR_mlock:
9346         return get_errno(mlock(g2h(arg1), arg2));
9347 #endif
9348 #ifdef TARGET_NR_munlock
9349     case TARGET_NR_munlock:
9350         return get_errno(munlock(g2h(arg1), arg2));
9351 #endif
9352 #ifdef TARGET_NR_mlockall
9353     case TARGET_NR_mlockall:
9354         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9355 #endif
9356 #ifdef TARGET_NR_munlockall
9357     case TARGET_NR_munlockall:
9358         return get_errno(munlockall());
9359 #endif
9360 #ifdef TARGET_NR_truncate
9361     case TARGET_NR_truncate:
9362         if (!(p = lock_user_string(arg1)))
9363             return -TARGET_EFAULT;
9364         ret = get_errno(truncate(p, arg2));
9365         unlock_user(p, arg1, 0);
9366         return ret;
9367 #endif
9368 #ifdef TARGET_NR_ftruncate
9369     case TARGET_NR_ftruncate:
9370         return get_errno(ftruncate(arg1, arg2));
9371 #endif
9372     case TARGET_NR_fchmod:
9373         return get_errno(fchmod(arg1, arg2));
9374 #if defined(TARGET_NR_fchmodat)
9375     case TARGET_NR_fchmodat:
9376         if (!(p = lock_user_string(arg2)))
9377             return -TARGET_EFAULT;
9378         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9379         unlock_user(p, arg2, 0);
9380         return ret;
9381 #endif
9382     case TARGET_NR_getpriority:
9383         /* Note that negative values are valid for getpriority, so we must
9384            differentiate based on errno settings.  */
9385         errno = 0;
9386         ret = getpriority(arg1, arg2);
9387         if (ret == -1 && errno != 0) {
9388             return -host_to_target_errno(errno);
9389         }
9390 #ifdef TARGET_ALPHA
9391         /* Return value is the unbiased priority.  Signal no error.  */
9392         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9393 #else
9394         /* Return value is a biased priority to avoid negative numbers.  */
9395         ret = 20 - ret;
9396 #endif
9397         return ret;
9398     case TARGET_NR_setpriority:
9399         return get_errno(setpriority(arg1, arg2, arg3));
9400 #ifdef TARGET_NR_statfs
9401     case TARGET_NR_statfs:
9402         if (!(p = lock_user_string(arg1))) {
9403             return -TARGET_EFAULT;
9404         }
9405         ret = get_errno(statfs(path(p), &stfs));
9406         unlock_user(p, arg1, 0);
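         /*
          * fstatfs below jumps here so that both syscalls share the
          * host-to-target statfs conversion.
          */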
9407     convert_statfs:
9408         if (!is_error(ret)) {
9409             struct target_statfs *target_stfs;
9410 
9411             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9412                 return -TARGET_EFAULT;
9413             __put_user(stfs.f_type, &target_stfs->f_type);
9414             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9415             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9416             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9417             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9418             __put_user(stfs.f_files, &target_stfs->f_files);
9419             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9420             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9421             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9422             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9423             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9424 #ifdef _STATFS_F_FLAGS
9425             __put_user(stfs.f_flags, &target_stfs->f_flags);
9426 #else
9427             __put_user(0, &target_stfs->f_flags);
9428 #endif
9429             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9430             unlock_user_struct(target_stfs, arg2, 1);
9431         }
9432         return ret;
9433 #endif
9434 #ifdef TARGET_NR_fstatfs
9435     case TARGET_NR_fstatfs:
9436         ret = get_errno(fstatfs(arg1, &stfs));
9437         goto convert_statfs;
9438 #endif
9439 #ifdef TARGET_NR_statfs64
9440     case TARGET_NR_statfs64:
9441         if (!(p = lock_user_string(arg1))) {
9442             return -TARGET_EFAULT;
9443         }
9444         ret = get_errno(statfs(path(p), &stfs));
9445         unlock_user(p, arg1, 0);
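         /* Shared with fstatfs64 below, as for convert_statfs above. */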
9446     convert_statfs64:
9447         if (!is_error(ret)) {
9448             struct target_statfs64 *target_stfs;
9449 
9450             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9451                 return -TARGET_EFAULT;
9452             __put_user(stfs.f_type, &target_stfs->f_type);
9453             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9454             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9455             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9456             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9457             __put_user(stfs.f_files, &target_stfs->f_files);
9458             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9459             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9460             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9461             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9462             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9463             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9464             unlock_user_struct(target_stfs, arg3, 1);
9465         }
9466         return ret;
9467     case TARGET_NR_fstatfs64:
9468         ret = get_errno(fstatfs(arg1, &stfs));
9469         goto convert_statfs64;
9470 #endif
9471 #ifdef TARGET_NR_socketcall
9472     case TARGET_NR_socketcall:
9473         return do_socketcall(arg1, arg2);
9474 #endif
9475 #ifdef TARGET_NR_accept
9476     case TARGET_NR_accept:
9477         return do_accept4(arg1, arg2, arg3, 0);
9478 #endif
9479 #ifdef TARGET_NR_accept4
9480     case TARGET_NR_accept4:
9481         return do_accept4(arg1, arg2, arg3, arg4);
9482 #endif
9483 #ifdef TARGET_NR_bind
9484     case TARGET_NR_bind:
9485         return do_bind(arg1, arg2, arg3);
9486 #endif
9487 #ifdef TARGET_NR_connect
9488     case TARGET_NR_connect:
9489         return do_connect(arg1, arg2, arg3);
9490 #endif
9491 #ifdef TARGET_NR_getpeername
9492     case TARGET_NR_getpeername:
9493         return do_getpeername(arg1, arg2, arg3);
9494 #endif
9495 #ifdef TARGET_NR_getsockname
9496     case TARGET_NR_getsockname:
9497         return do_getsockname(arg1, arg2, arg3);
9498 #endif
9499 #ifdef TARGET_NR_getsockopt
9500     case TARGET_NR_getsockopt:
9501         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9502 #endif
9503 #ifdef TARGET_NR_listen
9504     case TARGET_NR_listen:
9505         return get_errno(listen(arg1, arg2));
9506 #endif
9507 #ifdef TARGET_NR_recv
9508     case TARGET_NR_recv:
9509         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9510 #endif
9511 #ifdef TARGET_NR_recvfrom
9512     case TARGET_NR_recvfrom:
9513         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9514 #endif
9515 #ifdef TARGET_NR_recvmsg
9516     case TARGET_NR_recvmsg:
9517         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9518 #endif
9519 #ifdef TARGET_NR_send
9520     case TARGET_NR_send:
9521         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9522 #endif
9523 #ifdef TARGET_NR_sendmsg
9524     case TARGET_NR_sendmsg:
9525         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9526 #endif
9527 #ifdef TARGET_NR_sendmmsg
9528     case TARGET_NR_sendmmsg:
9529         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9530 #endif
9531 #ifdef TARGET_NR_recvmmsg
9532     case TARGET_NR_recvmmsg:
9533         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9534 #endif
9535 #ifdef TARGET_NR_sendto
9536     case TARGET_NR_sendto:
9537         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9538 #endif
9539 #ifdef TARGET_NR_shutdown
9540     case TARGET_NR_shutdown:
9541         return get_errno(shutdown(arg1, arg2));
9542 #endif
9543 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9544     case TARGET_NR_getrandom:
9545         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9546         if (!p) {
9547             return -TARGET_EFAULT;
9548         }
9549         ret = get_errno(getrandom(p, arg2, arg3));
9550         unlock_user(p, arg1, ret);
9551         return ret;
9552 #endif
9553 #ifdef TARGET_NR_socket
9554     case TARGET_NR_socket:
9555         return do_socket(arg1, arg2, arg3);
9556 #endif
9557 #ifdef TARGET_NR_socketpair
9558     case TARGET_NR_socketpair:
9559         return do_socketpair(arg1, arg2, arg3, arg4);
9560 #endif
9561 #ifdef TARGET_NR_setsockopt
9562     case TARGET_NR_setsockopt:
9563         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9564 #endif
9565 #if defined(TARGET_NR_syslog)
9566     case TARGET_NR_syslog:
9567         {
9568             int len = arg3;
9569 
9570             switch (arg1) {
9571             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9572             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9573             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9574             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9575             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9576             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9577             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9578             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9579                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9580             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9581             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9582             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9583                 {
9584                     if (len < 0) {
9585                         return -TARGET_EINVAL;
9586                     }
9587                     if (len == 0) {
9588                         return 0;
9589                     }
9590                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9591                     if (!p) {
9592                         return -TARGET_EFAULT;
9593                     }
9594                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9595                     unlock_user(p, arg2, arg3);
9596                 }
9597                 return ret;
9598             default:
9599                 return -TARGET_EINVAL;
9600             }
9601         }
9602         break;
9603 #endif
9604     case TARGET_NR_setitimer:
9605         {
9606             struct itimerval value, ovalue, *pvalue;
9607 
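                 /*
                  * A target itimerval is two consecutive target_timevals
                  * (it_interval followed by it_value), so convert each
                  * half separately.
                  */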
9608             if (arg2) {
9609                 pvalue = &value;
9610                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9611                     || copy_from_user_timeval(&pvalue->it_value,
9612                                               arg2 + sizeof(struct target_timeval)))
9613                     return -TARGET_EFAULT;
9614             } else {
9615                 pvalue = NULL;
9616             }
9617             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9618             if (!is_error(ret) && arg3) {
9619                 if (copy_to_user_timeval(arg3,
9620                                          &ovalue.it_interval)
9621                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9622                                             &ovalue.it_value))
9623                     return -TARGET_EFAULT;
9624             }
9625         }
9626         return ret;
9627     case TARGET_NR_getitimer:
9628         {
9629             struct itimerval value;
9630 
9631             ret = get_errno(getitimer(arg1, &value));
9632             if (!is_error(ret) && arg2) {
9633                 if (copy_to_user_timeval(arg2,
9634                                          &value.it_interval)
9635                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9636                                             &value.it_value))
9637                     return -TARGET_EFAULT;
9638             }
9639         }
9640         return ret;
9641 #ifdef TARGET_NR_stat
9642     case TARGET_NR_stat:
9643         if (!(p = lock_user_string(arg1))) {
9644             return -TARGET_EFAULT;
9645         }
9646         ret = get_errno(stat(path(p), &st));
9647         unlock_user(p, arg1, 0);
9648         goto do_stat;
9649 #endif
9650 #ifdef TARGET_NR_lstat
9651     case TARGET_NR_lstat:
9652         if (!(p = lock_user_string(arg1))) {
9653             return -TARGET_EFAULT;
9654         }
9655         ret = get_errno(lstat(path(p), &st));
9656         unlock_user(p, arg1, 0);
9657         goto do_stat;
9658 #endif
9659 #ifdef TARGET_NR_fstat
9660     case TARGET_NR_fstat:
9661         {
9662             ret = get_errno(fstat(arg1, &st));
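                 /*
                  * stat and lstat jump to the do_stat label below so all
                  * three syscalls share this target_stat conversion.
                  */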
9663 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9664         do_stat:
9665 #endif
9666             if (!is_error(ret)) {
9667                 struct target_stat *target_st;
9668 
9669                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9670                     return -TARGET_EFAULT;
9671                 memset(target_st, 0, sizeof(*target_st));
9672                 __put_user(st.st_dev, &target_st->st_dev);
9673                 __put_user(st.st_ino, &target_st->st_ino);
9674                 __put_user(st.st_mode, &target_st->st_mode);
9675                 __put_user(st.st_uid, &target_st->st_uid);
9676                 __put_user(st.st_gid, &target_st->st_gid);
9677                 __put_user(st.st_nlink, &target_st->st_nlink);
9678                 __put_user(st.st_rdev, &target_st->st_rdev);
9679                 __put_user(st.st_size, &target_st->st_size);
9680                 __put_user(st.st_blksize, &target_st->st_blksize);
9681                 __put_user(st.st_blocks, &target_st->st_blocks);
9682                 __put_user(st.st_atime, &target_st->target_st_atime);
9683                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9684                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9685 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9686     defined(TARGET_STAT_HAVE_NSEC)
9687                 __put_user(st.st_atim.tv_nsec,
9688                            &target_st->target_st_atime_nsec);
9689                 __put_user(st.st_mtim.tv_nsec,
9690                            &target_st->target_st_mtime_nsec);
9691                 __put_user(st.st_ctim.tv_nsec,
9692                            &target_st->target_st_ctime_nsec);
9693 #endif
9694                 unlock_user_struct(target_st, arg2, 1);
9695             }
9696         }
9697         return ret;
9698 #endif
9699     case TARGET_NR_vhangup:
9700         return get_errno(vhangup());
9701 #ifdef TARGET_NR_syscall
9702     case TARGET_NR_syscall:
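             /*
              * Indirect syscall: arg1 holds the real syscall number and
              * the remaining guest arguments each move down one slot.
              */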
9703         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9704                           arg6, arg7, arg8, 0);
9705 #endif
9706 #if defined(TARGET_NR_wait4)
9707     case TARGET_NR_wait4:
9708         {
9709             int status;
9710             abi_long status_ptr = arg2;
9711             struct rusage rusage, *rusage_ptr;
9712             abi_ulong target_rusage = arg4;
9713             abi_long rusage_err;
9714             if (target_rusage)
9715                 rusage_ptr = &rusage;
9716             else
9717                 rusage_ptr = NULL;
9718             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9719             if (!is_error(ret)) {
9720                 if (status_ptr && ret) {
9721                     status = host_to_target_waitstatus(status);
9722                     if (put_user_s32(status, status_ptr))
9723                         return -TARGET_EFAULT;
9724                 }
9725                 if (target_rusage) {
9726                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9727                     if (rusage_err) {
9728                         ret = rusage_err;
9729                     }
9730                 }
9731             }
9732         }
9733         return ret;
9734 #endif
9735 #ifdef TARGET_NR_swapoff
9736     case TARGET_NR_swapoff:
9737         if (!(p = lock_user_string(arg1)))
9738             return -TARGET_EFAULT;
9739         ret = get_errno(swapoff(p));
9740         unlock_user(p, arg1, 0);
9741         return ret;
9742 #endif
9743     case TARGET_NR_sysinfo:
9744         {
9745             struct target_sysinfo *target_value;
9746             struct sysinfo value;
9747             ret = get_errno(sysinfo(&value));
9748             if (!is_error(ret) && arg1)
9749             {
9750                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9751                     return -TARGET_EFAULT;
9752                 __put_user(value.uptime, &target_value->uptime);
9753                 __put_user(value.loads[0], &target_value->loads[0]);
9754                 __put_user(value.loads[1], &target_value->loads[1]);
9755                 __put_user(value.loads[2], &target_value->loads[2]);
9756                 __put_user(value.totalram, &target_value->totalram);
9757                 __put_user(value.freeram, &target_value->freeram);
9758                 __put_user(value.sharedram, &target_value->sharedram);
9759                 __put_user(value.bufferram, &target_value->bufferram);
9760                 __put_user(value.totalswap, &target_value->totalswap);
9761                 __put_user(value.freeswap, &target_value->freeswap);
9762                 __put_user(value.procs, &target_value->procs);
9763                 __put_user(value.totalhigh, &target_value->totalhigh);
9764                 __put_user(value.freehigh, &target_value->freehigh);
9765                 __put_user(value.mem_unit, &target_value->mem_unit);
9766                 unlock_user_struct(target_value, arg1, 1);
9767             }
9768         }
9769         return ret;
9770 #ifdef TARGET_NR_ipc
9771     case TARGET_NR_ipc:
9772         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9773 #endif
9774 #ifdef TARGET_NR_semget
9775     case TARGET_NR_semget:
9776         return get_errno(semget(arg1, arg2, arg3));
9777 #endif
9778 #ifdef TARGET_NR_semop
9779     case TARGET_NR_semop:
9780         return do_semtimedop(arg1, arg2, arg3, 0);
9781 #endif
9782 #ifdef TARGET_NR_semtimedop
9783     case TARGET_NR_semtimedop:
9784         return do_semtimedop(arg1, arg2, arg3, arg4);
9785 #endif
9786 #ifdef TARGET_NR_semctl
9787     case TARGET_NR_semctl:
9788         return do_semctl(arg1, arg2, arg3, arg4);
9789 #endif
9790 #ifdef TARGET_NR_msgctl
9791     case TARGET_NR_msgctl:
9792         return do_msgctl(arg1, arg2, arg3);
9793 #endif
9794 #ifdef TARGET_NR_msgget
9795     case TARGET_NR_msgget:
9796         return get_errno(msgget(arg1, arg2));
9797 #endif
9798 #ifdef TARGET_NR_msgrcv
9799     case TARGET_NR_msgrcv:
9800         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9801 #endif
9802 #ifdef TARGET_NR_msgsnd
9803     case TARGET_NR_msgsnd:
9804         return do_msgsnd(arg1, arg2, arg3, arg4);
9805 #endif
9806 #ifdef TARGET_NR_shmget
9807     case TARGET_NR_shmget:
9808         return get_errno(shmget(arg1, arg2, arg3));
9809 #endif
9810 #ifdef TARGET_NR_shmctl
9811     case TARGET_NR_shmctl:
9812         return do_shmctl(arg1, arg2, arg3);
9813 #endif
9814 #ifdef TARGET_NR_shmat
9815     case TARGET_NR_shmat:
9816         return do_shmat(cpu_env, arg1, arg2, arg3);
9817 #endif
9818 #ifdef TARGET_NR_shmdt
9819     case TARGET_NR_shmdt:
9820         return do_shmdt(arg1);
9821 #endif
9822     case TARGET_NR_fsync:
9823         return get_errno(fsync(arg1));
9824     case TARGET_NR_clone:
9825         /* Linux manages to have three different orderings for its
9826          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9827          * match the kernel's CONFIG_CLONE_* settings.
9828          * Microblaze is further special in that it uses a sixth
9829          * implicit argument to clone for the TLS pointer.
9830          */
9831 #if defined(TARGET_MICROBLAZE)
9832         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9833 #elif defined(TARGET_CLONE_BACKWARDS)
9834         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9835 #elif defined(TARGET_CLONE_BACKWARDS2)
9836         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9837 #else
9838         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9839 #endif
9840         return ret;
9841 #ifdef __NR_exit_group
9842         /* new thread calls */
9843     case TARGET_NR_exit_group:
9844         preexit_cleanup(cpu_env, arg1);
9845         return get_errno(exit_group(arg1));
9846 #endif
9847     case TARGET_NR_setdomainname:
9848         if (!(p = lock_user_string(arg1)))
9849             return -TARGET_EFAULT;
9850         ret = get_errno(setdomainname(p, arg2));
9851         unlock_user(p, arg1, 0);
9852         return ret;
9853     case TARGET_NR_uname:
9854         /* no need to transcode because we use the linux syscall */
9855         {
9856             struct new_utsname * buf;
9857 
9858             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9859                 return -TARGET_EFAULT;
9860             ret = get_errno(sys_uname(buf));
9861             if (!is_error(ret)) {
9862                 /* Overwrite the native machine name with whatever is being
9863                    emulated. */
9864                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9865                           sizeof(buf->machine));
9866                 /* Allow the user to override the reported release.  */
9867                 if (qemu_uname_release && *qemu_uname_release) {
9868                     g_strlcpy(buf->release, qemu_uname_release,
9869                               sizeof(buf->release));
9870                 }
9871             }
9872             unlock_user_struct(buf, arg1, 1);
9873         }
9874         return ret;
9875 #ifdef TARGET_I386
9876     case TARGET_NR_modify_ldt:
9877         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9878 #if !defined(TARGET_X86_64)
9879     case TARGET_NR_vm86:
9880         return do_vm86(cpu_env, arg1, arg2);
9881 #endif
9882 #endif
9883 #if defined(TARGET_NR_adjtimex)
9884     case TARGET_NR_adjtimex:
9885         {
9886             struct timex host_buf;
9887 
9888             if (target_to_host_timex(&host_buf, arg1) != 0) {
9889                 return -TARGET_EFAULT;
9890             }
9891             ret = get_errno(adjtimex(&host_buf));
9892             if (!is_error(ret)) {
9893                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9894                     return -TARGET_EFAULT;
9895                 }
9896             }
9897         }
9898         return ret;
9899 #endif
9900 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9901     case TARGET_NR_clock_adjtime:
9902         {
9903             struct timex htx, *phtx = &htx;
9904 
9905             if (target_to_host_timex(phtx, arg2) != 0) {
9906                 return -TARGET_EFAULT;
9907             }
9908             ret = get_errno(clock_adjtime(arg1, phtx));
9909             if (!is_error(ret) && phtx) {
9910                 if (host_to_target_timex(arg2, phtx) != 0) {
9911                     return -TARGET_EFAULT;
9912                 }
9913             }
9914         }
9915         return ret;
9916 #endif
9917     case TARGET_NR_getpgid:
9918         return get_errno(getpgid(arg1));
9919     case TARGET_NR_fchdir:
9920         return get_errno(fchdir(arg1));
9921     case TARGET_NR_personality:
9922         return get_errno(personality(arg1));
9923 #ifdef TARGET_NR__llseek /* Not on alpha */
9924     case TARGET_NR__llseek:
9925         {
9926             int64_t res;
9927 #if !defined(__NR_llseek)
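                 /*
                  * Hosts without an llseek syscall (64-bit hosts) can
                  * combine the two 32-bit halves of the offset and use
                  * plain lseek instead.
                  */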
9928             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9929             if (res == -1) {
9930                 ret = get_errno(res);
9931             } else {
9932                 ret = 0;
9933             }
9934 #else
9935             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9936 #endif
9937             if ((ret == 0) && put_user_s64(res, arg4)) {
9938                 return -TARGET_EFAULT;
9939             }
9940         }
9941         return ret;
9942 #endif
9943 #ifdef TARGET_NR_getdents
9944     case TARGET_NR_getdents:
9945 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9946 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9947         {
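                 /*
                  * Host and target dirent layouts differ in field widths
                  * here, so read into a bounce buffer and repack each
                  * record into the guest's buffer.
                  */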
9948             struct target_dirent *target_dirp;
9949             struct linux_dirent *dirp;
9950             abi_long count = arg3;
9951 
9952             dirp = g_try_malloc(count);
9953             if (!dirp) {
9954                 return -TARGET_ENOMEM;
9955             }
9956 
9957             ret = get_errno(sys_getdents(arg1, dirp, count));
9958             if (!is_error(ret)) {
9959                 struct linux_dirent *de;
9960                 struct target_dirent *tde;
9961                 int len = ret;
9962                 int reclen, treclen;
9963                 int count1, tnamelen;
9964 
9965                 count1 = 0;
9966                 de = dirp;
9967                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9968                     return -TARGET_EFAULT;
9969                 tde = target_dirp;
9970                 while (len > 0) {
9971                     reclen = de->d_reclen;
9972                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9973                     assert(tnamelen >= 0);
9974                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9975                     assert(count1 + treclen <= count);
9976                     tde->d_reclen = tswap16(treclen);
9977                     tde->d_ino = tswapal(de->d_ino);
9978                     tde->d_off = tswapal(de->d_off);
9979                     memcpy(tde->d_name, de->d_name, tnamelen);
9980                     de = (struct linux_dirent *)((char *)de + reclen);
9981                     len -= reclen;
9982                     tde = (struct target_dirent *)((char *)tde + treclen);
9983                     count1 += treclen;
9984                 }
9985                 ret = count1;
9986                 unlock_user(target_dirp, arg2, ret);
9987             }
9988             g_free(dirp);
9989         }
9990 #else
9991         {
9992             struct linux_dirent *dirp;
9993             abi_long count = arg3;
9994 
9995             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9996                 return -TARGET_EFAULT;
9997             ret = get_errno(sys_getdents(arg1, dirp, count));
9998             if (!is_error(ret)) {
9999                 struct linux_dirent *de;
10000                 int len = ret;
10001                 int reclen;
10002                 de = dirp;
10003                 while (len > 0) {
10004                     reclen = de->d_reclen;
10005                     if (reclen > len)
10006                         break;
10007                     de->d_reclen = tswap16(reclen);
10008                     tswapls(&de->d_ino);
10009                     tswapls(&de->d_off);
10010                     de = (struct linux_dirent *)((char *)de + reclen);
10011                     len -= reclen;
10012                 }
10013             }
10014             unlock_user(dirp, arg2, ret);
10015         }
10016 #endif
10017 #else
10018         /* Implement getdents in terms of getdents64 */
10019         {
10020             struct linux_dirent64 *dirp;
10021             abi_long count = arg3;
10022 
10023             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10024             if (!dirp) {
10025                 return -TARGET_EFAULT;
10026             }
10027             ret = get_errno(sys_getdents64(arg1, dirp, count));
10028             if (!is_error(ret)) {
10029                 /* Convert the dirent64 structs to target dirent.  We do this
10030                  * in-place, since we can guarantee that a target_dirent is no
10031                  * larger than a dirent64; however this means we have to be
10032                  * careful to read everything before writing in the new format.
10033                  */
10034                 struct linux_dirent64 *de;
10035                 struct target_dirent *tde;
10036                 int len = ret;
10037                 int tlen = 0;
10038 
10039                 de = dirp;
10040                 tde = (struct target_dirent *)dirp;
10041                 while (len > 0) {
10042                     int namelen, treclen;
10043                     int reclen = de->d_reclen;
10044                     uint64_t ino = de->d_ino;
10045                     int64_t off = de->d_off;
10046                     uint8_t type = de->d_type;
10047 
10048                     namelen = strlen(de->d_name);
10049                     treclen = offsetof(struct target_dirent, d_name)
10050                         + namelen + 2;
10051                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10052 
10053                     memmove(tde->d_name, de->d_name, namelen + 1);
10054                     tde->d_ino = tswapal(ino);
10055                     tde->d_off = tswapal(off);
10056                     tde->d_reclen = tswap16(treclen);
10057                     /* The target_dirent type is in what was formerly a padding
10058                      * byte at the end of the structure:
10059                      */
10060                     *(((char *)tde) + treclen - 1) = type;
10061 
10062                     de = (struct linux_dirent64 *)((char *)de + reclen);
10063                     tde = (struct target_dirent *)((char *)tde + treclen);
10064                     len -= reclen;
10065                     tlen += treclen;
10066                 }
10067                 ret = tlen;
10068             }
10069             unlock_user(dirp, arg2, ret);
10070         }
10071 #endif
10072         return ret;
10073 #endif /* TARGET_NR_getdents */
10074 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10075     case TARGET_NR_getdents64:
10076         {
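                  /*
                   * struct linux_dirent64 has the same layout on host and
                   * target, so only the fixed-width fields need to be
                   * byteswapped, which can be done in place.
                   */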
10077             struct linux_dirent64 *dirp;
10078             abi_long count = arg3;
10079             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10080                 return -TARGET_EFAULT;
10081             ret = get_errno(sys_getdents64(arg1, dirp, count));
10082             if (!is_error(ret)) {
10083                 struct linux_dirent64 *de;
10084                 int len = ret;
10085                 int reclen;
10086                 de = dirp;
10087                 while (len > 0) {
10088                     reclen = de->d_reclen;
10089                     if (reclen > len)
10090                         break;
10091                     de->d_reclen = tswap16(reclen);
10092                     tswap64s((uint64_t *)&de->d_ino);
10093                     tswap64s((uint64_t *)&de->d_off);
10094                     de = (struct linux_dirent64 *)((char *)de + reclen);
10095                     len -= reclen;
10096                 }
10097             }
10098             unlock_user(dirp, arg2, ret);
10099         }
10100         return ret;
10101 #endif /* TARGET_NR_getdents64 */
10102 #if defined(TARGET_NR__newselect)
10103     case TARGET_NR__newselect:
10104         return do_select(arg1, arg2, arg3, arg4, arg5);
10105 #endif
10106 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10107 # ifdef TARGET_NR_poll
10108     case TARGET_NR_poll:
10109 # endif
10110 # ifdef TARGET_NR_ppoll
10111     case TARGET_NR_ppoll:
10112 # endif
10113         {
10114             struct target_pollfd *target_pfd;
10115             unsigned int nfds = arg2;
10116             struct pollfd *pfd;
10117             unsigned int i;
10118 
10119             pfd = NULL;
10120             target_pfd = NULL;
10121             if (nfds) {
10122                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10123                     return -TARGET_EINVAL;
10124                 }
10125 
10126                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10127                                        sizeof(struct target_pollfd) * nfds, 1);
10128                 if (!target_pfd) {
10129                     return -TARGET_EFAULT;
10130                 }
10131 
10132                 pfd = alloca(sizeof(struct pollfd) * nfds);
10133                 for (i = 0; i < nfds; i++) {
10134                     pfd[i].fd = tswap32(target_pfd[i].fd);
10135                     pfd[i].events = tswap16(target_pfd[i].events);
10136                 }
10137             }
10138 
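                  /*
                   * poll and ppoll share the pollfd conversion above; they
                   * differ only in how the timeout and signal mask are
                   * supplied.
                   */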
10139             switch (num) {
10140 # ifdef TARGET_NR_ppoll
10141             case TARGET_NR_ppoll:
10142             {
10143                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10144                 target_sigset_t *target_set;
10145                 sigset_t _set, *set = &_set;
10146 
10147                 if (arg3) {
10148                     if (target_to_host_timespec(timeout_ts, arg3)) {
10149                         unlock_user(target_pfd, arg1, 0);
10150                         return -TARGET_EFAULT;
10151                     }
10152                 } else {
10153                     timeout_ts = NULL;
10154                 }
10155 
10156                 if (arg4) {
10157                     if (arg5 != sizeof(target_sigset_t)) {
10158                         unlock_user(target_pfd, arg1, 0);
10159                         return -TARGET_EINVAL;
10160                     }
10161 
10162                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10163                     if (!target_set) {
10164                         unlock_user(target_pfd, arg1, 0);
10165                         return -TARGET_EFAULT;
10166                     }
10167                     target_to_host_sigset(set, target_set);
10168                 } else {
10169                     set = NULL;
10170                 }
10171 
10172                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10173                                            set, SIGSET_T_SIZE));
10174 
10175                 if (!is_error(ret) && arg3) {
10176                     host_to_target_timespec(arg3, timeout_ts);
10177                 }
10178                 if (arg4) {
10179                     unlock_user(target_set, arg4, 0);
10180                 }
10181                 break;
10182             }
10183 # endif
10184 # ifdef TARGET_NR_poll
10185             case TARGET_NR_poll:
10186             {
10187                 struct timespec ts, *pts;
10188 
10189                 if (arg3 >= 0) {
10190                     /* Convert ms to secs, ns */
10191                     ts.tv_sec = arg3 / 1000;
10192                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10193                     pts = &ts;
10194                 } else {
10195                     /* A negative poll() timeout means "infinite" */
10196                     pts = NULL;
10197                 }
10198                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10199                 break;
10200             }
10201 # endif
10202             default:
10203                 g_assert_not_reached();
10204             }
10205 
10206             if (!is_error(ret)) {
10207                 for(i = 0; i < nfds; i++) {
10208                     target_pfd[i].revents = tswap16(pfd[i].revents);
10209                 }
10210             }
10211             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10212         }
10213         return ret;
10214 #endif
10215     case TARGET_NR_flock:
10216         /* NOTE: the flock constant seems to be the same for every
10217            Linux platform */
10218         return get_errno(safe_flock(arg1, arg2));
10219     case TARGET_NR_readv:
10220         {
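                  /*
                   * lock_iovec validates and converts the guest iovec
                   * array; on failure it returns NULL with errno set.
                   */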
10221             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10222             if (vec != NULL) {
10223                 ret = get_errno(safe_readv(arg1, vec, arg3));
10224                 unlock_iovec(vec, arg2, arg3, 1);
10225             } else {
10226                 ret = -host_to_target_errno(errno);
10227             }
10228         }
10229         return ret;
10230     case TARGET_NR_writev:
10231         {
10232             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10233             if (vec != NULL) {
10234                 ret = get_errno(safe_writev(arg1, vec, arg3));
10235                 unlock_iovec(vec, arg2, arg3, 0);
10236             } else {
10237                 ret = -host_to_target_errno(errno);
10238             }
10239         }
10240         return ret;
10241 #if defined(TARGET_NR_preadv)
10242     case TARGET_NR_preadv:
10243         {
10244             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10245             if (vec != NULL) {
10246                 unsigned long low, high;
10247 
10248                 target_to_host_low_high(arg4, arg5, &low, &high);
10249                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10250                 unlock_iovec(vec, arg2, arg3, 1);
10251             } else {
10252                 ret = -host_to_target_errno(errno);
10253             }
10254         }
10255         return ret;
10256 #endif
10257 #if defined(TARGET_NR_pwritev)
10258     case TARGET_NR_pwritev:
10259         {
10260             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10261             if (vec != NULL) {
10262                 unsigned long low, high;
10263 
10264                 target_to_host_low_high(arg4, arg5, &low, &high);
10265                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10266                 unlock_iovec(vec, arg2, arg3, 0);
10267             } else {
10268                 ret = -host_to_target_errno(errno);
10269             }
10270         }
10271         return ret;
10272 #endif
10273     case TARGET_NR_getsid:
10274         return get_errno(getsid(arg1));
10275 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10276     case TARGET_NR_fdatasync:
10277         return get_errno(fdatasync(arg1));
10278 #endif
10279 #ifdef TARGET_NR__sysctl
10280     case TARGET_NR__sysctl:
10281         /* We don't implement this, but ENOTDIR is always a safe
10282            return value. */
10283         return -TARGET_ENOTDIR;
10284 #endif
10285     case TARGET_NR_sched_getaffinity:
10286         {
10287             unsigned int mask_size;
10288             unsigned long *mask;
10289 
10290             /*
10291              * sched_getaffinity needs multiples of ulong, so need to take
10292              * care of mismatches between target ulong and host ulong sizes.
10293              */
10294             if (arg2 & (sizeof(abi_ulong) - 1)) {
10295                 return -TARGET_EINVAL;
10296             }
10297             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10298 
10299             mask = alloca(mask_size);
10300             memset(mask, 0, mask_size);
10301             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10302 
10303             if (!is_error(ret)) {
10304                 if (ret > arg2) {
10305                     /* More data returned than the caller's buffer will fit.
10306                      * This only happens if sizeof(abi_long) < sizeof(long)
10307                      * and the caller passed us a buffer holding an odd number
10308                      * of abi_longs. If the host kernel is actually using the
10309                      * extra 4 bytes then fail EINVAL; otherwise we can just
10310                      * ignore them and only copy the interesting part.
10311                      */
10312                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10313                     if (numcpus > arg2 * 8) {
10314                         return -TARGET_EINVAL;
10315                     }
10316                     ret = arg2;
10317                 }
10318 
10319                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10320                     return -TARGET_EFAULT;
10321                 }
10322             }
10323         }
10324         return ret;
10325     case TARGET_NR_sched_setaffinity:
10326         {
10327             unsigned int mask_size;
10328             unsigned long *mask;
10329 
10330             /*
10331              * sched_setaffinity needs multiples of ulong, so need to take
10332              * care of mismatches between target ulong and host ulong sizes.
10333              */
10334             if (arg2 & (sizeof(abi_ulong) - 1)) {
10335                 return -TARGET_EINVAL;
10336             }
10337             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10338             mask = alloca(mask_size);
10339 
10340             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10341             if (ret) {
10342                 return ret;
10343             }
10344 
10345             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10346         }
10347     case TARGET_NR_getcpu:
10348         {
10349             unsigned cpu, node;
10350             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10351                                        arg2 ? &node : NULL,
10352                                        NULL));
10353             if (is_error(ret)) {
10354                 return ret;
10355             }
10356             if (arg1 && put_user_u32(cpu, arg1)) {
10357                 return -TARGET_EFAULT;
10358             }
10359             if (arg2 && put_user_u32(node, arg2)) {
10360                 return -TARGET_EFAULT;
10361             }
10362         }
10363         return ret;
10364     case TARGET_NR_sched_setparam:
10365         {
10366             struct sched_param *target_schp;
10367             struct sched_param schp;
10368 
10369             if (arg2 == 0) {
10370                 return -TARGET_EINVAL;
10371             }
10372             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10373                 return -TARGET_EFAULT;
10374             schp.sched_priority = tswap32(target_schp->sched_priority);
10375             unlock_user_struct(target_schp, arg2, 0);
10376             return get_errno(sched_setparam(arg1, &schp));
10377         }
10378     case TARGET_NR_sched_getparam:
10379         {
10380             struct sched_param *target_schp;
10381             struct sched_param schp;
10382 
10383             if (arg2 == 0) {
10384                 return -TARGET_EINVAL;
10385             }
10386             ret = get_errno(sched_getparam(arg1, &schp));
10387             if (!is_error(ret)) {
10388                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10389                     return -TARGET_EFAULT;
10390                 target_schp->sched_priority = tswap32(schp.sched_priority);
10391                 unlock_user_struct(target_schp, arg2, 1);
10392             }
10393         }
10394         return ret;
10395     case TARGET_NR_sched_setscheduler:
10396         {
10397             struct sched_param *target_schp;
10398             struct sched_param schp;
10399             if (arg3 == 0) {
10400                 return -TARGET_EINVAL;
10401             }
10402             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10403                 return -TARGET_EFAULT;
10404             schp.sched_priority = tswap32(target_schp->sched_priority);
10405             unlock_user_struct(target_schp, arg3, 0);
10406             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10407         }
10408     case TARGET_NR_sched_getscheduler:
10409         return get_errno(sched_getscheduler(arg1));
10410     case TARGET_NR_sched_yield:
10411         return get_errno(sched_yield());
10412     case TARGET_NR_sched_get_priority_max:
10413         return get_errno(sched_get_priority_max(arg1));
10414     case TARGET_NR_sched_get_priority_min:
10415         return get_errno(sched_get_priority_min(arg1));
10416 #ifdef TARGET_NR_sched_rr_get_interval
10417     case TARGET_NR_sched_rr_get_interval:
10418         {
10419             struct timespec ts;
10420             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10421             if (!is_error(ret)) {
10422                 ret = host_to_target_timespec(arg2, &ts);
10423             }
10424         }
10425         return ret;
10426 #endif
10427 #if defined(TARGET_NR_nanosleep)
10428     case TARGET_NR_nanosleep:
10429         {
10430             struct timespec req, rem;
10431             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10432             ret = get_errno(safe_nanosleep(&req, &rem));
10433             if (is_error(ret) && arg2) {
10434                 host_to_target_timespec(arg2, &rem);
10435             }
10436         }
10437         return ret;
10438 #endif
10439     case TARGET_NR_prctl:
10440         switch (arg1) {
10441         case PR_GET_PDEATHSIG:
10442         {
10443             int deathsig;
10444             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10445             if (!is_error(ret) && arg2
10446                 && put_user_ual(deathsig, arg2)) {
10447                 return -TARGET_EFAULT;
10448             }
10449             return ret;
10450         }
10451 #ifdef PR_GET_NAME
10452         case PR_GET_NAME:
10453         {
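                  /* Task names are limited to 16 bytes (TASK_COMM_LEN). */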
10454             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10455             if (!name) {
10456                 return -TARGET_EFAULT;
10457             }
10458             ret = get_errno(prctl(arg1, (unsigned long)name,
10459                                   arg3, arg4, arg5));
10460             unlock_user(name, arg2, 16);
10461             return ret;
10462         }
10463         case PR_SET_NAME:
10464         {
10465             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10466             if (!name) {
10467                 return -TARGET_EFAULT;
10468             }
10469             ret = get_errno(prctl(arg1, (unsigned long)name,
10470                                   arg3, arg4, arg5));
10471             unlock_user(name, arg2, 0);
10472             return ret;
10473         }
10474 #endif
10475 #ifdef TARGET_MIPS
10476         case TARGET_PR_GET_FP_MODE:
10477         {
10478             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10479             ret = 0;
10480             if (env->CP0_Status & (1 << CP0St_FR)) {
10481                 ret |= TARGET_PR_FP_MODE_FR;
10482             }
10483             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10484                 ret |= TARGET_PR_FP_MODE_FRE;
10485             }
10486             return ret;
10487         }
10488         case TARGET_PR_SET_FP_MODE:
10489         {
10490             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10491             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10492             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10493             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10494             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10495 
10496             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10497                                             TARGET_PR_FP_MODE_FRE;
10498 
10499             /* If nothing to change, return right away, successfully.  */
10500             if (old_fr == new_fr && old_fre == new_fre) {
10501                 return 0;
10502             }
10503             /* Check the value is valid */
10504             if (arg2 & ~known_bits) {
10505                 return -TARGET_EOPNOTSUPP;
10506             }
10507             /* Setting FRE without FR is not supported.  */
10508             if (new_fre && !new_fr) {
10509                 return -TARGET_EOPNOTSUPP;
10510             }
10511             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10512                 /* FR1 is not supported */
10513                 return -TARGET_EOPNOTSUPP;
10514             }
10515             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10516                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10517                 /* cannot set FR=0 */
10518                 return -TARGET_EOPNOTSUPP;
10519             }
10520             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10521                 /* Cannot set FRE=1 */
10522                 return -TARGET_EOPNOTSUPP;
10523             }
10524 
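                  /*
                   * Switching FR changes how 64-bit FP values are stored:
                   * with FR=0 they occupy even/odd register pairs, with
                   * FR=1 each register is a full 64 bits.  Move the data
                   * between the two layouts so values survive the switch.
                   */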
10525             int i;
10526             fpr_t *fpr = env->active_fpu.fpr;
10527             for (i = 0; i < 32 ; i += 2) {
10528                 if (!old_fr && new_fr) {
10529                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10530                 } else if (old_fr && !new_fr) {
10531                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10532                 }
10533             }
10534 
10535             if (new_fr) {
10536                 env->CP0_Status |= (1 << CP0St_FR);
10537                 env->hflags |= MIPS_HFLAG_F64;
10538             } else {
10539                 env->CP0_Status &= ~(1 << CP0St_FR);
10540                 env->hflags &= ~MIPS_HFLAG_F64;
10541             }
10542             if (new_fre) {
10543                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10544                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10545                     env->hflags |= MIPS_HFLAG_FRE;
10546                 }
10547             } else {
10548                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10549                 env->hflags &= ~MIPS_HFLAG_FRE;
10550             }
10551 
10552             return 0;
10553         }
10554 #endif /* MIPS */
10555 #ifdef TARGET_AARCH64
10556         case TARGET_PR_SVE_SET_VL:
10557             /*
10558              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10559              * PR_SVE_VL_INHERIT.  Note the kernel definition
10560              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10561              * even though the current architectural maximum is VQ=16.
10562              */
10563             ret = -TARGET_EINVAL;
10564             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10565                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10566                 CPUARMState *env = cpu_env;
10567                 ARMCPU *cpu = env_archcpu(env);
10568                 uint32_t vq, old_vq;
10569 
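                      /*
                       * arg2 is the requested vector length in bytes; convert
                       * it to 16-byte quanta and clamp it to what this CPU
                       * model actually supports before applying it.
                       */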
10570                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10571                 vq = MAX(arg2 / 16, 1);
10572                 vq = MIN(vq, cpu->sve_max_vq);
10573 
10574                 if (vq < old_vq) {
10575                     aarch64_sve_narrow_vq(env, vq);
10576                 }
10577                 env->vfp.zcr_el[1] = vq - 1;
10578                 arm_rebuild_hflags(env);
10579                 ret = vq * 16;
10580             }
10581             return ret;
10582         case TARGET_PR_SVE_GET_VL:
10583             ret = -TARGET_EINVAL;
10584             {
10585                 ARMCPU *cpu = env_archcpu(cpu_env);
10586                 if (cpu_isar_feature(aa64_sve, cpu)) {
10587                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10588                 }
10589             }
10590             return ret;
10591         case TARGET_PR_PAC_RESET_KEYS:
10592             {
10593                 CPUARMState *env = cpu_env;
10594                 ARMCPU *cpu = env_archcpu(env);
10595 
10596                 if (arg3 || arg4 || arg5) {
10597                     return -TARGET_EINVAL;
10598                 }
10599                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10600                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10601                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10602                                TARGET_PR_PAC_APGAKEY);
10603                     int ret = 0;
10604                     Error *err = NULL;
10605 
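                          /* An argument of zero means reset all of the keys.  */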
10606                     if (arg2 == 0) {
10607                         arg2 = all;
10608                     } else if (arg2 & ~all) {
10609                         return -TARGET_EINVAL;
10610                     }
10611                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10612                         ret |= qemu_guest_getrandom(&env->keys.apia,
10613                                                     sizeof(ARMPACKey), &err);
10614                     }
10615                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10616                         ret |= qemu_guest_getrandom(&env->keys.apib,
10617                                                     sizeof(ARMPACKey), &err);
10618                     }
10619                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10620                         ret |= qemu_guest_getrandom(&env->keys.apda,
10621                                                     sizeof(ARMPACKey), &err);
10622                     }
10623                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10624                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10625                                                     sizeof(ARMPACKey), &err);
10626                     }
10627                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10628                         ret |= qemu_guest_getrandom(&env->keys.apga,
10629                                                     sizeof(ARMPACKey), &err);
10630                     }
10631                     if (ret != 0) {
10632                         /*
10633                          * Some unknown failure in the crypto.  The best
10634                          * we can do is log it and fail the syscall.
10635                          * The real syscall cannot fail this way.
10636                          */
10637                         qemu_log_mask(LOG_UNIMP,
10638                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10639                                       error_get_pretty(err));
10640                         error_free(err);
10641                         return -TARGET_EIO;
10642                     }
10643                     return 0;
10644                 }
10645             }
10646             return -TARGET_EINVAL;
10647 #endif /* AARCH64 */
10648         case PR_GET_SECCOMP:
10649         case PR_SET_SECCOMP:
10650             /* Disable seccomp to prevent the target disabling syscalls we
10651              * need. */
10652             return -TARGET_EINVAL;
10653         default:
10654             /* Most prctl options have no pointer arguments */
10655             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10656         }
10657         break;
10658 #ifdef TARGET_NR_arch_prctl
10659     case TARGET_NR_arch_prctl:
10660         return do_arch_prctl(cpu_env, arg1, arg2);
10661 #endif
10662 #ifdef TARGET_NR_pread64
10663     case TARGET_NR_pread64:
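              /*
               * Some 32-bit ABIs pass 64-bit values in aligned (even/odd)
               * register pairs, which leaves a padding slot before the
               * offset; shift the arguments down so the offset halves end
               * up in arg4/arg5.
               */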
10664         if (regpairs_aligned(cpu_env, num)) {
10665             arg4 = arg5;
10666             arg5 = arg6;
10667         }
10668         if (arg2 == 0 && arg3 == 0) {
10669             /* Special-case NULL buffer and zero length, which should succeed */
10670             p = 0;
10671         } else {
10672             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10673             if (!p) {
10674                 return -TARGET_EFAULT;
10675             }
10676         }
10677         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10678         unlock_user(p, arg2, ret);
10679         return ret;
10680     case TARGET_NR_pwrite64:
10681         if (regpairs_aligned(cpu_env, num)) {
10682             arg4 = arg5;
10683             arg5 = arg6;
10684         }
10685         if (arg2 == 0 && arg3 == 0) {
10686             /* Special-case NULL buffer and zero length, which should succeed */
10687             p = 0;
10688         } else {
10689             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10690             if (!p) {
10691                 return -TARGET_EFAULT;
10692             }
10693         }
10694         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10695         unlock_user(p, arg2, 0);
10696         return ret;
10697 #endif
10698     case TARGET_NR_getcwd:
10699         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10700             return -TARGET_EFAULT;
10701         ret = get_errno(sys_getcwd1(p, arg2));
10702         unlock_user(p, arg1, ret);
10703         return ret;
10704     case TARGET_NR_capget:
10705     case TARGET_NR_capset:
10706     {
10707         struct target_user_cap_header *target_header;
10708         struct target_user_cap_data *target_data = NULL;
10709         struct __user_cap_header_struct header;
10710         struct __user_cap_data_struct data[2];
10711         struct __user_cap_data_struct *dataptr = NULL;
10712         int i, target_datalen;
10713         int data_items = 1;
10714 
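              /*
               * capget and capset share the marshalling of the header and
               * data arrays below; only the direction of the data copy and
               * the host call itself differ.
               */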
10715         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10716             return -TARGET_EFAULT;
10717         }
10718         header.version = tswap32(target_header->version);
10719         header.pid = tswap32(target_header->pid);
10720 
10721         if (header.version != _LINUX_CAPABILITY_VERSION) {
10722             /* Version 2 and up takes a pointer to two user_data structs */
10723             data_items = 2;
10724         }
10725 
10726         target_datalen = sizeof(*target_data) * data_items;
10727 
10728         if (arg2) {
10729             if (num == TARGET_NR_capget) {
10730                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10731             } else {
10732                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10733             }
10734             if (!target_data) {
10735                 unlock_user_struct(target_header, arg1, 0);
10736                 return -TARGET_EFAULT;
10737             }
10738 
10739             if (num == TARGET_NR_capset) {
10740                 for (i = 0; i < data_items; i++) {
10741                     data[i].effective = tswap32(target_data[i].effective);
10742                     data[i].permitted = tswap32(target_data[i].permitted);
10743                     data[i].inheritable = tswap32(target_data[i].inheritable);
10744                 }
10745             }
10746 
10747             dataptr = data;
10748         }
10749 
10750         if (num == TARGET_NR_capget) {
10751             ret = get_errno(capget(&header, dataptr));
10752         } else {
10753             ret = get_errno(capset(&header, dataptr));
10754         }
10755 
10756         /* The kernel always updates version for both capget and capset */
10757         target_header->version = tswap32(header.version);
10758         unlock_user_struct(target_header, arg1, 1);
10759 
10760         if (arg2) {
10761             if (num == TARGET_NR_capget) {
10762                 for (i = 0; i < data_items; i++) {
10763                     target_data[i].effective = tswap32(data[i].effective);
10764                     target_data[i].permitted = tswap32(data[i].permitted);
10765                     target_data[i].inheritable = tswap32(data[i].inheritable);
10766                 }
10767                 unlock_user(target_data, arg2, target_datalen);
10768             } else {
10769                 unlock_user(target_data, arg2, 0);
10770             }
10771         }
10772         return ret;
10773     }
10774     case TARGET_NR_sigaltstack:
10775         return do_sigaltstack(arg1, arg2,
10776                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10777 
10778 #ifdef CONFIG_SENDFILE
10779 #ifdef TARGET_NR_sendfile
10780     case TARGET_NR_sendfile:
10781     {
10782         off_t *offp = NULL;
10783         off_t off;
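              /*
               * The offset is optional: it is read from guest memory only
               * when a pointer was supplied, and written back on success.
               */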
10784         if (arg3) {
10785             ret = get_user_sal(off, arg3);
10786             if (is_error(ret)) {
10787                 return ret;
10788             }
10789             offp = &off;
10790         }
10791         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10792         if (!is_error(ret) && arg3) {
10793             abi_long ret2 = put_user_sal(off, arg3);
10794             if (is_error(ret2)) {
10795                 ret = ret2;
10796             }
10797         }
10798         return ret;
10799     }
10800 #endif
10801 #ifdef TARGET_NR_sendfile64
10802     case TARGET_NR_sendfile64:
10803     {
10804         off_t *offp = NULL;
10805         off_t off;
10806         if (arg3) {
10807             ret = get_user_s64(off, arg3);
10808             if (is_error(ret)) {
10809                 return ret;
10810             }
10811             offp = &off;
10812         }
10813         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10814         if (!is_error(ret) && arg3) {
10815             abi_long ret2 = put_user_s64(off, arg3);
10816             if (is_error(ret2)) {
10817                 ret = ret2;
10818             }
10819         }
10820         return ret;
10821     }
10822 #endif
10823 #endif
10824 #ifdef TARGET_NR_vfork
10825     case TARGET_NR_vfork:
10826         return get_errno(do_fork(cpu_env,
10827                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10828                          0, 0, 0, 0));
10829 #endif
10830 #ifdef TARGET_NR_ugetrlimit
10831     case TARGET_NR_ugetrlimit:
10832     {
10833         struct rlimit rlim;
10834         int resource = target_to_host_resource(arg1);
10835         ret = get_errno(getrlimit(resource, &rlim));
10836         if (!is_error(ret)) {
10837             struct target_rlimit *target_rlim;
10838             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10839                 return -TARGET_EFAULT;
10840             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10841             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10842             unlock_user_struct(target_rlim, arg2, 1);
10843         }
10844         return ret;
10845     }
10846 #endif
10847 #ifdef TARGET_NR_truncate64
10848     case TARGET_NR_truncate64:
10849         if (!(p = lock_user_string(arg1)))
10850             return -TARGET_EFAULT;
10851         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10852         unlock_user(p, arg1, 0);
10853         return ret;
10854 #endif
10855 #ifdef TARGET_NR_ftruncate64
10856     case TARGET_NR_ftruncate64:
10857         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10858 #endif
10859 #ifdef TARGET_NR_stat64
10860     case TARGET_NR_stat64:
10861         if (!(p = lock_user_string(arg1))) {
10862             return -TARGET_EFAULT;
10863         }
10864         ret = get_errno(stat(path(p), &st));
10865         unlock_user(p, arg1, 0);
10866         if (!is_error(ret))
10867             ret = host_to_target_stat64(cpu_env, arg2, &st);
10868         return ret;
10869 #endif
10870 #ifdef TARGET_NR_lstat64
10871     case TARGET_NR_lstat64:
10872         if (!(p = lock_user_string(arg1))) {
10873             return -TARGET_EFAULT;
10874         }
10875         ret = get_errno(lstat(path(p), &st));
10876         unlock_user(p, arg1, 0);
10877         if (!is_error(ret))
10878             ret = host_to_target_stat64(cpu_env, arg2, &st);
10879         return ret;
10880 #endif
10881 #ifdef TARGET_NR_fstat64
10882     case TARGET_NR_fstat64:
10883         ret = get_errno(fstat(arg1, &st));
10884         if (!is_error(ret))
10885             ret = host_to_target_stat64(cpu_env, arg2, &st);
10886         return ret;
10887 #endif
10888 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10889 #ifdef TARGET_NR_fstatat64
10890     case TARGET_NR_fstatat64:
10891 #endif
10892 #ifdef TARGET_NR_newfstatat
10893     case TARGET_NR_newfstatat:
10894 #endif
10895         if (!(p = lock_user_string(arg2))) {
10896             return -TARGET_EFAULT;
10897         }
10898         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10899         unlock_user(p, arg2, 0);
10900         if (!is_error(ret))
10901             ret = host_to_target_stat64(cpu_env, arg3, &st);
10902         return ret;
10903 #endif
10904 #if defined(TARGET_NR_statx)
10905     case TARGET_NR_statx:
10906         {
10907             struct target_statx *target_stx;
10908             int dirfd = arg1;
10909             int flags = arg3;
10910 
10911             p = lock_user_string(arg2);
10912             if (p == NULL) {
10913                 return -TARGET_EFAULT;
10914             }
10915 #if defined(__NR_statx)
10916             {
10917                 /*
10918                  * It is assumed that struct statx is architecture independent.
10919                  */
10920                 struct target_statx host_stx;
10921                 int mask = arg4;
10922 
10923                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10924                 if (!is_error(ret)) {
10925                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10926                         unlock_user(p, arg2, 0);
10927                         return -TARGET_EFAULT;
10928                     }
10929                 }
10930 
10931                 if (ret != -TARGET_ENOSYS) {
10932                     unlock_user(p, arg2, 0);
10933                     return ret;
10934                 }
10935             }
10936 #endif
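                  /*
                   * Either the host has no statx syscall or it returned
                   * ENOSYS: fall back to fstatat() and fill in the fields of
                   * the target statx structure that a struct stat provides.
                   */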
10937             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10938             unlock_user(p, arg2, 0);
10939 
10940             if (!is_error(ret)) {
10941                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10942                     return -TARGET_EFAULT;
10943                 }
10944                 memset(target_stx, 0, sizeof(*target_stx));
10945                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10946                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10947                 __put_user(st.st_ino, &target_stx->stx_ino);
10948                 __put_user(st.st_mode, &target_stx->stx_mode);
10949                 __put_user(st.st_uid, &target_stx->stx_uid);
10950                 __put_user(st.st_gid, &target_stx->stx_gid);
10951                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10952                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10953                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10954                 __put_user(st.st_size, &target_stx->stx_size);
10955                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10956                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10957                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10958                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10959                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10960                 unlock_user_struct(target_stx, arg5, 1);
10961             }
10962         }
10963         return ret;
10964 #endif
10965 #ifdef TARGET_NR_lchown
10966     case TARGET_NR_lchown:
10967         if (!(p = lock_user_string(arg1)))
10968             return -TARGET_EFAULT;
10969         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10970         unlock_user(p, arg1, 0);
10971         return ret;
10972 #endif
10973 #ifdef TARGET_NR_getuid
10974     case TARGET_NR_getuid:
10975         return get_errno(high2lowuid(getuid()));
10976 #endif
10977 #ifdef TARGET_NR_getgid
10978     case TARGET_NR_getgid:
10979         return get_errno(high2lowgid(getgid()));
10980 #endif
10981 #ifdef TARGET_NR_geteuid
10982     case TARGET_NR_geteuid:
10983         return get_errno(high2lowuid(geteuid()));
10984 #endif
10985 #ifdef TARGET_NR_getegid
10986     case TARGET_NR_getegid:
10987         return get_errno(high2lowgid(getegid()));
10988 #endif
10989     case TARGET_NR_setreuid:
10990         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10991     case TARGET_NR_setregid:
10992         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10993     case TARGET_NR_getgroups:
10994         {
10995             int gidsetsize = arg1;
10996             target_id *target_grouplist;
10997             gid_t *grouplist;
10998             int i;
10999 
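                  /*
                   * A gidsetsize of 0 queries the number of supplementary
                   * groups; nothing is copied back to the guest in that case.
                   */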
11000             grouplist = alloca(gidsetsize * sizeof(gid_t));
11001             ret = get_errno(getgroups(gidsetsize, grouplist));
11002             if (gidsetsize == 0)
11003                 return ret;
11004             if (!is_error(ret)) {
11005                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11006                 if (!target_grouplist)
11007                     return -TARGET_EFAULT;
11008                 for (i = 0; i < ret; i++)
11009                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11010                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11011             }
11012         }
11013         return ret;
11014     case TARGET_NR_setgroups:
11015         {
11016             int gidsetsize = arg1;
11017             target_id *target_grouplist;
11018             gid_t *grouplist = NULL;
11019             int i;
11020             if (gidsetsize) {
11021                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11022                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11023                 if (!target_grouplist) {
11024                     return -TARGET_EFAULT;
11025                 }
11026                 for (i = 0; i < gidsetsize; i++) {
11027                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11028                 }
11029                 unlock_user(target_grouplist, arg2, 0);
11030             }
11031             return get_errno(setgroups(gidsetsize, grouplist));
11032         }
11033     case TARGET_NR_fchown:
11034         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11035 #if defined(TARGET_NR_fchownat)
11036     case TARGET_NR_fchownat:
11037         if (!(p = lock_user_string(arg2)))
11038             return -TARGET_EFAULT;
11039         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11040                                  low2highgid(arg4), arg5));
11041         unlock_user(p, arg2, 0);
11042         return ret;
11043 #endif
11044 #ifdef TARGET_NR_setresuid
11045     case TARGET_NR_setresuid:
11046         return get_errno(sys_setresuid(low2highuid(arg1),
11047                                        low2highuid(arg2),
11048                                        low2highuid(arg3)));
11049 #endif
11050 #ifdef TARGET_NR_getresuid
11051     case TARGET_NR_getresuid:
11052         {
11053             uid_t ruid, euid, suid;
11054             ret = get_errno(getresuid(&ruid, &euid, &suid));
11055             if (!is_error(ret)) {
11056                 if (put_user_id(high2lowuid(ruid), arg1)
11057                     || put_user_id(high2lowuid(euid), arg2)
11058                     || put_user_id(high2lowuid(suid), arg3))
11059                     return -TARGET_EFAULT;
11060             }
11061         }
11062         return ret;
11063 #endif
11064 #ifdef TARGET_NR_setresgid
11065     case TARGET_NR_setresgid:
11066         return get_errno(sys_setresgid(low2highgid(arg1),
11067                                        low2highgid(arg2),
11068                                        low2highgid(arg3)));
11069 #endif
11070 #ifdef TARGET_NR_getresgid
11071     case TARGET_NR_getresgid:
11072         {
11073             gid_t rgid, egid, sgid;
11074             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11075             if (!is_error(ret)) {
11076                 if (put_user_id(high2lowgid(rgid), arg1)
11077                     || put_user_id(high2lowgid(egid), arg2)
11078                     || put_user_id(high2lowgid(sgid), arg3))
11079                     return -TARGET_EFAULT;
11080             }
11081         }
11082         return ret;
11083 #endif
11084 #ifdef TARGET_NR_chown
11085     case TARGET_NR_chown:
11086         if (!(p = lock_user_string(arg1)))
11087             return -TARGET_EFAULT;
11088         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11089         unlock_user(p, arg1, 0);
11090         return ret;
11091 #endif
11092     case TARGET_NR_setuid:
11093         return get_errno(sys_setuid(low2highuid(arg1)));
11094     case TARGET_NR_setgid:
11095         return get_errno(sys_setgid(low2highgid(arg1)));
11096     case TARGET_NR_setfsuid:
11097         return get_errno(setfsuid(arg1));
11098     case TARGET_NR_setfsgid:
11099         return get_errno(setfsgid(arg1));
11100 
11101 #ifdef TARGET_NR_lchown32
11102     case TARGET_NR_lchown32:
11103         if (!(p = lock_user_string(arg1)))
11104             return -TARGET_EFAULT;
11105         ret = get_errno(lchown(p, arg2, arg3));
11106         unlock_user(p, arg1, 0);
11107         return ret;
11108 #endif
11109 #ifdef TARGET_NR_getuid32
11110     case TARGET_NR_getuid32:
11111         return get_errno(getuid());
11112 #endif
11113 
11114 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11115     /* Alpha specific */
11116     case TARGET_NR_getxuid:
11117         {
11118             uid_t euid;
11119             euid = geteuid();
11120             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11121         }
11122         return get_errno(getuid());
11123 #endif
11124 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11125     /* Alpha specific */
11126     case TARGET_NR_getxgid:
11127         {
11128             gid_t egid;
11129             egid = getegid();
11130             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11131         }
11132         return get_errno(getgid());
11133 #endif
11134 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11135     /* Alpha specific */
11136     case TARGET_NR_osf_getsysinfo:
11137         ret = -TARGET_EOPNOTSUPP;
11138         switch (arg1) {
11139           case TARGET_GSI_IEEE_FP_CONTROL:
11140             {
11141                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11142                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11143 
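                      /*
                       * The exception status bits are kept in the hardware
                       * FPCR (see SSI_IEEE_FP_CONTROL below), so merge them
                       * into the reported software control word.
                       */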
11144                 swcr &= ~SWCR_STATUS_MASK;
11145                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11146 
11147                 if (put_user_u64(swcr, arg2))
11148                     return -TARGET_EFAULT;
11149                 ret = 0;
11150             }
11151             break;
11152 
11153           /* case GSI_IEEE_STATE_AT_SIGNAL:
11154              -- Not implemented in linux kernel.
11155              case GSI_UACPROC:
11156              -- Retrieves current unaligned access state; not much used.
11157              case GSI_PROC_TYPE:
11158              -- Retrieves implver information; surely not used.
11159              case GSI_GET_HWRPB:
11160              -- Grabs a copy of the HWRPB; surely not used.
11161           */
11162         }
11163         return ret;
11164 #endif
11165 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11166     /* Alpha specific */
11167     case TARGET_NR_osf_setsysinfo:
11168         ret = -TARGET_EOPNOTSUPP;
11169         switch (arg1) {
11170           case TARGET_SSI_IEEE_FP_CONTROL:
11171             {
11172                 uint64_t swcr, fpcr;
11173 
11174                 if (get_user_u64(swcr, arg2)) {
11175                     return -TARGET_EFAULT;
11176                 }
11177 
11178                 /*
11179                  * The kernel calls swcr_update_status to update the
11180                  * status bits from the fpcr at every point that it
11181                  * could be queried.  Therefore, we store the status
11182                  * bits only in FPCR.
11183                  */
11184                 ((CPUAlphaState *)cpu_env)->swcr
11185                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11186 
11187                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11188                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11189                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11190                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11191                 ret = 0;
11192             }
11193             break;
11194 
11195           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11196             {
11197                 uint64_t exc, fpcr, fex;
11198 
11199                 if (get_user_u64(exc, arg2)) {
11200                     return -TARGET_EFAULT;
11201                 }
11202                 exc &= SWCR_STATUS_MASK;
11203                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11204 
11205                 /* Old exceptions are not signaled.  */
11206                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11207                 fex = exc & ~fex;
11208                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11209                 fex &= ((CPUArchState *)cpu_env)->swcr;
11210 
11211                 /* Update the hardware fpcr.  */
11212                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11213                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11214 
11215                 if (fex) {
11216                     int si_code = TARGET_FPE_FLTUNK;
11217                     target_siginfo_t info;
11218 
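                          /*
                           * Later tests overwrite si_code, so the last
                           * matching exception (invalid operation) wins.
                           */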
11219                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11220                         si_code = TARGET_FPE_FLTUND;
11221                     }
11222                     if (fex & SWCR_TRAP_ENABLE_INE) {
11223                         si_code = TARGET_FPE_FLTRES;
11224                     }
11225                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11226                         si_code = TARGET_FPE_FLTUND;
11227                     }
11228                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11229                         si_code = TARGET_FPE_FLTOVF;
11230                     }
11231                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11232                         si_code = TARGET_FPE_FLTDIV;
11233                     }
11234                     if (fex & SWCR_TRAP_ENABLE_INV) {
11235                         si_code = TARGET_FPE_FLTINV;
11236                     }
11237 
11238                     info.si_signo = SIGFPE;
11239                     info.si_errno = 0;
11240                     info.si_code = si_code;
11241                     info._sifields._sigfault._addr
11242                         = ((CPUArchState *)cpu_env)->pc;
11243                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11244                                  QEMU_SI_FAULT, &info);
11245                 }
11246                 ret = 0;
11247             }
11248             break;
11249 
11250           /* case SSI_NVPAIRS:
11251              -- Used with SSIN_UACPROC to enable unaligned accesses.
11252              case SSI_IEEE_STATE_AT_SIGNAL:
11253              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11254              -- Not implemented in linux kernel
11255           */
11256         }
11257         return ret;
11258 #endif
11259 #ifdef TARGET_NR_osf_sigprocmask
11260     /* Alpha specific.  */
11261     case TARGET_NR_osf_sigprocmask:
11262         {
11263             abi_ulong mask;
11264             int how;
11265             sigset_t set, oldset;
11266 
11267             switch (arg1) {
11268             case TARGET_SIG_BLOCK:
11269                 how = SIG_BLOCK;
11270                 break;
11271             case TARGET_SIG_UNBLOCK:
11272                 how = SIG_UNBLOCK;
11273                 break;
11274             case TARGET_SIG_SETMASK:
11275                 how = SIG_SETMASK;
11276                 break;
11277             default:
11278                 return -TARGET_EINVAL;
11279             }
11280             mask = arg2;
11281             target_to_host_old_sigset(&set, &mask);
11282             ret = do_sigprocmask(how, &set, &oldset);
11283             if (!ret) {
11284                 host_to_target_old_sigset(&mask, &oldset);
11285                 ret = mask;
11286             }
11287         }
11288         return ret;
11289 #endif
11290 
11291 #ifdef TARGET_NR_getgid32
11292     case TARGET_NR_getgid32:
11293         return get_errno(getgid());
11294 #endif
11295 #ifdef TARGET_NR_geteuid32
11296     case TARGET_NR_geteuid32:
11297         return get_errno(geteuid());
11298 #endif
11299 #ifdef TARGET_NR_getegid32
11300     case TARGET_NR_getegid32:
11301         return get_errno(getegid());
11302 #endif
11303 #ifdef TARGET_NR_setreuid32
11304     case TARGET_NR_setreuid32:
11305         return get_errno(setreuid(arg1, arg2));
11306 #endif
11307 #ifdef TARGET_NR_setregid32
11308     case TARGET_NR_setregid32:
11309         return get_errno(setregid(arg1, arg2));
11310 #endif
11311 #ifdef TARGET_NR_getgroups32
11312     case TARGET_NR_getgroups32:
11313         {
11314             int gidsetsize = arg1;
11315             uint32_t *target_grouplist;
11316             gid_t *grouplist;
11317             int i;
11318 
11319             grouplist = alloca(gidsetsize * sizeof(gid_t));
11320             ret = get_errno(getgroups(gidsetsize, grouplist));
11321             if (gidsetsize == 0)
11322                 return ret;
11323             if (!is_error(ret)) {
11324                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11325                 if (!target_grouplist) {
11326                     return -TARGET_EFAULT;
11327                 }
11328                 for (i = 0; i < ret; i++)
11329                     target_grouplist[i] = tswap32(grouplist[i]);
11330                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11331             }
11332         }
11333         return ret;
11334 #endif
11335 #ifdef TARGET_NR_setgroups32
11336     case TARGET_NR_setgroups32:
11337         {
11338             int gidsetsize = arg1;
11339             uint32_t *target_grouplist;
11340             gid_t *grouplist;
11341             int i;
11342 
11343             grouplist = alloca(gidsetsize * sizeof(gid_t));
11344             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11345             if (!target_grouplist) {
11346                 return -TARGET_EFAULT;
11347             }
11348             for (i = 0; i < gidsetsize; i++)
11349                 grouplist[i] = tswap32(target_grouplist[i]);
11350             unlock_user(target_grouplist, arg2, 0);
11351             return get_errno(setgroups(gidsetsize, grouplist));
11352         }
11353 #endif
11354 #ifdef TARGET_NR_fchown32
11355     case TARGET_NR_fchown32:
11356         return get_errno(fchown(arg1, arg2, arg3));
11357 #endif
11358 #ifdef TARGET_NR_setresuid32
11359     case TARGET_NR_setresuid32:
11360         return get_errno(sys_setresuid(arg1, arg2, arg3));
11361 #endif
11362 #ifdef TARGET_NR_getresuid32
11363     case TARGET_NR_getresuid32:
11364         {
11365             uid_t ruid, euid, suid;
11366             ret = get_errno(getresuid(&ruid, &euid, &suid));
11367             if (!is_error(ret)) {
11368                 if (put_user_u32(ruid, arg1)
11369                     || put_user_u32(euid, arg2)
11370                     || put_user_u32(suid, arg3))
11371                     return -TARGET_EFAULT;
11372             }
11373         }
11374         return ret;
11375 #endif
11376 #ifdef TARGET_NR_setresgid32
11377     case TARGET_NR_setresgid32:
11378         return get_errno(sys_setresgid(arg1, arg2, arg3));
11379 #endif
11380 #ifdef TARGET_NR_getresgid32
11381     case TARGET_NR_getresgid32:
11382         {
11383             gid_t rgid, egid, sgid;
11384             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11385             if (!is_error(ret)) {
11386                 if (put_user_u32(rgid, arg1)
11387                     || put_user_u32(egid, arg2)
11388                     || put_user_u32(sgid, arg3))
11389                     return -TARGET_EFAULT;
11390             }
11391         }
11392         return ret;
11393 #endif
11394 #ifdef TARGET_NR_chown32
11395     case TARGET_NR_chown32:
11396         if (!(p = lock_user_string(arg1)))
11397             return -TARGET_EFAULT;
11398         ret = get_errno(chown(p, arg2, arg3));
11399         unlock_user(p, arg1, 0);
11400         return ret;
11401 #endif
11402 #ifdef TARGET_NR_setuid32
11403     case TARGET_NR_setuid32:
11404         return get_errno(sys_setuid(arg1));
11405 #endif
11406 #ifdef TARGET_NR_setgid32
11407     case TARGET_NR_setgid32:
11408         return get_errno(sys_setgid(arg1));
11409 #endif
11410 #ifdef TARGET_NR_setfsuid32
11411     case TARGET_NR_setfsuid32:
11412         return get_errno(setfsuid(arg1));
11413 #endif
11414 #ifdef TARGET_NR_setfsgid32
11415     case TARGET_NR_setfsgid32:
11416         return get_errno(setfsgid(arg1));
11417 #endif
11418 #ifdef TARGET_NR_mincore
11419     case TARGET_NR_mincore:
11420         {
11421             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11422             if (!a) {
11423                 return -TARGET_ENOMEM;
11424             }
11425             p = lock_user_string(arg3);
11426             if (!p) {
11427                 ret = -TARGET_EFAULT;
11428             } else {
11429                 ret = get_errno(mincore(a, arg2, p));
11430                 unlock_user(p, arg3, ret);
11431             }
11432             unlock_user(a, arg1, 0);
11433         }
11434         return ret;
11435 #endif
11436 #ifdef TARGET_NR_arm_fadvise64_64
11437     case TARGET_NR_arm_fadvise64_64:
11438         /* arm_fadvise64_64 looks like fadvise64_64 but
11439          * with different argument order: fd, advice, offset, len
11440          * rather than the usual fd, offset, len, advice.
11441          * Note that offset and len are both 64-bit so appear as
11442          * pairs of 32-bit registers.
11443          */
11444         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11445                             target_offset64(arg5, arg6), arg2);
11446         return -host_to_target_errno(ret);
11447 #endif
11448 
11449 #if TARGET_ABI_BITS == 32
11450 
11451 #ifdef TARGET_NR_fadvise64_64
11452     case TARGET_NR_fadvise64_64:
11453 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11454         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11455         ret = arg2;
11456         arg2 = arg3;
11457         arg3 = arg4;
11458         arg4 = arg5;
11459         arg5 = arg6;
11460         arg6 = ret;
11461 #else
11462         /* 6 args: fd, offset (high, low), len (high, low), advice */
11463         if (regpairs_aligned(cpu_env, num)) {
11464             /* offset is in (3,4), len in (5,6) and advice in 7 */
11465             arg2 = arg3;
11466             arg3 = arg4;
11467             arg4 = arg5;
11468             arg5 = arg6;
11469             arg6 = arg7;
11470         }
11471 #endif
11472         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11473                             target_offset64(arg4, arg5), arg6);
11474         return -host_to_target_errno(ret);
11475 #endif
11476 
11477 #ifdef TARGET_NR_fadvise64
11478     case TARGET_NR_fadvise64:
11479         /* 5 args: fd, offset (high, low), len, advice */
11480         if (regpairs_aligned(cpu_env, num)) {
11481             /* offset is in (3,4), len in 5 and advice in 6 */
11482             arg2 = arg3;
11483             arg3 = arg4;
11484             arg4 = arg5;
11485             arg5 = arg6;
11486         }
11487         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11488         return -host_to_target_errno(ret);
11489 #endif
11490 
11491 #else /* not a 32-bit ABI */
11492 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11493 #ifdef TARGET_NR_fadvise64_64
11494     case TARGET_NR_fadvise64_64:
11495 #endif
11496 #ifdef TARGET_NR_fadvise64
11497     case TARGET_NR_fadvise64:
11498 #endif
11499 #ifdef TARGET_S390X
11500         switch (arg4) {
11501         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11502         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11503         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11504         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11505         default: break;
11506         }
11507 #endif
11508         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11509 #endif
11510 #endif /* end of 64-bit ABI fadvise handling */
11511 
11512 #ifdef TARGET_NR_madvise
11513     case TARGET_NR_madvise:
11514         /* A straight passthrough may not be safe because qemu sometimes
11515            turns private file-backed mappings into anonymous mappings.
11516            This will break MADV_DONTNEED.
11517            This is a hint, so ignoring and returning success is ok.  */
11518         return 0;
11519 #endif
11520 #ifdef TARGET_NR_fcntl64
11521     case TARGET_NR_fcntl64:
11522     {
11523         int cmd;
11524         struct flock64 fl;
11525         from_flock64_fn *copyfrom = copy_from_user_flock64;
11526         to_flock64_fn *copyto = copy_to_user_flock64;
11527 
11528 #ifdef TARGET_ARM
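              /*
               * The old ARM OABI lays out struct flock64 without the padding
               * that EABI inserts before the 64-bit fields, so old-ABI tasks
               * need the dedicated copy helpers.
               */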
11529         if (!((CPUARMState *)cpu_env)->eabi) {
11530             copyfrom = copy_from_user_oabi_flock64;
11531             copyto = copy_to_user_oabi_flock64;
11532         }
11533 #endif
11534 
11535         cmd = target_to_host_fcntl_cmd(arg2);
11536         if (cmd == -TARGET_EINVAL) {
11537             return cmd;
11538         }
11539 
11540         switch (arg2) {
11541         case TARGET_F_GETLK64:
11542             ret = copyfrom(&fl, arg3);
11543             if (ret) {
11544                 break;
11545             }
11546             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11547             if (ret == 0) {
11548                 ret = copyto(arg3, &fl);
11549             }
11550             break;
11551 
11552         case TARGET_F_SETLK64:
11553         case TARGET_F_SETLKW64:
11554             ret = copyfrom(&fl, arg3);
11555             if (ret) {
11556                 break;
11557             }
11558             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11559             break;
11560         default:
11561             ret = do_fcntl(arg1, arg2, arg3);
11562             break;
11563         }
11564         return ret;
11565     }
11566 #endif
11567 #ifdef TARGET_NR_cacheflush
11568     case TARGET_NR_cacheflush:
11569         /* self-modifying code is handled automatically, so nothing needed */
11570         return 0;
11571 #endif
11572 #ifdef TARGET_NR_getpagesize
11573     case TARGET_NR_getpagesize:
11574         return TARGET_PAGE_SIZE;
11575 #endif
11576     case TARGET_NR_gettid:
11577         return get_errno(sys_gettid());
11578 #ifdef TARGET_NR_readahead
11579     case TARGET_NR_readahead:
11580 #if TARGET_ABI_BITS == 32
11581         if (regpairs_aligned(cpu_env, num)) {
11582             arg2 = arg3;
11583             arg3 = arg4;
11584             arg4 = arg5;
11585         }
11586         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11587 #else
11588         ret = get_errno(readahead(arg1, arg2, arg3));
11589 #endif
11590         return ret;
11591 #endif
11592 #ifdef CONFIG_ATTR
11593 #ifdef TARGET_NR_setxattr
11594     case TARGET_NR_listxattr:
11595     case TARGET_NR_llistxattr:
11596     {
11597         void *p, *b = 0;
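              /*
               * A zero-length request with a NULL buffer is the standard way
               * to query the required size, so only lock guest memory when a
               * buffer was actually supplied.
               */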
11598         if (arg2) {
11599             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11600             if (!b) {
11601                 return -TARGET_EFAULT;
11602             }
11603         }
11604         p = lock_user_string(arg1);
11605         if (p) {
11606             if (num == TARGET_NR_listxattr) {
11607                 ret = get_errno(listxattr(p, b, arg3));
11608             } else {
11609                 ret = get_errno(llistxattr(p, b, arg3));
11610             }
11611         } else {
11612             ret = -TARGET_EFAULT;
11613         }
11614         unlock_user(p, arg1, 0);
11615         unlock_user(b, arg2, arg3);
11616         return ret;
11617     }
11618     case TARGET_NR_flistxattr:
11619     {
11620         void *b = 0;
11621         if (arg2) {
11622             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11623             if (!b) {
11624                 return -TARGET_EFAULT;
11625             }
11626         }
11627         ret = get_errno(flistxattr(arg1, b, arg3));
11628         unlock_user(b, arg2, arg3);
11629         return ret;
11630     }
11631     case TARGET_NR_setxattr:
11632     case TARGET_NR_lsetxattr:
11633         {
11634             void *p, *n, *v = 0;
11635             if (arg3) {
11636                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11637                 if (!v) {
11638                     return -TARGET_EFAULT;
11639                 }
11640             }
11641             p = lock_user_string(arg1);
11642             n = lock_user_string(arg2);
11643             if (p && n) {
11644                 if (num == TARGET_NR_setxattr) {
11645                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11646                 } else {
11647                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11648                 }
11649             } else {
11650                 ret = -TARGET_EFAULT;
11651             }
11652             unlock_user(p, arg1, 0);
11653             unlock_user(n, arg2, 0);
11654             unlock_user(v, arg3, 0);
11655         }
11656         return ret;
11657     case TARGET_NR_fsetxattr:
11658         {
11659             void *n, *v = 0;
11660             if (arg3) {
11661                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11662                 if (!v) {
11663                     return -TARGET_EFAULT;
11664                 }
11665             }
11666             n = lock_user_string(arg2);
11667             if (n) {
11668                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11669             } else {
11670                 ret = -TARGET_EFAULT;
11671             }
11672             unlock_user(n, arg2, 0);
11673             unlock_user(v, arg3, 0);
11674         }
11675         return ret;
11676     case TARGET_NR_getxattr:
11677     case TARGET_NR_lgetxattr:
11678         {
11679             void *p, *n, *v = 0;
11680             if (arg3) {
11681                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11682                 if (!v) {
11683                     return -TARGET_EFAULT;
11684                 }
11685             }
11686             p = lock_user_string(arg1);
11687             n = lock_user_string(arg2);
11688             if (p && n) {
11689                 if (num == TARGET_NR_getxattr) {
11690                     ret = get_errno(getxattr(p, n, v, arg4));
11691                 } else {
11692                     ret = get_errno(lgetxattr(p, n, v, arg4));
11693                 }
11694             } else {
11695                 ret = -TARGET_EFAULT;
11696             }
11697             unlock_user(p, arg1, 0);
11698             unlock_user(n, arg2, 0);
11699             unlock_user(v, arg3, arg4);
11700         }
11701         return ret;
11702     case TARGET_NR_fgetxattr:
11703         {
11704             void *n, *v = 0;
11705             if (arg3) {
11706                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11707                 if (!v) {
11708                     return -TARGET_EFAULT;
11709                 }
11710             }
11711             n = lock_user_string(arg2);
11712             if (n) {
11713                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11714             } else {
11715                 ret = -TARGET_EFAULT;
11716             }
11717             unlock_user(n, arg2, 0);
11718             unlock_user(v, arg3, arg4);
11719         }
11720         return ret;
11721     case TARGET_NR_removexattr:
11722     case TARGET_NR_lremovexattr:
11723         {
11724             void *p, *n;
11725             p = lock_user_string(arg1);
11726             n = lock_user_string(arg2);
11727             if (p && n) {
11728                 if (num == TARGET_NR_removexattr) {
11729                     ret = get_errno(removexattr(p, n));
11730                 } else {
11731                     ret = get_errno(lremovexattr(p, n));
11732                 }
11733             } else {
11734                 ret = -TARGET_EFAULT;
11735             }
11736             unlock_user(p, arg1, 0);
11737             unlock_user(n, arg2, 0);
11738         }
11739         return ret;
11740     case TARGET_NR_fremovexattr:
11741         {
11742             void *n;
11743             n = lock_user_string(arg2);
11744             if (n) {
11745                 ret = get_errno(fremovexattr(arg1, n));
11746             } else {
11747                 ret = -TARGET_EFAULT;
11748             }
11749             unlock_user(n, arg2, 0);
11750         }
11751         return ret;
11752 #endif
11753 #endif /* CONFIG_ATTR */
11754 #ifdef TARGET_NR_set_thread_area
11755     case TARGET_NR_set_thread_area:
11756 #if defined(TARGET_MIPS)
11757       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11758       return 0;
11759 #elif defined(TARGET_CRIS)
11760       if (arg1 & 0xff) {
11761           ret = -TARGET_EINVAL;
11762       } else {
11763           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11764           ret = 0;
11765       }
11766       return ret;
11767 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11768       return do_set_thread_area(cpu_env, arg1);
11769 #elif defined(TARGET_M68K)
11770       {
11771           TaskState *ts = cpu->opaque;
11772           ts->tp_value = arg1;
11773           return 0;
11774       }
11775 #else
11776       return -TARGET_ENOSYS;
11777 #endif
11778 #endif
11779 #ifdef TARGET_NR_get_thread_area
11780     case TARGET_NR_get_thread_area:
11781 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11782         return do_get_thread_area(cpu_env, arg1);
11783 #elif defined(TARGET_M68K)
11784         {
11785             TaskState *ts = cpu->opaque;
11786             return ts->tp_value;
11787         }
11788 #else
11789         return -TARGET_ENOSYS;
11790 #endif
11791 #endif
11792 #ifdef TARGET_NR_getdomainname
11793     case TARGET_NR_getdomainname:
11794         return -TARGET_ENOSYS;
11795 #endif
11796 
11797 #ifdef TARGET_NR_clock_settime
11798     case TARGET_NR_clock_settime:
11799     {
11800         struct timespec ts;
11801 
11802         ret = target_to_host_timespec(&ts, arg2);
11803         if (!is_error(ret)) {
11804             ret = get_errno(clock_settime(arg1, &ts));
11805         }
11806         return ret;
11807     }
11808 #endif
11809 #ifdef TARGET_NR_clock_settime64
11810     case TARGET_NR_clock_settime64:
11811     {
11812         struct timespec ts;
11813 
11814         ret = target_to_host_timespec64(&ts, arg2);
11815         if (!is_error(ret)) {
11816             ret = get_errno(clock_settime(arg1, &ts));
11817         }
11818         return ret;
11819     }
11820 #endif
11821 #ifdef TARGET_NR_clock_gettime
11822     case TARGET_NR_clock_gettime:
11823     {
11824         struct timespec ts;
11825         ret = get_errno(clock_gettime(arg1, &ts));
11826         if (!is_error(ret)) {
11827             ret = host_to_target_timespec(arg2, &ts);
11828         }
11829         return ret;
11830     }
11831 #endif
11832 #ifdef TARGET_NR_clock_gettime64
11833     case TARGET_NR_clock_gettime64:
11834     {
11835         struct timespec ts;
11836         ret = get_errno(clock_gettime(arg1, &ts));
11837         if (!is_error(ret)) {
11838             ret = host_to_target_timespec64(arg2, &ts);
11839         }
11840         return ret;
11841     }
11842 #endif
11843 #ifdef TARGET_NR_clock_getres
11844     case TARGET_NR_clock_getres:
11845     {
11846         struct timespec ts;
11847         ret = get_errno(clock_getres(arg1, &ts));
11848         if (!is_error(ret)) {
11849             host_to_target_timespec(arg2, &ts);
11850         }
11851         return ret;
11852     }
11853 #endif
11854 #ifdef TARGET_NR_clock_getres_time64
11855     case TARGET_NR_clock_getres_time64:
11856     {
11857         struct timespec ts;
11858         ret = get_errno(clock_getres(arg1, &ts));
11859         if (!is_error(ret)) {
11860             host_to_target_timespec64(arg2, &ts);
11861         }
11862         return ret;
11863     }
11864 #endif
11865 #ifdef TARGET_NR_clock_nanosleep
11866     case TARGET_NR_clock_nanosleep:
11867     {
11868         struct timespec ts;
11869         target_to_host_timespec(&ts, arg3);
11870         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11871                                              &ts, arg4 ? &ts : NULL));
11872         /*
11873          * If the call is interrupted by a signal handler, it fails with
11874          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
11875          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
11876          */
11877         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME) {
11878             host_to_target_timespec(arg4, &ts);
11879         }
11880 
11881         return ret;
11882     }
11883 #endif
11884 
11885 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11886     case TARGET_NR_set_tid_address:
11887         return get_errno(set_tid_address((int *)g2h(arg1)));
11888 #endif
11889 
11890     case TARGET_NR_tkill:
11891         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11892 
11893     case TARGET_NR_tgkill:
11894         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11895                          target_to_host_signal(arg3)));
11896 
11897 #ifdef TARGET_NR_set_robust_list
11898     case TARGET_NR_set_robust_list:
11899     case TARGET_NR_get_robust_list:
11900         /* The ABI for supporting robust futexes has userspace pass
11901          * the kernel a pointer to a linked list which is updated by
11902          * userspace after the syscall; the list is walked by the kernel
11903          * when the thread exits. Since the linked list in QEMU guest
11904          * memory isn't a valid linked list for the host and we have
11905          * no way to reliably intercept the thread-death event, we can't
11906          * support these. Silently return ENOSYS so that guest userspace
11907          * falls back to a non-robust futex implementation (which should
11908          * be OK except in the corner case of the guest crashing while
11909          * holding a mutex that is shared with another process via
11910          * shared memory).
11911          */
11912         return -TARGET_ENOSYS;
11913 #endif
11914 
11915 #if defined(TARGET_NR_utimensat)
11916     case TARGET_NR_utimensat:
11917         {
11918             struct timespec *tsp, ts[2];
11919             if (!arg3) {
11920                 tsp = NULL;
11921             } else {
11922                 if (target_to_host_timespec(ts, arg3)) {
11923                     return -TARGET_EFAULT;
11924                 }
11925                 if (target_to_host_timespec(ts + 1, arg3 +
11926                                             sizeof(struct target_timespec))) {
11927                     return -TARGET_EFAULT;
11928                 }
11929                 tsp = ts;
11930             }
11931             if (!arg2) {
11932                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11933             } else {
11934                 if (!(p = lock_user_string(arg2))) {
11935                     return -TARGET_EFAULT;
11936                 }
11937                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11938                 unlock_user(p, arg2, 0);
11939             }
11940         }
11941         return ret;
11942 #endif
11943 #ifdef TARGET_NR_futex
11944     case TARGET_NR_futex:
11945         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11946 #endif
11947 #ifdef TARGET_NR_futex_time64
11948     case TARGET_NR_futex_time64:
11949         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11950 #endif
11951 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11952     case TARGET_NR_inotify_init:
11953         ret = get_errno(sys_inotify_init());
11954         if (ret >= 0) {
11955             fd_trans_register(ret, &target_inotify_trans);
11956         }
11957         return ret;
11958 #endif
11959 #ifdef CONFIG_INOTIFY1
11960 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11961     case TARGET_NR_inotify_init1:
11962         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11963                                           fcntl_flags_tbl)));
11964         if (ret >= 0) {
11965             fd_trans_register(ret, &target_inotify_trans);
11966         }
11967         return ret;
11968 #endif
11969 #endif
11970 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11971     case TARGET_NR_inotify_add_watch:
11972         p = lock_user_string(arg2);
11973         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11974         unlock_user(p, arg2, 0);
11975         return ret;
11976 #endif
11977 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11978     case TARGET_NR_inotify_rm_watch:
11979         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11980 #endif
11981 
11982 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11983     case TARGET_NR_mq_open:
11984         {
11985             struct mq_attr posix_mq_attr;
11986             struct mq_attr *pposix_mq_attr;
11987             int host_flags;
11988 
11989             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11990             pposix_mq_attr = NULL;
11991             if (arg4) {
11992                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11993                     return -TARGET_EFAULT;
11994                 }
11995                 pposix_mq_attr = &posix_mq_attr;
11996             }
11997             p = lock_user_string(arg1 - 1);
11998             if (!p) {
11999                 return -TARGET_EFAULT;
12000             }
12001             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12002             unlock_user(p, arg1, 0);
12003         }
12004         return ret;
12005 
12006     case TARGET_NR_mq_unlink:
12007         p = lock_user_string(arg1 - 1);
12008         if (!p) {
12009             return -TARGET_EFAULT;
12010         }
12011         ret = get_errno(mq_unlink(p));
12012         unlock_user(p, arg1, 0);
12013         return ret;
12014 
12015 #ifdef TARGET_NR_mq_timedsend
12016     case TARGET_NR_mq_timedsend:
12017         {
12018             struct timespec ts;
12019 
12020             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12021             if (arg5 != 0) {
12022                 target_to_host_timespec(&ts, arg5);
12023                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12024                 host_to_target_timespec(arg5, &ts);
12025             } else {
12026                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12027             }
12028             unlock_user(p, arg2, arg3);
12029         }
12030         return ret;
12031 #endif
12032 
12033 #ifdef TARGET_NR_mq_timedreceive
12034     case TARGET_NR_mq_timedreceive:
12035         {
12036             struct timespec ts;
12037             unsigned int prio;
12038 
12039             /* The received message is written into the guest buffer. */
                  p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12040             if (arg5 != 0) {
12041                 if (target_to_host_timespec(&ts, arg5)) {
                          unlock_user(p, arg2, 0);
                          return -TARGET_EFAULT;
                      }
12042                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12043                                                      &prio, &ts));
12044                 host_to_target_timespec(arg5, &ts);
12045             } else {
12046                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12047                                                      &prio, NULL));
12048             }
12049             unlock_user(p, arg2, arg3);
12050             if (arg4 != 0) {
12051                 put_user_u32(prio, arg4);
                  }
12052         }
12053         return ret;
12054 #endif
12055 
12056     /* Not implemented for now... */
12057 /*     case TARGET_NR_mq_notify: */
12058 /*         break; */
12059 
12060     case TARGET_NR_mq_getsetattr:
12061         {
12062             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12063             ret = 0;
12064             if (arg2 != 0) {
12065                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12066                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12067                                            &posix_mq_attr_out));
12068             } else if (arg3 != 0) {
12069                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12070             }
12071             if (ret == 0 && arg3 != 0) {
12072                 if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                          return -TARGET_EFAULT;
                      }
12073             }
12074         }
12075         return ret;
12076 #endif
12077 
12078 #ifdef CONFIG_SPLICE
12079 #ifdef TARGET_NR_tee
12080     case TARGET_NR_tee:
12081         {
12082             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12083         }
12084         return ret;
12085 #endif
12086 #ifdef TARGET_NR_splice
12087     case TARGET_NR_splice:
12088         {
12089             loff_t loff_in, loff_out;
12090             loff_t *ploff_in = NULL, *ploff_out = NULL;
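                  /*
                   * The in/out offsets are optional 64-bit values: copy each
                   * one in from guest memory when its pointer is non-zero and
                   * write the value updated by the host splice() back out
                   * afterwards.
                   */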
12091             if (arg2) {
12092                 if (get_user_u64(loff_in, arg2)) {
12093                     return -TARGET_EFAULT;
12094                 }
12095                 ploff_in = &loff_in;
12096             }
12097             if (arg4) {
12098                 if (get_user_u64(loff_out, arg4)) {
12099                     return -TARGET_EFAULT;
12100                 }
12101                 ploff_out = &loff_out;
12102             }
12103             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12104             if (arg2) {
12105                 if (put_user_u64(loff_in, arg2)) {
12106                     return -TARGET_EFAULT;
12107                 }
12108             }
12109             if (arg4) {
12110                 if (put_user_u64(loff_out, arg4)) {
12111                     return -TARGET_EFAULT;
12112                 }
12113             }
12114         }
12115         return ret;
12116 #endif
12117 #ifdef TARGET_NR_vmsplice
12118     case TARGET_NR_vmsplice:
12119         {
12120             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12121             if (vec != NULL) {
12122                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12123                 unlock_iovec(vec, arg2, arg3, 0);
12124             } else {
12125                 ret = -host_to_target_errno(errno);
12126             }
12127         }
12128         return ret;
12129 #endif
12130 #endif /* CONFIG_SPLICE */
12131 #ifdef CONFIG_EVENTFD
12132 #if defined(TARGET_NR_eventfd)
12133     case TARGET_NR_eventfd:
12134         ret = get_errno(eventfd(arg1, 0));
12135         if (ret >= 0) {
12136             fd_trans_register(ret, &target_eventfd_trans);
12137         }
12138         return ret;
12139 #endif
12140 #if defined(TARGET_NR_eventfd2)
12141     case TARGET_NR_eventfd2:
12142     {
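              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share their values with
               * O_NONBLOCK/O_CLOEXEC, which differ between target ABIs, so
               * translate just those two bits by hand and pass any remaining
               * flags through unchanged.
               */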
12143         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12144         if (arg2 & TARGET_O_NONBLOCK) {
12145             host_flags |= O_NONBLOCK;
12146         }
12147         if (arg2 & TARGET_O_CLOEXEC) {
12148             host_flags |= O_CLOEXEC;
12149         }
12150         ret = get_errno(eventfd(arg1, host_flags));
12151         if (ret >= 0) {
12152             fd_trans_register(ret, &target_eventfd_trans);
12153         }
12154         return ret;
12155     }
12156 #endif
12157 #endif /* CONFIG_EVENTFD  */
12158 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12159     case TARGET_NR_fallocate:
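              /*
               * On 32-bit ABIs the 64-bit offset and length arrive split
               * across two register-sized arguments; target_offset64()
               * reassembles them in the order the guest ABI uses.
               */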
12160 #if TARGET_ABI_BITS == 32
12161         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12162                                   target_offset64(arg5, arg6)));
12163 #else
12164         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12165 #endif
12166         return ret;
12167 #endif
12168 #if defined(CONFIG_SYNC_FILE_RANGE)
12169 #if defined(TARGET_NR_sync_file_range)
12170     case TARGET_NR_sync_file_range:
12171 #if TARGET_ABI_BITS == 32
12172 #if defined(TARGET_MIPS)
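              /*
               * MIPS o32 aligns 64-bit syscall arguments to even register
               * pairs, so a padding slot shifts the offsets to arg3..arg6
               * and the flags to arg7.
               */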
12173         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12174                                         target_offset64(arg5, arg6), arg7));
12175 #else
12176         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12177                                         target_offset64(arg4, arg5), arg6));
12178 #endif /* !TARGET_MIPS */
12179 #else
12180         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12181 #endif
12182         return ret;
12183 #endif
12184 #if defined(TARGET_NR_sync_file_range2) || \
12185     defined(TARGET_NR_arm_sync_file_range)
12186 #if defined(TARGET_NR_sync_file_range2)
12187     case TARGET_NR_sync_file_range2:
12188 #endif
12189 #if defined(TARGET_NR_arm_sync_file_range)
12190     case TARGET_NR_arm_sync_file_range:
12191 #endif
12192         /*
               * Like sync_file_range, but with the flags moved up to arg2 so
               * that the two 64-bit values stay in naturally aligned register
               * pairs on 32-bit ABIs.
               */
12193 #if TARGET_ABI_BITS == 32
12194         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12195                                         target_offset64(arg5, arg6), arg2));
12196 #else
12197         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12198 #endif
12199         return ret;
12200 #endif
12201 #endif
12202 #if defined(TARGET_NR_signalfd4)
12203     case TARGET_NR_signalfd4:
12204         return do_signalfd4(arg1, arg2, arg4);
12205 #endif
12206 #if defined(TARGET_NR_signalfd)
12207     case TARGET_NR_signalfd:
12208         return do_signalfd4(arg1, arg2, 0);
12209 #endif
12210 #if defined(CONFIG_EPOLL)
12211 #if defined(TARGET_NR_epoll_create)
12212     case TARGET_NR_epoll_create:
12213         return get_errno(epoll_create(arg1));
12214 #endif
12215 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12216     case TARGET_NR_epoll_create1:
12217         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12218 #endif
12219 #if defined(TARGET_NR_epoll_ctl)
12220     case TARGET_NR_epoll_ctl:
12221     {
12222         struct epoll_event ep;
12223         struct epoll_event *epp = NULL;
12224         if (arg4) {
12225             struct target_epoll_event *target_ep;
12226             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12227                 return -TARGET_EFAULT;
12228             }
12229             ep.events = tswap32(target_ep->events);
12230             /* The epoll_data_t union is just opaque data to the kernel,
12231              * so we transfer all 64 bits across and need not worry what
12232              * actual data type it is.
12233              */
12234             ep.data.u64 = tswap64(target_ep->data.u64);
12235             unlock_user_struct(target_ep, arg4, 0);
12236             epp = &ep;
12237         }
12238         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12239     }
12240 #endif
12241 
12242 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12243 #if defined(TARGET_NR_epoll_wait)
12244     case TARGET_NR_epoll_wait:
12245 #endif
12246 #if defined(TARGET_NR_epoll_pwait)
12247     case TARGET_NR_epoll_pwait:
12248 #endif
12249     {
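              /*
               * Both variants share this implementation: events are gathered
               * into a host-side buffer and byte-swapped into the guest's
               * target_epoll_event array afterwards; epoll_pwait additionally
               * converts the guest sigset before calling the host.
               */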
12250         struct target_epoll_event *target_ep;
12251         struct epoll_event *ep;
12252         int epfd = arg1;
12253         int maxevents = arg3;
12254         int timeout = arg4;
12255 
12256         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12257             return -TARGET_EINVAL;
12258         }
12259 
12260         target_ep = lock_user(VERIFY_WRITE, arg2,
12261                               maxevents * sizeof(struct target_epoll_event), 1);
12262         if (!target_ep) {
12263             return -TARGET_EFAULT;
12264         }
12265 
12266         ep = g_try_new(struct epoll_event, maxevents);
12267         if (!ep) {
12268             unlock_user(target_ep, arg2, 0);
12269             return -TARGET_ENOMEM;
12270         }
12271 
12272         switch (num) {
12273 #if defined(TARGET_NR_epoll_pwait)
12274         case TARGET_NR_epoll_pwait:
12275         {
12276             target_sigset_t *target_set;
12277             sigset_t _set, *set = &_set;
12278 
12279             if (arg5) {
12280                 if (arg6 != sizeof(target_sigset_t)) {
12281                     ret = -TARGET_EINVAL;
12282                     break;
12283                 }
12284 
12285                 target_set = lock_user(VERIFY_READ, arg5,
12286                                        sizeof(target_sigset_t), 1);
12287                 if (!target_set) {
12288                     ret = -TARGET_EFAULT;
12289                     break;
12290                 }
12291                 target_to_host_sigset(set, target_set);
12292                 unlock_user(target_set, arg5, 0);
12293             } else {
12294                 set = NULL;
12295             }
12296 
12297             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12298                                              set, SIGSET_T_SIZE));
12299             break;
12300         }
12301 #endif
12302 #if defined(TARGET_NR_epoll_wait)
12303         case TARGET_NR_epoll_wait:
12304             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12305                                              NULL, 0));
12306             break;
12307 #endif
12308         default:
12309             ret = -TARGET_ENOSYS;
12310         }
12311         if (!is_error(ret)) {
12312             int i;
12313             for (i = 0; i < ret; i++) {
12314                 target_ep[i].events = tswap32(ep[i].events);
12315                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12316             }
12317             unlock_user(target_ep, arg2,
12318                         ret * sizeof(struct target_epoll_event));
12319         } else {
12320             unlock_user(target_ep, arg2, 0);
12321         }
12322         g_free(ep);
12323         return ret;
12324     }
12325 #endif
12326 #endif
12327 #ifdef TARGET_NR_prlimit64
12328     case TARGET_NR_prlimit64:
12329     {
12330         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12331         struct target_rlimit64 *target_rnew, *target_rold;
12332         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12333         int resource = target_to_host_resource(arg2);
12334 
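              /*
               * New limits for RLIMIT_AS/DATA/STACK are not passed through,
               * presumably because they would also constrain the QEMU host
               * process itself; only reading the old limits works for those
               * resources.
               */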
12335         if (arg3 && (resource != RLIMIT_AS &&
12336                      resource != RLIMIT_DATA &&
12337                      resource != RLIMIT_STACK)) {
12338             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12339                 return -TARGET_EFAULT;
12340             }
12341             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12342             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12343             unlock_user_struct(target_rnew, arg3, 0);
12344             rnewp = &rnew;
12345         }
12346 
12347         ret = get_errno(sys_prlimit64(arg1, resource, rnewp,
                                            arg4 ? &rold : NULL));
12348         if (!is_error(ret) && arg4) {
12349             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12350                 return -TARGET_EFAULT;
12351             }
12352             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12353             target_rold->rlim_max = tswap64(rold.rlim_max);
12354             unlock_user_struct(target_rold, arg4, 1);
12355         }
12356         return ret;
12357     }
12358 #endif
12359 #ifdef TARGET_NR_gethostname
12360     case TARGET_NR_gethostname:
12361     {
12362         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12363         if (name) {
12364             ret = get_errno(gethostname(name, arg2));
12365             unlock_user(name, arg1, arg2);
12366         } else {
12367             ret = -TARGET_EFAULT;
12368         }
12369         return ret;
12370     }
12371 #endif
12372 #ifdef TARGET_NR_atomic_cmpxchg_32
12373     case TARGET_NR_atomic_cmpxchg_32:
12374     {
12375         /* should use start_exclusive from main.c */
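              /*
               * Without that, the load/compare/store sequence below is not
               * atomic with respect to other guest threads.
               */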
12376         abi_ulong mem_value;
12377         if (get_user_u32(mem_value, arg6)) {
12378             target_siginfo_t info;
12379             info.si_signo = SIGSEGV;
12380             info.si_errno = 0;
12381             info.si_code = TARGET_SEGV_MAPERR;
12382             info._sifields._sigfault._addr = arg6;
12383             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12384                          QEMU_SI_FAULT, &info);
12385             ret = 0xdeadbeef;
12386 
12387         }
12388         if (mem_value == arg2) {
12389             put_user_u32(arg1, arg6);
              }
12390         return mem_value;
12391     }
12392 #endif
12393 #ifdef TARGET_NR_atomic_barrier
12394     case TARGET_NR_atomic_barrier:
12395         /* Like the kernel implementation and the QEMU ARM barrier
12396            helper, treat this as a no-op. */
12397         return 0;
12398 #endif
12399 
12400 #ifdef TARGET_NR_timer_create
12401     case TARGET_NR_timer_create:
12402     {
12403         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12404 
12405         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12406 
12407         int clkid = arg1;
12408         int timer_index = next_free_host_timer();
12409 
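              /*
               * Host timers live in the g_posix_timers[] table; the id handed
               * back to the guest is the table index tagged with TIMER_MAGIC
               * so that get_timer_id() can validate it on later timer_* calls.
               */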
12410         if (timer_index < 0) {
12411             ret = -TARGET_EAGAIN;
12412         } else {
12413             timer_t *phtimer = g_posix_timers + timer_index;
12414 
12415             if (arg2) {
12416                 phost_sevp = &host_sevp;
12417                 ret = target_to_host_sigevent(phost_sevp, arg2);
12418                 if (ret != 0) {
12419                     return ret;
12420                 }
12421             }
12422 
12423             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12424             if (ret) {
12425                 phtimer = NULL;
12426             } else {
12427                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12428                     return -TARGET_EFAULT;
12429                 }
12430             }
12431         }
12432         return ret;
12433     }
12434 #endif
12435 
12436 #ifdef TARGET_NR_timer_settime
12437     case TARGET_NR_timer_settime:
12438     {
12439         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12440          * struct itimerspec * old_value */
12441         target_timer_t timerid = get_timer_id(arg1);
12442 
12443         if (timerid < 0) {
12444             ret = timerid;
12445         } else if (arg3 == 0) {
12446             ret = -TARGET_EINVAL;
12447         } else {
12448             timer_t htimer = g_posix_timers[timerid];
12449             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12450 
12451             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12452                 return -TARGET_EFAULT;
12453             }
12454             ret = get_errno(
12455                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12456             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12457                 return -TARGET_EFAULT;
12458             }
12459         }
12460         return ret;
12461     }
12462 #endif
12463 
12464 #ifdef TARGET_NR_timer_settime64
12465     case TARGET_NR_timer_settime64:
12466     {
12467         target_timer_t timerid = get_timer_id(arg1);
12468 
12469         if (timerid < 0) {
12470             ret = timerid;
12471         } else if (arg3 == 0) {
12472             ret = -TARGET_EINVAL;
12473         } else {
12474             timer_t htimer = g_posix_timers[timerid];
12475             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12476 
12477             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12478                 return -TARGET_EFAULT;
12479             }
12480             ret = get_errno(
12481                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12482             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12483                 return -TARGET_EFAULT;
12484             }
12485         }
12486         return ret;
12487     }
12488 #endif
12489 
12490 #ifdef TARGET_NR_timer_gettime
12491     case TARGET_NR_timer_gettime:
12492     {
12493         /* args: timer_t timerid, struct itimerspec *curr_value */
12494         target_timer_t timerid = get_timer_id(arg1);
12495 
12496         if (timerid < 0) {
12497             ret = timerid;
12498         } else if (!arg2) {
12499             ret = -TARGET_EFAULT;
12500         } else {
12501             timer_t htimer = g_posix_timers[timerid];
12502             struct itimerspec hspec;
12503             ret = get_errno(timer_gettime(htimer, &hspec));
12504 
12505             if (host_to_target_itimerspec(arg2, &hspec)) {
12506                 ret = -TARGET_EFAULT;
12507             }
12508         }
12509         return ret;
12510     }
12511 #endif
12512 
12513 #ifdef TARGET_NR_timer_gettime64
12514     case TARGET_NR_timer_gettime64:
12515     {
12516         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12517         target_timer_t timerid = get_timer_id(arg1);
12518 
12519         if (timerid < 0) {
12520             ret = timerid;
12521         } else if (!arg2) {
12522             ret = -TARGET_EFAULT;
12523         } else {
12524             timer_t htimer = g_posix_timers[timerid];
12525             struct itimerspec hspec;
12526             ret = get_errno(timer_gettime(htimer, &hspec));
12527 
12528             if (host_to_target_itimerspec64(arg2, &hspec)) {
12529                 ret = -TARGET_EFAULT;
12530             }
12531         }
12532         return ret;
12533     }
12534 #endif
12535 
12536 #ifdef TARGET_NR_timer_getoverrun
12537     case TARGET_NR_timer_getoverrun:
12538     {
12539         /* args: timer_t timerid */
12540         target_timer_t timerid = get_timer_id(arg1);
12541 
12542         if (timerid < 0) {
12543             ret = timerid;
12544         } else {
12545             timer_t htimer = g_posix_timers[timerid];
12546             ret = get_errno(timer_getoverrun(htimer));
12547         }
12548         return ret;
12549     }
12550 #endif
12551 
12552 #ifdef TARGET_NR_timer_delete
12553     case TARGET_NR_timer_delete:
12554     {
12555         /* args: timer_t timerid */
12556         target_timer_t timerid = get_timer_id(arg1);
12557 
12558         if (timerid < 0) {
12559             ret = timerid;
12560         } else {
12561             timer_t htimer = g_posix_timers[timerid];
12562             ret = get_errno(timer_delete(htimer));
12563             g_posix_timers[timerid] = 0;
12564         }
12565         return ret;
12566     }
12567 #endif
12568 
12569 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12570     case TARGET_NR_timerfd_create:
12571         return get_errno(timerfd_create(arg1,
12572                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12573 #endif
12574 
12575 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12576     case TARGET_NR_timerfd_gettime:
12577         {
12578             struct itimerspec its_curr;
12579 
12580             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12581 
12582             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12583                 return -TARGET_EFAULT;
12584             }
12585         }
12586         return ret;
12587 #endif
12588 
12589 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12590     case TARGET_NR_timerfd_gettime64:
12591         {
12592             struct itimerspec its_curr;
12593 
12594             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12595 
12596             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12597                 return -TARGET_EFAULT;
12598             }
12599         }
12600         return ret;
12601 #endif
12602 
12603 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12604     case TARGET_NR_timerfd_settime:
12605         {
12606             struct itimerspec its_new, its_old, *p_new;
12607 
12608             if (arg3) {
12609                 if (target_to_host_itimerspec(&its_new, arg3)) {
12610                     return -TARGET_EFAULT;
12611                 }
12612                 p_new = &its_new;
12613             } else {
12614                 p_new = NULL;
12615             }
12616 
12617             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12618 
12619             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12620                 return -TARGET_EFAULT;
12621             }
12622         }
12623         return ret;
12624 #endif
12625 
12626 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12627     case TARGET_NR_timerfd_settime64:
12628         {
12629             struct itimerspec its_new, its_old, *p_new;
12630 
12631             if (arg3) {
12632                 if (target_to_host_itimerspec64(&its_new, arg3)) {
12633                     return -TARGET_EFAULT;
12634                 }
12635                 p_new = &its_new;
12636             } else {
12637                 p_new = NULL;
12638             }
12639 
12640             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12641 
12642             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12643                 return -TARGET_EFAULT;
12644             }
12645         }
12646         return ret;
12647 #endif
12648 
12649 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12650     case TARGET_NR_ioprio_get:
12651         return get_errno(ioprio_get(arg1, arg2));
12652 #endif
12653 
12654 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12655     case TARGET_NR_ioprio_set:
12656         return get_errno(ioprio_set(arg1, arg2, arg3));
12657 #endif
12658 
12659 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12660     case TARGET_NR_setns:
12661         return get_errno(setns(arg1, arg2));
12662 #endif
12663 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12664     case TARGET_NR_unshare:
12665         return get_errno(unshare(arg1));
12666 #endif
12667 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12668     case TARGET_NR_kcmp:
12669         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12670 #endif
12671 #ifdef TARGET_NR_swapcontext
12672     case TARGET_NR_swapcontext:
12673         /* PowerPC specific.  */
12674         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12675 #endif
12676 #ifdef TARGET_NR_memfd_create
12677     case TARGET_NR_memfd_create:
12678         p = lock_user_string(arg1);
12679         if (!p) {
12680             return -TARGET_EFAULT;
12681         }
12682         ret = get_errno(memfd_create(p, arg2));
12683         fd_trans_unregister(ret);
12684         unlock_user(p, arg1, 0);
12685         return ret;
12686 #endif
12687 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12688     case TARGET_NR_membarrier:
12689         return get_errno(membarrier(arg1, arg2));
12690 #endif
12691 
12692     default:
12693         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12694         return -TARGET_ENOSYS;
12695     }
12696     return ret;
12697 }
12698 
12699 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12700                     abi_long arg2, abi_long arg3, abi_long arg4,
12701                     abi_long arg5, abi_long arg6, abi_long arg7,
12702                     abi_long arg8)
12703 {
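          /*
           * Thin wrapper around do_syscall1(): record the plugin/trace events
           * and emit -strace style logging around the actual dispatch.
           */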
12704     CPUState *cpu = env_cpu(cpu_env);
12705     abi_long ret;
12706 
12707 #ifdef DEBUG_ERESTARTSYS
12708     /* Debug-only code for exercising the syscall-restart code paths
12709      * in the per-architecture cpu main loops: restart every syscall
12710      * the guest makes once before letting it through.
12711      */
12712     {
12713         static bool flag;
12714         flag = !flag;
12715         if (flag) {
12716             return -TARGET_ERESTARTSYS;
12717         }
12718     }
12719 #endif
12720 
12721     record_syscall_start(cpu, num, arg1,
12722                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12723 
12724     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12725         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12726     }
12727 
12728     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12729                       arg5, arg6, arg7, arg8);
12730 
12731     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12732         print_syscall_ret(num, ret, arg1, arg2, arg3, arg4, arg5, arg6);
12733     }
12734 
12735     record_syscall_return(cpu, num, ret);
12736     return ret;
12737 }
12738