xref: /openbmc/qemu/linux-user/syscall.c (revision 9bdfa4d2)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #if defined(CONFIG_FIEMAP)
93 #include <linux/fiemap.h>
94 #endif
95 #include <linux/fb.h>
96 #if defined(CONFIG_USBFS)
97 #include <linux/usbdevice_fs.h>
98 #include <linux/usb/ch9.h>
99 #endif
100 #include <linux/vt.h>
101 #include <linux/dm-ioctl.h>
102 #include <linux/reboot.h>
103 #include <linux/route.h>
104 #include <linux/filter.h>
105 #include <linux/blkpg.h>
106 #include <netpacket/packet.h>
107 #include <linux/netlink.h>
108 #include <linux/if_alg.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "qemu/guest-random.h"
114 #include "qapi/error.h"
115 #include "fd-trans.h"
116 
117 #ifndef CLONE_IO
118 #define CLONE_IO                0x80000000      /* Clone io context */
119 #endif
120 
121 /* We can't directly call the host clone syscall, because this will
122  * badly confuse libc (breaking mutexes, for example). So we must
123  * divide clone flags into:
124  *  * flag combinations that look like pthread_create()
125  *  * flag combinations that look like fork()
126  *  * flags we can implement within QEMU itself
127  *  * flags we can't support and will return an error for
128  */
129 /* For thread creation, all these flags must be present; for
130  * fork, none must be present.
131  */
132 #define CLONE_THREAD_FLAGS                              \
133     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
134      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
135 
136 /* These flags are ignored:
137  * CLONE_DETACHED is now ignored by the kernel;
138  * CLONE_IO is just an optimisation hint to the I/O scheduler
139  */
140 #define CLONE_IGNORED_FLAGS                     \
141     (CLONE_DETACHED | CLONE_IO)
142 
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS               \
145     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
146      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
147 
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
150     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
151      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
152 
153 #define CLONE_INVALID_FORK_FLAGS                                        \
154     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
155 
156 #define CLONE_INVALID_THREAD_FLAGS                                      \
157     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
158        CLONE_IGNORED_FLAGS))
159 
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161  * have almost all been allocated. We cannot support any of
162  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164  * The checks against the invalid thread masks above will catch these.
165  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166  */
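
/*
 * Example classification (a sketch): glibc's pthread_create() typically
 * calls clone() with
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS plus only optional thread
 * flags, so it is treated as creation of a new guest thread.  A plain
 * fork()-style clone passes little more than the exit signal (SIGCHLD),
 * matching the fork mask instead; anything that hits one of the
 * CLONE_INVALID_*_FLAGS masks is rejected with an error.
 */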
167 
168 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
169  * once. This exercises the codepaths for restart.
170  */
171 //#define DEBUG_ERESTARTSYS
172 
173 //#include <linux/msdos_fs.h>
174 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
175 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
176 
177 #undef _syscall0
178 #undef _syscall1
179 #undef _syscall2
180 #undef _syscall3
181 #undef _syscall4
182 #undef _syscall5
183 #undef _syscall6
184 
185 #define _syscall0(type,name)		\
186 static type name (void)			\
187 {					\
188 	return syscall(__NR_##name);	\
189 }
190 
191 #define _syscall1(type,name,type1,arg1)		\
192 static type name (type1 arg1)			\
193 {						\
194 	return syscall(__NR_##name, arg1);	\
195 }
196 
197 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
198 static type name (type1 arg1,type2 arg2)		\
199 {							\
200 	return syscall(__NR_##name, arg1, arg2);	\
201 }
202 
203 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
204 static type name (type1 arg1,type2 arg2,type3 arg3)		\
205 {								\
206 	return syscall(__NR_##name, arg1, arg2, arg3);		\
207 }
208 
209 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
210 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
211 {										\
212 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
213 }
214 
215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
216 		  type5,arg5)							\
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
218 {										\
219 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
220 }
221 
222 
223 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
224 		  type5,arg5,type6,arg6)					\
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
226                   type6 arg6)							\
227 {										\
228 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
229 }
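
/*
 * As an illustration, the declaration further down
 *
 *     _syscall0(int, sys_gettid)
 *
 * expands to
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * (__NR_sys_gettid is #defined back to __NR_gettid just before it), i.e.
 * a thin wrapper that always enters the host syscall directly instead of
 * going through any libc wrapper.
 */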
230 
231 
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #define __NR_sys_statx __NR_statx
245 
246 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
247 #define __NR__llseek __NR_lseek
248 #endif
249 
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
253 #endif
254 
255 #define __NR_sys_gettid __NR_gettid
256 _syscall0(int, sys_gettid)
257 
258 /* For the 64-bit guest on 32-bit host case we must emulate
259  * getdents using getdents64, because otherwise the host
260  * might hand us back more dirent records than we can fit
261  * into the guest buffer after structure format conversion.
262  * In all other cases we implement getdents with the host's own getdents, if it has one.
263  */
264 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
265 #define EMULATE_GETDENTS_WITH_GETDENTS
266 #endif
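
/*
 * Concretely: a 32-bit host's struct linux_dirent carries 4-byte
 * d_ino/d_off fields while a 64-bit guest expects 8-byte ones, so each
 * record grows during conversion and a buffer-full of host records might
 * no longer fit in the guest buffer.  getdents64 records use fixed-width
 * 64-bit fields on every host, so that overflow cannot happen.
 */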
267 
268 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
270 #endif
271 #if (defined(TARGET_NR_getdents) && \
272       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
273     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
274 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
275 #endif
276 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
277 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
278           loff_t *, res, uint, wh);
279 #endif
280 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
281 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
282           siginfo_t *, uinfo)
283 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
284 #ifdef __NR_exit_group
285 _syscall1(int,exit_group,int,error_code)
286 #endif
287 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
288 _syscall1(int,set_tid_address,int *,tidptr)
289 #endif
290 #if defined(TARGET_NR_futex) && defined(__NR_futex)
291 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
292           const struct timespec *,timeout,int *,uaddr2,int,val3)
293 #endif
294 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
295 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
296           unsigned long *, user_mask_ptr);
297 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
298 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
299           unsigned long *, user_mask_ptr);
300 #define __NR_sys_getcpu __NR_getcpu
301 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
302 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
303           void *, arg);
304 _syscall2(int, capget, struct __user_cap_header_struct *, header,
305           struct __user_cap_data_struct *, data);
306 _syscall2(int, capset, struct __user_cap_header_struct *, header,
307           struct __user_cap_data_struct *, data);
308 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
309 _syscall2(int, ioprio_get, int, which, int, who)
310 #endif
311 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
312 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
313 #endif
314 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
315 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
316 #endif
317 
318 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
319 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
320           unsigned long, idx1, unsigned long, idx2)
321 #endif
322 
323 /*
324  * It is assumed that struct statx is architecture independent.
325  */
326 #if defined(TARGET_NR_statx) && defined(__NR_statx)
327 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
328           unsigned int, mask, struct target_statx *, statxbuf)
329 #endif
330 
331 static bitmask_transtbl fcntl_flags_tbl[] = {
332   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
333   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
334   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
335   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
336   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
337   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
338   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
339   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
340   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
341   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
342   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
343   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
344   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
345 #if defined(O_DIRECT)
346   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
347 #endif
348 #if defined(O_NOATIME)
349   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
350 #endif
351 #if defined(O_CLOEXEC)
352   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
353 #endif
354 #if defined(O_PATH)
355   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
356 #endif
357 #if defined(O_TMPFILE)
358   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
359 #endif
360   /* Don't terminate the list prematurely on 64-bit host+guest.  */
361 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
362   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
363 #endif
364   { 0, 0, 0, 0 }
365 };
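
/*
 * Each row is (target_mask, target_bits, host_mask, host_bits); guest
 * open(2) flags are translated with
 * target_to_host_bitmask(flags, fcntl_flags_tbl), so e.g. a guest
 * passing TARGET_O_CREAT | TARGET_O_NONBLOCK ends up with the host's
 * O_CREAT | O_NONBLOCK even when the numeric values differ between ABIs.
 */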
366 
367 static int sys_getcwd1(char *buf, size_t size)
368 {
369   if (getcwd(buf, size) == NULL) {
370       /* getcwd() sets errno */
371       return (-1);
372   }
373   return strlen(buf)+1;
374 }
375 
376 #ifdef TARGET_NR_utimensat
377 #if defined(__NR_utimensat)
378 #define __NR_sys_utimensat __NR_utimensat
379 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
380           const struct timespec *,tsp,int,flags)
381 #else
382 static int sys_utimensat(int dirfd, const char *pathname,
383                          const struct timespec times[2], int flags)
384 {
385     errno = ENOSYS;
386     return -1;
387 }
388 #endif
389 #endif /* TARGET_NR_utimensat */
390 
391 #ifdef TARGET_NR_renameat2
392 #if defined(__NR_renameat2)
393 #define __NR_sys_renameat2 __NR_renameat2
394 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
395           const char *, new, unsigned int, flags)
396 #else
397 static int sys_renameat2(int oldfd, const char *old,
398                          int newfd, const char *new, int flags)
399 {
400     if (flags == 0) {
401         return renameat(oldfd, old, newfd, new);
402     }
403     errno = ENOSYS;
404     return -1;
405 }
406 #endif
407 #endif /* TARGET_NR_renameat2 */
408 
409 #ifdef CONFIG_INOTIFY
410 #include <sys/inotify.h>
411 
412 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
413 static int sys_inotify_init(void)
414 {
415   return (inotify_init());
416 }
417 #endif
418 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
419 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
420 {
421   return (inotify_add_watch(fd, pathname, mask));
422 }
423 #endif
424 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
425 static int sys_inotify_rm_watch(int fd, int32_t wd)
426 {
427   return (inotify_rm_watch(fd, wd));
428 }
429 #endif
430 #ifdef CONFIG_INOTIFY1
431 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
432 static int sys_inotify_init1(int flags)
433 {
434   return (inotify_init1(flags));
435 }
436 #endif
437 #endif
438 #else
439 /* Userspace can usually survive runtime without inotify */
440 #undef TARGET_NR_inotify_init
441 #undef TARGET_NR_inotify_init1
442 #undef TARGET_NR_inotify_add_watch
443 #undef TARGET_NR_inotify_rm_watch
444 #endif /* CONFIG_INOTIFY  */
445 
446 #if defined(TARGET_NR_prlimit64)
447 #ifndef __NR_prlimit64
448 # define __NR_prlimit64 -1
449 #endif
450 #define __NR_sys_prlimit64 __NR_prlimit64
451 /* The glibc rlimit structure may not be the one used by the underlying syscall */
452 struct host_rlimit64 {
453     uint64_t rlim_cur;
454     uint64_t rlim_max;
455 };
456 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
457           const struct host_rlimit64 *, new_limit,
458           struct host_rlimit64 *, old_limit)
459 #endif
460 
461 
462 #if defined(TARGET_NR_timer_create)
463 /* Maximum of 32 active POSIX timers allowed at any one time. */
464 static timer_t g_posix_timers[32] = { 0, } ;
465 
466 static inline int next_free_host_timer(void)
467 {
468     int k ;
469     /* FIXME: Does finding the next free slot require a lock? */
470     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
471         if (g_posix_timers[k] == 0) {
472             g_posix_timers[k] = (timer_t) 1;
473             return k;
474         }
475     }
476     return -1;
477 }
478 #endif
479 
480 /* ARM EABI and MIPS expect 64-bit types to be aligned to even pairs of registers */
481 #ifdef TARGET_ARM
482 static inline int regpairs_aligned(void *cpu_env, int num)
483 {
484     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
485 }
486 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
487 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
488 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
489 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
490  * register pairs, which translates to the same rule as ARM/MIPS because we
491  * start with r3 as arg1 */
492 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
493 #elif defined(TARGET_SH4)
494 /* SH4 doesn't align register pairs, except for p{read,write}64 */
495 static inline int regpairs_aligned(void *cpu_env, int num)
496 {
497     switch (num) {
498     case TARGET_NR_pread64:
499     case TARGET_NR_pwrite64:
500         return 1;
501 
502     default:
503         return 0;
504     }
505 }
506 #elif defined(TARGET_XTENSA)
507 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
508 #else
509 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
510 #endif
511 
512 #define ERRNO_TABLE_SIZE 1200
513 
514 /* target_to_host_errno_table[] is initialized from
515  * host_to_target_errno_table[] in syscall_init(). */
516 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
517 };
518 
519 /*
520  * This list is the union of errno values overridden in asm-<arch>/errno.h
521  * minus the errnos that are not actually generic to all archs.
522  */
523 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
524     [EAGAIN]		= TARGET_EAGAIN,
525     [EIDRM]		= TARGET_EIDRM,
526     [ECHRNG]		= TARGET_ECHRNG,
527     [EL2NSYNC]		= TARGET_EL2NSYNC,
528     [EL3HLT]		= TARGET_EL3HLT,
529     [EL3RST]		= TARGET_EL3RST,
530     [ELNRNG]		= TARGET_ELNRNG,
531     [EUNATCH]		= TARGET_EUNATCH,
532     [ENOCSI]		= TARGET_ENOCSI,
533     [EL2HLT]		= TARGET_EL2HLT,
534     [EDEADLK]		= TARGET_EDEADLK,
535     [ENOLCK]		= TARGET_ENOLCK,
536     [EBADE]		= TARGET_EBADE,
537     [EBADR]		= TARGET_EBADR,
538     [EXFULL]		= TARGET_EXFULL,
539     [ENOANO]		= TARGET_ENOANO,
540     [EBADRQC]		= TARGET_EBADRQC,
541     [EBADSLT]		= TARGET_EBADSLT,
542     [EBFONT]		= TARGET_EBFONT,
543     [ENOSTR]		= TARGET_ENOSTR,
544     [ENODATA]		= TARGET_ENODATA,
545     [ETIME]		= TARGET_ETIME,
546     [ENOSR]		= TARGET_ENOSR,
547     [ENONET]		= TARGET_ENONET,
548     [ENOPKG]		= TARGET_ENOPKG,
549     [EREMOTE]		= TARGET_EREMOTE,
550     [ENOLINK]		= TARGET_ENOLINK,
551     [EADV]		= TARGET_EADV,
552     [ESRMNT]		= TARGET_ESRMNT,
553     [ECOMM]		= TARGET_ECOMM,
554     [EPROTO]		= TARGET_EPROTO,
555     [EDOTDOT]		= TARGET_EDOTDOT,
556     [EMULTIHOP]		= TARGET_EMULTIHOP,
557     [EBADMSG]		= TARGET_EBADMSG,
558     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
559     [EOVERFLOW]		= TARGET_EOVERFLOW,
560     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
561     [EBADFD]		= TARGET_EBADFD,
562     [EREMCHG]		= TARGET_EREMCHG,
563     [ELIBACC]		= TARGET_ELIBACC,
564     [ELIBBAD]		= TARGET_ELIBBAD,
565     [ELIBSCN]		= TARGET_ELIBSCN,
566     [ELIBMAX]		= TARGET_ELIBMAX,
567     [ELIBEXEC]		= TARGET_ELIBEXEC,
568     [EILSEQ]		= TARGET_EILSEQ,
569     [ENOSYS]		= TARGET_ENOSYS,
570     [ELOOP]		= TARGET_ELOOP,
571     [ERESTART]		= TARGET_ERESTART,
572     [ESTRPIPE]		= TARGET_ESTRPIPE,
573     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
574     [EUSERS]		= TARGET_EUSERS,
575     [ENOTSOCK]		= TARGET_ENOTSOCK,
576     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
577     [EMSGSIZE]		= TARGET_EMSGSIZE,
578     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
579     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
580     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
581     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
582     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
583     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
584     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
585     [EADDRINUSE]	= TARGET_EADDRINUSE,
586     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
587     [ENETDOWN]		= TARGET_ENETDOWN,
588     [ENETUNREACH]	= TARGET_ENETUNREACH,
589     [ENETRESET]		= TARGET_ENETRESET,
590     [ECONNABORTED]	= TARGET_ECONNABORTED,
591     [ECONNRESET]	= TARGET_ECONNRESET,
592     [ENOBUFS]		= TARGET_ENOBUFS,
593     [EISCONN]		= TARGET_EISCONN,
594     [ENOTCONN]		= TARGET_ENOTCONN,
595     [EUCLEAN]		= TARGET_EUCLEAN,
596     [ENOTNAM]		= TARGET_ENOTNAM,
597     [ENAVAIL]		= TARGET_ENAVAIL,
598     [EISNAM]		= TARGET_EISNAM,
599     [EREMOTEIO]		= TARGET_EREMOTEIO,
600     [EDQUOT]            = TARGET_EDQUOT,
601     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
602     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
603     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
604     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
605     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
606     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
607     [EALREADY]		= TARGET_EALREADY,
608     [EINPROGRESS]	= TARGET_EINPROGRESS,
609     [ESTALE]		= TARGET_ESTALE,
610     [ECANCELED]		= TARGET_ECANCELED,
611     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
612     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
613 #ifdef ENOKEY
614     [ENOKEY]		= TARGET_ENOKEY,
615 #endif
616 #ifdef EKEYEXPIRED
617     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
618 #endif
619 #ifdef EKEYREVOKED
620     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
621 #endif
622 #ifdef EKEYREJECTED
623     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
624 #endif
625 #ifdef EOWNERDEAD
626     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
627 #endif
628 #ifdef ENOTRECOVERABLE
629     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
630 #endif
631 #ifdef ENOMSG
632     [ENOMSG]            = TARGET_ENOMSG,
633 #endif
634 #ifdef ERFKILL
635     [ERFKILL]           = TARGET_ERFKILL,
636 #endif
637 #ifdef EHWPOISON
638     [EHWPOISON]         = TARGET_EHWPOISON,
639 #endif
640 };
641 
642 static inline int host_to_target_errno(int err)
643 {
644     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
645         host_to_target_errno_table[err]) {
646         return host_to_target_errno_table[err];
647     }
648     return err;
649 }
650 
651 static inline int target_to_host_errno(int err)
652 {
653     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
654         target_to_host_errno_table[err]) {
655         return target_to_host_errno_table[err];
656     }
657     return err;
658 }
659 
660 static inline abi_long get_errno(abi_long ret)
661 {
662     if (ret == -1)
663         return -host_to_target_errno(errno);
664     else
665         return ret;
666 }
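
/*
 * Typical usage (a sketch): host syscall results are funnelled through
 * get_errno() so that failures become negative target errno values,
 * e.g.
 *
 *     abi_long ret = get_errno(open(path, host_flags));
 *     if (is_error(ret)) {
 *         return ret;
 *     }
 *
 * where a negative ret is already a -TARGET_Exxx value ready to hand
 * back to the guest (path/host_flags above are illustrative only).
 */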
667 
668 const char *target_strerror(int err)
669 {
670     if (err == TARGET_ERESTARTSYS) {
671         return "To be restarted";
672     }
673     if (err == TARGET_QEMU_ESIGRETURN) {
674         return "Successful exit from sigreturn";
675     }
676 
677     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
678         return NULL;
679     }
680     return strerror(target_to_host_errno(err));
681 }
682 
683 #define safe_syscall0(type, name) \
684 static type safe_##name(void) \
685 { \
686     return safe_syscall(__NR_##name); \
687 }
688 
689 #define safe_syscall1(type, name, type1, arg1) \
690 static type safe_##name(type1 arg1) \
691 { \
692     return safe_syscall(__NR_##name, arg1); \
693 }
694 
695 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
696 static type safe_##name(type1 arg1, type2 arg2) \
697 { \
698     return safe_syscall(__NR_##name, arg1, arg2); \
699 }
700 
701 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
703 { \
704     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
705 }
706 
707 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
708     type4, arg4) \
709 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
710 { \
711     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
712 }
713 
714 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
715     type4, arg4, type5, arg5) \
716 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
717     type5 arg5) \
718 { \
719     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
720 }
721 
722 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
723     type4, arg4, type5, arg5, type6, arg6) \
724 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
725     type5 arg5, type6 arg6) \
726 { \
727     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
728 }
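
/*
 * These expand just like the _syscallN macros above; e.g. the
 * declaration below
 *
 *     safe_syscall2(int, kill, pid_t, pid, int, sig)
 *
 * becomes
 *
 *     static int safe_kill(pid_t pid, int sig)
 *     {
 *         return safe_syscall(__NR_kill, pid, sig);
 *     }
 *
 * the difference being that safe_syscall() is the race-free entry point
 * used for host syscalls that may block, so a guest signal arriving just
 * before the syscall is entered can restart it cleanly instead of being
 * lost.
 */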
729 
730 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
731 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
732 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
733               int, flags, mode_t, mode)
734 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
735               struct rusage *, rusage)
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
740               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
741 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
742               struct timespec *, tsp, const sigset_t *, sigmask,
743               size_t, sigsetsize)
744 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
745               int, maxevents, int, timeout, const sigset_t *, sigmask,
746               size_t, sigsetsize)
747 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
748               const struct timespec *,timeout,int *,uaddr2,int,val3)
749 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
750 safe_syscall2(int, kill, pid_t, pid, int, sig)
751 safe_syscall2(int, tkill, int, tid, int, sig)
752 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
753 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
755 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
756               unsigned long, pos_l, unsigned long, pos_h)
757 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
758               unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
760               socklen_t, addrlen)
761 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
762               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
763 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
764               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
765 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
766 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
767 safe_syscall2(int, flock, int, fd, int, operation)
768 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
769               const struct timespec *, uts, size_t, sigsetsize)
770 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
771               int, flags)
772 safe_syscall2(int, nanosleep, const struct timespec *, req,
773               struct timespec *, rem)
774 #ifdef TARGET_NR_clock_nanosleep
775 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
776               const struct timespec *, req, struct timespec *, rem)
777 #endif
778 #ifdef __NR_ipc
779 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
780               void *, ptr, long, fifth)
781 #endif
782 #ifdef __NR_msgsnd
783 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
784               int, flags)
785 #endif
786 #ifdef __NR_msgrcv
787 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
788               long, msgtype, int, flags)
789 #endif
790 #ifdef __NR_semtimedop
791 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
792               unsigned, nsops, const struct timespec *, timeout)
793 #endif
794 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
795 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
796               size_t, len, unsigned, prio, const struct timespec *, timeout)
797 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
798               size_t, len, unsigned *, prio, const struct timespec *, timeout)
799 #endif
800 /* We do ioctl like this rather than via safe_syscall3 to preserve the
801  * "third argument might be integer or pointer or not present" behaviour of
802  * the libc function.
803  */
804 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
805 /* Similarly for fcntl. Note that callers must always:
806  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
807  *  - use the flock64 struct rather than the unsuffixed flock
808  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
809  */
810 #ifdef __NR_fcntl64
811 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
812 #else
813 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
814 #endif
815 
816 static inline int host_to_target_sock_type(int host_type)
817 {
818     int target_type;
819 
820     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
821     case SOCK_DGRAM:
822         target_type = TARGET_SOCK_DGRAM;
823         break;
824     case SOCK_STREAM:
825         target_type = TARGET_SOCK_STREAM;
826         break;
827     default:
828         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
829         break;
830     }
831 
832 #if defined(SOCK_CLOEXEC)
833     if (host_type & SOCK_CLOEXEC) {
834         target_type |= TARGET_SOCK_CLOEXEC;
835     }
836 #endif
837 
838 #if defined(SOCK_NONBLOCK)
839     if (host_type & SOCK_NONBLOCK) {
840         target_type |= TARGET_SOCK_NONBLOCK;
841     }
842 #endif
843 
844     return target_type;
845 }
846 
847 static abi_ulong target_brk;
848 static abi_ulong target_original_brk;
849 static abi_ulong brk_page;
850 
851 void target_set_brk(abi_ulong new_brk)
852 {
853     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
854     brk_page = HOST_PAGE_ALIGN(target_brk);
855 }
856 
857 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
858 #define DEBUGF_BRK(message, args...)
859 
860 /* do_brk() must return target values and target errnos. */
861 abi_long do_brk(abi_ulong new_brk)
862 {
863     abi_long mapped_addr;
864     abi_ulong new_alloc_size;
865 
866     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
867 
868     if (!new_brk) {
869         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
870         return target_brk;
871     }
872     if (new_brk < target_original_brk) {
873         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
874                    target_brk);
875         return target_brk;
876     }
877 
878     /* If the new brk is less than the highest page reserved to the
879      * target heap allocation, set it and we're almost done...  */
880     if (new_brk <= brk_page) {
881         /* Heap contents are initialized to zero, as for anonymous
882          * mapped pages.  */
883         if (new_brk > target_brk) {
884             memset(g2h(target_brk), 0, new_brk - target_brk);
885         }
886         target_brk = new_brk;
887         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
888         return target_brk;
889     }
890 
891     /* We need to allocate more memory after the brk... Note that
892      * we don't use MAP_FIXED because that will map over the top of
893      * any existing mapping (like the one with the host libc or qemu
894      * itself); instead we treat "mapped but at wrong address" as
895      * a failure and unmap again.
896      */
897     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
898     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
899                                         PROT_READ|PROT_WRITE,
900                                         MAP_ANON|MAP_PRIVATE, 0, 0));
901 
902     if (mapped_addr == brk_page) {
903         /* Heap contents are initialized to zero, as for anonymous
904          * mapped pages.  Technically the new pages are already
905          * initialized to zero since they *are* anonymous mapped
906          * pages; however, we have to take care with the contents that
907          * come from the remaining part of the previous page: it may
908          * contain garbage data due to previous heap usage (grown and
909          * then shrunk).  */
910         memset(g2h(target_brk), 0, brk_page - target_brk);
911 
912         target_brk = new_brk;
913         brk_page = HOST_PAGE_ALIGN(target_brk);
914         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
915             target_brk);
916         return target_brk;
917     } else if (mapped_addr != -1) {
918         /* Mapped but at wrong address, meaning there wasn't actually
919          * enough space for this brk.
920          */
921         target_munmap(mapped_addr, new_alloc_size);
922         mapped_addr = -1;
923         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
924     } else {
926         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
927     }
928 
929 #if defined(TARGET_ALPHA)
930     /* We (partially) emulate OSF/1 on Alpha, which requires we
931        return a proper errno, not an unchanged brk value.  */
932     return -TARGET_ENOMEM;
933 #endif
934     /* For everything else, return the previous break. */
935     return target_brk;
936 }
937 
938 static inline abi_long copy_from_user_fdset(fd_set *fds,
939                                             abi_ulong target_fds_addr,
940                                             int n)
941 {
942     int i, nw, j, k;
943     abi_ulong b, *target_fds;
944 
945     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
946     if (!(target_fds = lock_user(VERIFY_READ,
947                                  target_fds_addr,
948                                  sizeof(abi_ulong) * nw,
949                                  1)))
950         return -TARGET_EFAULT;
951 
952     FD_ZERO(fds);
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         /* grab the abi_ulong */
956         __get_user(b, &target_fds[i]);
957         for (j = 0; j < TARGET_ABI_BITS; j++) {
958             /* check the bit inside the abi_ulong */
959             if ((b >> j) & 1)
960                 FD_SET(k, fds);
961             k++;
962         }
963     }
964 
965     unlock_user(target_fds, target_fds_addr, 0);
966 
967     return 0;
968 }
969 
970 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
971                                                  abi_ulong target_fds_addr,
972                                                  int n)
973 {
974     if (target_fds_addr) {
975         if (copy_from_user_fdset(fds, target_fds_addr, n))
976             return -TARGET_EFAULT;
977         *fds_ptr = fds;
978     } else {
979         *fds_ptr = NULL;
980     }
981     return 0;
982 }
983 
984 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
985                                           const fd_set *fds,
986                                           int n)
987 {
988     int i, nw, j, k;
989     abi_long v;
990     abi_ulong *target_fds;
991 
992     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
993     if (!(target_fds = lock_user(VERIFY_WRITE,
994                                  target_fds_addr,
995                                  sizeof(abi_ulong) * nw,
996                                  0)))
997         return -TARGET_EFAULT;
998 
999     k = 0;
1000     for (i = 0; i < nw; i++) {
1001         v = 0;
1002         for (j = 0; j < TARGET_ABI_BITS; j++) {
1003             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1004             k++;
1005         }
1006         __put_user(v, &target_fds[i]);
1007     }
1008 
1009     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1010 
1011     return 0;
1012 }
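
/*
 * Layout sketch: descriptors are packed TARGET_ABI_BITS per abi_ulong,
 * lowest-numbered fd in the least significant bit.  With a 32-bit target
 * ABI, fd 35 therefore lives in target_fds[1], bit 3 (35 / 32 == 1,
 * 35 % 32 == 3), which is exactly what the i/j/k loops in the two
 * helpers above compute.
 */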
1013 
1014 #if defined(__alpha__)
1015 #define HOST_HZ 1024
1016 #else
1017 #define HOST_HZ 100
1018 #endif
1019 
1020 static inline abi_long host_to_target_clock_t(long ticks)
1021 {
1022 #if HOST_HZ == TARGET_HZ
1023     return ticks;
1024 #else
1025     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1026 #endif
1027 }
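
/*
 * Worked example: an Alpha host has HOST_HZ == 1024, so for a target
 * with TARGET_HZ == 100, 2048 host ticks convert to
 * (2048 * 100) / 1024 == 200 target ticks.
 */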
1028 
1029 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1030                                              const struct rusage *rusage)
1031 {
1032     struct target_rusage *target_rusage;
1033 
1034     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1035         return -TARGET_EFAULT;
1036     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1037     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1038     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1039     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1040     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1041     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1042     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1043     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1044     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1045     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1046     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1047     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1048     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1049     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1050     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1051     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1052     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1053     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1054     unlock_user_struct(target_rusage, target_addr, 1);
1055 
1056     return 0;
1057 }
1058 
1059 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1060 {
1061     abi_ulong target_rlim_swap;
1062     rlim_t result;
1063 
1064     target_rlim_swap = tswapal(target_rlim);
1065     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1066         return RLIM_INFINITY;
1067 
1068     result = target_rlim_swap;
1069     if (target_rlim_swap != (rlim_t)result)
1070         return RLIM_INFINITY;
1071 
1072     return result;
1073 }
1074 
1075 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1076 {
1077     abi_ulong target_rlim_swap;
1078     abi_ulong result;
1079 
1080     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1081         target_rlim_swap = TARGET_RLIM_INFINITY;
1082     else
1083         target_rlim_swap = rlim;
1084     result = tswapal(target_rlim_swap);
1085 
1086     return result;
1087 }
1088 
1089 static inline int target_to_host_resource(int code)
1090 {
1091     switch (code) {
1092     case TARGET_RLIMIT_AS:
1093         return RLIMIT_AS;
1094     case TARGET_RLIMIT_CORE:
1095         return RLIMIT_CORE;
1096     case TARGET_RLIMIT_CPU:
1097         return RLIMIT_CPU;
1098     case TARGET_RLIMIT_DATA:
1099         return RLIMIT_DATA;
1100     case TARGET_RLIMIT_FSIZE:
1101         return RLIMIT_FSIZE;
1102     case TARGET_RLIMIT_LOCKS:
1103         return RLIMIT_LOCKS;
1104     case TARGET_RLIMIT_MEMLOCK:
1105         return RLIMIT_MEMLOCK;
1106     case TARGET_RLIMIT_MSGQUEUE:
1107         return RLIMIT_MSGQUEUE;
1108     case TARGET_RLIMIT_NICE:
1109         return RLIMIT_NICE;
1110     case TARGET_RLIMIT_NOFILE:
1111         return RLIMIT_NOFILE;
1112     case TARGET_RLIMIT_NPROC:
1113         return RLIMIT_NPROC;
1114     case TARGET_RLIMIT_RSS:
1115         return RLIMIT_RSS;
1116     case TARGET_RLIMIT_RTPRIO:
1117         return RLIMIT_RTPRIO;
1118     case TARGET_RLIMIT_SIGPENDING:
1119         return RLIMIT_SIGPENDING;
1120     case TARGET_RLIMIT_STACK:
1121         return RLIMIT_STACK;
1122     default:
1123         return code;
1124     }
1125 }
1126 
1127 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1128                                               abi_ulong target_tv_addr)
1129 {
1130     struct target_timeval *target_tv;
1131 
1132     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1133         return -TARGET_EFAULT;
1134     }
1135 
1136     __get_user(tv->tv_sec, &target_tv->tv_sec);
1137     __get_user(tv->tv_usec, &target_tv->tv_usec);
1138 
1139     unlock_user_struct(target_tv, target_tv_addr, 0);
1140 
1141     return 0;
1142 }
1143 
1144 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1145                                             const struct timeval *tv)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1150         return -TARGET_EFAULT;
1151     }
1152 
1153     __put_user(tv->tv_sec, &target_tv->tv_sec);
1154     __put_user(tv->tv_usec, &target_tv->tv_usec);
1155 
1156     unlock_user_struct(target_tv, target_tv_addr, 1);
1157 
1158     return 0;
1159 }
1160 
1161 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1162                                              const struct timeval *tv)
1163 {
1164     struct target__kernel_sock_timeval *target_tv;
1165 
1166     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1167         return -TARGET_EFAULT;
1168     }
1169 
1170     __put_user(tv->tv_sec, &target_tv->tv_sec);
1171     __put_user(tv->tv_usec, &target_tv->tv_usec);
1172 
1173     unlock_user_struct(target_tv, target_tv_addr, 1);
1174 
1175     return 0;
1176 }
1177 
1178 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1179                                                abi_ulong target_addr)
1180 {
1181     struct target_timespec *target_ts;
1182 
1183     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1184         return -TARGET_EFAULT;
1185     }
1186     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1187     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1188     unlock_user_struct(target_ts, target_addr, 0);
1189     return 0;
1190 }
1191 
1192 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1193                                                struct timespec *host_ts)
1194 {
1195     struct target_timespec *target_ts;
1196 
1197     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1198         return -TARGET_EFAULT;
1199     }
1200     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1201     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1202     unlock_user_struct(target_ts, target_addr, 1);
1203     return 0;
1204 }
1205 
1206 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1207                                                  struct timespec *host_ts)
1208 {
1209     struct target__kernel_timespec *target_ts;
1210 
1211     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1212         return -TARGET_EFAULT;
1213     }
1214     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1215     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1216     unlock_user_struct(target_ts, target_addr, 1);
1217     return 0;
1218 }
1219 
1220 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1221                                                abi_ulong target_tz_addr)
1222 {
1223     struct target_timezone *target_tz;
1224 
1225     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1226         return -TARGET_EFAULT;
1227     }
1228 
1229     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1230     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1231 
1232     unlock_user_struct(target_tz, target_tz_addr, 0);
1233 
1234     return 0;
1235 }
1236 
1237 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1238 #include <mqueue.h>
1239 
1240 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1241                                               abi_ulong target_mq_attr_addr)
1242 {
1243     struct target_mq_attr *target_mq_attr;
1244 
1245     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1246                           target_mq_attr_addr, 1))
1247         return -TARGET_EFAULT;
1248 
1249     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1250     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1251     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1252     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1253 
1254     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1255 
1256     return 0;
1257 }
1258 
1259 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1260                                             const struct mq_attr *attr)
1261 {
1262     struct target_mq_attr *target_mq_attr;
1263 
1264     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1265                           target_mq_attr_addr, 0))
1266         return -TARGET_EFAULT;
1267 
1268     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1269     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1270     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1271     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1272 
1273     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1274 
1275     return 0;
1276 }
1277 #endif
1278 
1279 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1280 /* do_select() must return target values and target errnos. */
1281 static abi_long do_select(int n,
1282                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1283                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1284 {
1285     fd_set rfds, wfds, efds;
1286     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1287     struct timeval tv;
1288     struct timespec ts, *ts_ptr;
1289     abi_long ret;
1290 
1291     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1292     if (ret) {
1293         return ret;
1294     }
1295     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1296     if (ret) {
1297         return ret;
1298     }
1299     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1300     if (ret) {
1301         return ret;
1302     }
1303 
1304     if (target_tv_addr) {
1305         if (copy_from_user_timeval(&tv, target_tv_addr))
1306             return -TARGET_EFAULT;
1307         ts.tv_sec = tv.tv_sec;
1308         ts.tv_nsec = tv.tv_usec * 1000;
1309         ts_ptr = &ts;
1310     } else {
1311         ts_ptr = NULL;
1312     }
1313 
1314     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1315                                   ts_ptr, NULL));
1316 
1317     if (!is_error(ret)) {
1318         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1319             return -TARGET_EFAULT;
1320         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1321             return -TARGET_EFAULT;
1322         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1323             return -TARGET_EFAULT;
1324 
1325         if (target_tv_addr) {
1326             tv.tv_sec = ts.tv_sec;
1327             tv.tv_usec = ts.tv_nsec / 1000;
1328             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1329                 return -TARGET_EFAULT;
1330             }
1331         }
1332     }
1333 
1334     return ret;
1335 }
1336 
1337 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1338 static abi_long do_old_select(abi_ulong arg1)
1339 {
1340     struct target_sel_arg_struct *sel;
1341     abi_ulong inp, outp, exp, tvp;
1342     long nsel;
1343 
1344     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1345         return -TARGET_EFAULT;
1346     }
1347 
1348     nsel = tswapal(sel->n);
1349     inp = tswapal(sel->inp);
1350     outp = tswapal(sel->outp);
1351     exp = tswapal(sel->exp);
1352     tvp = tswapal(sel->tvp);
1353 
1354     unlock_user_struct(sel, arg1, 0);
1355 
1356     return do_select(nsel, inp, outp, exp, tvp);
1357 }
1358 #endif
1359 #endif
1360 
1361 static abi_long do_pipe2(int host_pipe[], int flags)
1362 {
1363 #ifdef CONFIG_PIPE2
1364     return pipe2(host_pipe, flags);
1365 #else
1366     return -ENOSYS;
1367 #endif
1368 }
1369 
1370 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1371                         int flags, int is_pipe2)
1372 {
1373     int host_pipe[2];
1374     abi_long ret;
1375     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1376 
1377     if (is_error(ret))
1378         return get_errno(ret);
1379 
1380     /* Several targets have special calling conventions for the original
1381        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1382     if (!is_pipe2) {
1383 #if defined(TARGET_ALPHA)
1384         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1385         return host_pipe[0];
1386 #elif defined(TARGET_MIPS)
1387         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1388         return host_pipe[0];
1389 #elif defined(TARGET_SH4)
1390         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1391         return host_pipe[0];
1392 #elif defined(TARGET_SPARC)
1393         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1394         return host_pipe[0];
1395 #endif
1396     }
1397 
1398     if (put_user_s32(host_pipe[0], pipedes)
1399         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1400         return -TARGET_EFAULT;
1401     return get_errno(ret);
1402 }
1403 
1404 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1405                                               abi_ulong target_addr,
1406                                               socklen_t len)
1407 {
1408     struct target_ip_mreqn *target_smreqn;
1409 
1410     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1411     if (!target_smreqn)
1412         return -TARGET_EFAULT;
1413     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1414     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1415     if (len == sizeof(struct target_ip_mreqn))
1416         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1417     unlock_user(target_smreqn, target_addr, 0);
1418 
1419     return 0;
1420 }
1421 
1422 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1423                                                abi_ulong target_addr,
1424                                                socklen_t len)
1425 {
1426     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1427     sa_family_t sa_family;
1428     struct target_sockaddr *target_saddr;
1429 
1430     if (fd_trans_target_to_host_addr(fd)) {
1431         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1432     }
1433 
1434     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1435     if (!target_saddr)
1436         return -TARGET_EFAULT;
1437 
1438     sa_family = tswap16(target_saddr->sa_family);
1439 
1440     /* Oops. The caller might send an incomplete sun_path; sun_path
1441      * must be terminated by \0 (see the manual page), but
1442      * unfortunately it is quite common to specify sockaddr_un
1443      * length as "strlen(x->sun_path)" while it should be
1444      * "strlen(...) + 1". We'll fix that here if needed.
1445      * The Linux kernel has a similar feature.
1446      */
1447 
1448     if (sa_family == AF_UNIX) {
1449         if (len < unix_maxlen && len > 0) {
1450             char *cp = (char*)target_saddr;
1451 
1452             if ( cp[len-1] && !cp[len] )
1453                 len++;
1454         }
1455         if (len > unix_maxlen)
1456             len = unix_maxlen;
1457     }
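
    /* For example, a guest that passes len == offsetof(struct sockaddr_un,
     * sun_path) + strlen(path) leaves out the trailing NUL; the check above
     * extends len by one byte so the host kernel sees a terminated path.
     */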
1458 
1459     memcpy(addr, target_saddr, len);
1460     addr->sa_family = sa_family;
1461     if (sa_family == AF_NETLINK) {
1462         struct sockaddr_nl *nladdr;
1463 
1464         nladdr = (struct sockaddr_nl *)addr;
1465         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1466         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1467     } else if (sa_family == AF_PACKET) {
1468         struct target_sockaddr_ll *lladdr;
1469 
1470         lladdr = (struct target_sockaddr_ll *)addr;
1471         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1472         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1473     }
1474     unlock_user(target_saddr, target_addr, 0);
1475 
1476     return 0;
1477 }
1478 
1479 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1480                                                struct sockaddr *addr,
1481                                                socklen_t len)
1482 {
1483     struct target_sockaddr *target_saddr;
1484 
1485     if (len == 0) {
1486         return 0;
1487     }
1488     assert(addr);
1489 
1490     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1491     if (!target_saddr)
1492         return -TARGET_EFAULT;
1493     memcpy(target_saddr, addr, len);
1494     if (len >= offsetof(struct target_sockaddr, sa_family) +
1495         sizeof(target_saddr->sa_family)) {
1496         target_saddr->sa_family = tswap16(addr->sa_family);
1497     }
1498     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1499         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1500         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1501         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1502     } else if (addr->sa_family == AF_PACKET) {
1503         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1504         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1505         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1506     } else if (addr->sa_family == AF_INET6 &&
1507                len >= sizeof(struct target_sockaddr_in6)) {
1508         struct target_sockaddr_in6 *target_in6 =
1509                (struct target_sockaddr_in6 *)target_saddr;
1510         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1511     }
1512     unlock_user(target_saddr, target_addr, len);
1513 
1514     return 0;
1515 }
1516 
1517 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1518                                            struct target_msghdr *target_msgh)
1519 {
1520     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1521     abi_long msg_controllen;
1522     abi_ulong target_cmsg_addr;
1523     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1524     socklen_t space = 0;
1525 
1526     msg_controllen = tswapal(target_msgh->msg_controllen);
1527     if (msg_controllen < sizeof (struct target_cmsghdr))
1528         goto the_end;
1529     target_cmsg_addr = tswapal(target_msgh->msg_control);
1530     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1531     target_cmsg_start = target_cmsg;
1532     if (!target_cmsg)
1533         return -TARGET_EFAULT;
1534 
1535     while (cmsg && target_cmsg) {
1536         void *data = CMSG_DATA(cmsg);
1537         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1538 
1539         int len = tswapal(target_cmsg->cmsg_len)
1540             - sizeof(struct target_cmsghdr);
1541 
1542         space += CMSG_SPACE(len);
1543         if (space > msgh->msg_controllen) {
1544             space -= CMSG_SPACE(len);
1545             /* This is a QEMU bug, since we allocated the payload
1546              * area ourselves (unlike overflow in host-to-target
1547              * conversion, which is just the guest giving us a buffer
1548              * that's too small). It can't happen for the payload types
1549              * we currently support; if it becomes an issue in future
1550              * we would need to improve our allocation strategy to
1551              * something more intelligent than "twice the size of the
1552              * target buffer we're reading from".
1553              */
1554             gemu_log("Host cmsg overflow\n");
1555             break;
1556         }
1557 
1558         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1559             cmsg->cmsg_level = SOL_SOCKET;
1560         } else {
1561             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1562         }
1563         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1564         cmsg->cmsg_len = CMSG_LEN(len);
1565 
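             /* SCM_RIGHTS carries an array of file descriptors (ints); copy
              * them one at a time so each is byte-swapped to host order. */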
1566         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1567             int *fd = (int *)data;
1568             int *target_fd = (int *)target_data;
1569             int i, numfds = len / sizeof(int);
1570 
1571             for (i = 0; i < numfds; i++) {
1572                 __get_user(fd[i], target_fd + i);
1573             }
1574         } else if (cmsg->cmsg_level == SOL_SOCKET
1575                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1576             struct ucred *cred = (struct ucred *)data;
1577             struct target_ucred *target_cred =
1578                 (struct target_ucred *)target_data;
1579 
1580             __get_user(cred->pid, &target_cred->pid);
1581             __get_user(cred->uid, &target_cred->uid);
1582             __get_user(cred->gid, &target_cred->gid);
1583         } else {
1584             gemu_log("Unsupported ancillary data: %d/%d\n",
1585                                         cmsg->cmsg_level, cmsg->cmsg_type);
1586             memcpy(data, target_data, len);
1587         }
1588 
1589         cmsg = CMSG_NXTHDR(msgh, cmsg);
1590         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1591                                          target_cmsg_start);
1592     }
1593     unlock_user(target_cmsg, target_cmsg_addr, 0);
1594  the_end:
1595     msgh->msg_controllen = space;
1596     return 0;
1597 }
1598 
1599 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1600                                            struct msghdr *msgh)
1601 {
1602     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1603     abi_long msg_controllen;
1604     abi_ulong target_cmsg_addr;
1605     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1606     socklen_t space = 0;
1607 
1608     msg_controllen = tswapal(target_msgh->msg_controllen);
1609     if (msg_controllen < sizeof (struct target_cmsghdr))
1610         goto the_end;
1611     target_cmsg_addr = tswapal(target_msgh->msg_control);
1612     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1613     target_cmsg_start = target_cmsg;
1614     if (!target_cmsg)
1615         return -TARGET_EFAULT;
1616 
1617     while (cmsg && target_cmsg) {
1618         void *data = CMSG_DATA(cmsg);
1619         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1620 
1621         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1622         int tgt_len, tgt_space;
1623 
1624         /* We never copy a half-header but may copy half-data;
1625          * this is Linux's behaviour in put_cmsg(). Note that
1626          * truncation here is a guest problem (which we report
1627          * to the guest via the CTRUNC bit), unlike truncation
1628          * in target_to_host_cmsg, which is a QEMU bug.
1629          */
1630         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1631             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1632             break;
1633         }
1634 
1635         if (cmsg->cmsg_level == SOL_SOCKET) {
1636             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1637         } else {
1638             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1639         }
1640         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1641 
1642         /* Payload types which need a different size of payload on
1643          * the target must adjust tgt_len here.
1644          */
1645         tgt_len = len;
1646         switch (cmsg->cmsg_level) {
1647         case SOL_SOCKET:
1648             switch (cmsg->cmsg_type) {
1649             case SO_TIMESTAMP:
1650                 tgt_len = sizeof(struct target_timeval);
1651                 break;
1652             default:
1653                 break;
1654             }
1655             break;
1656         default:
1657             break;
1658         }
1659 
1660         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1661             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1662             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1663         }
1664 
1665         /* We must now copy-and-convert len bytes of payload
1666          * into tgt_len bytes of destination space. Bear in mind
1667          * that in both source and destination we may be dealing
1668          * with a truncated value!
1669          */
1670         switch (cmsg->cmsg_level) {
1671         case SOL_SOCKET:
1672             switch (cmsg->cmsg_type) {
1673             case SCM_RIGHTS:
1674             {
1675                 int *fd = (int *)data;
1676                 int *target_fd = (int *)target_data;
1677                 int i, numfds = tgt_len / sizeof(int);
1678 
1679                 for (i = 0; i < numfds; i++) {
1680                     __put_user(fd[i], target_fd + i);
1681                 }
1682                 break;
1683             }
1684             case SO_TIMESTAMP:
1685             {
1686                 struct timeval *tv = (struct timeval *)data;
1687                 struct target_timeval *target_tv =
1688                     (struct target_timeval *)target_data;
1689 
1690                 if (len != sizeof(struct timeval) ||
1691                     tgt_len != sizeof(struct target_timeval)) {
1692                     goto unimplemented;
1693                 }
1694 
1695                 /* copy struct timeval to target */
1696                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1697                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1698                 break;
1699             }
1700             case SCM_CREDENTIALS:
1701             {
1702                 struct ucred *cred = (struct ucred *)data;
1703                 struct target_ucred *target_cred =
1704                     (struct target_ucred *)target_data;
1705 
1706                 __put_user(cred->pid, &target_cred->pid);
1707                 __put_user(cred->uid, &target_cred->uid);
1708                 __put_user(cred->gid, &target_cred->gid);
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IP:
1717             switch (cmsg->cmsg_type) {
1718             case IP_TTL:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IP_RECVERR:
1731             {
1732                 struct errhdr_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in offender;
1735                 };
1736                 struct errhdr_t *errh = (struct errhdr_t *)data;
1737                 struct errhdr_t *target_errh =
1738                     (struct errhdr_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr_t) ||
1741                     tgt_len != sizeof(struct errhdr_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         case SOL_IPV6:
1761             switch (cmsg->cmsg_type) {
1762             case IPV6_HOPLIMIT:
1763             {
1764                 uint32_t *v = (uint32_t *)data;
1765                 uint32_t *t_int = (uint32_t *)target_data;
1766 
1767                 if (len != sizeof(uint32_t) ||
1768                     tgt_len != sizeof(uint32_t)) {
1769                     goto unimplemented;
1770                 }
1771                 __put_user(*v, t_int);
1772                 break;
1773             }
1774             case IPV6_RECVERR:
1775             {
1776                 struct errhdr6_t {
1777                    struct sock_extended_err ee;
1778                    struct sockaddr_in6 offender;
1779                 };
1780                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1781                 struct errhdr6_t *target_errh =
1782                     (struct errhdr6_t *)target_data;
1783 
1784                 if (len != sizeof(struct errhdr6_t) ||
1785                     tgt_len != sizeof(struct errhdr6_t)) {
1786                     goto unimplemented;
1787                 }
1788                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1789                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1790                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1791                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1792                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1793                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1794                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1795                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1796                     (void *) &errh->offender, sizeof(errh->offender));
1797                 break;
1798             }
1799             default:
1800                 goto unimplemented;
1801             }
1802             break;
1803 
1804         default:
1805         unimplemented:
1806             gemu_log("Unsupported ancillary data: %d/%d\n",
1807                                         cmsg->cmsg_level, cmsg->cmsg_type);
1808             memcpy(target_data, data, MIN(len, tgt_len));
1809             if (tgt_len > len) {
1810                 memset(target_data + len, 0, tgt_len - len);
1811             }
1812         }
1813 
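             /* Account for the space consumed in the target control buffer,
              * clamping the last entry if the guest buffer has run out. */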
1814         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1815         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1816         if (msg_controllen < tgt_space) {
1817             tgt_space = msg_controllen;
1818         }
1819         msg_controllen -= tgt_space;
1820         space += tgt_space;
1821         cmsg = CMSG_NXTHDR(msgh, cmsg);
1822         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1823                                          target_cmsg_start);
1824     }
1825     unlock_user(target_cmsg, target_cmsg_addr, space);
1826  the_end:
1827     target_msgh->msg_controllen = tswapal(space);
1828     return 0;
1829 }
1830 
1831 /* do_setsockopt() must return target values and target errnos. */
1832 static abi_long do_setsockopt(int sockfd, int level, int optname,
1833                               abi_ulong optval_addr, socklen_t optlen)
1834 {
1835     abi_long ret;
1836     int val;
1837     struct ip_mreqn *ip_mreq;
1838     struct ip_mreq_source *ip_mreq_source;
1839 
1840     switch(level) {
1841     case SOL_TCP:
1842         /* TCP options all take an 'int' value.  */
1843         if (optlen < sizeof(uint32_t))
1844             return -TARGET_EINVAL;
1845 
1846         if (get_user_u32(val, optval_addr))
1847             return -TARGET_EFAULT;
1848         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1849         break;
1850     case SOL_IP:
1851         switch(optname) {
1852         case IP_TOS:
1853         case IP_TTL:
1854         case IP_HDRINCL:
1855         case IP_ROUTER_ALERT:
1856         case IP_RECVOPTS:
1857         case IP_RETOPTS:
1858         case IP_PKTINFO:
1859         case IP_MTU_DISCOVER:
1860         case IP_RECVERR:
1861         case IP_RECVTTL:
1862         case IP_RECVTOS:
1863 #ifdef IP_FREEBIND
1864         case IP_FREEBIND:
1865 #endif
1866         case IP_MULTICAST_TTL:
1867         case IP_MULTICAST_LOOP:
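                 /* The kernel accepts both an int and a single byte for these
                  * options, so read whichever size the guest passed. */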
1868             val = 0;
1869             if (optlen >= sizeof(uint32_t)) {
1870                 if (get_user_u32(val, optval_addr))
1871                     return -TARGET_EFAULT;
1872             } else if (optlen >= 1) {
1873                 if (get_user_u8(val, optval_addr))
1874                     return -TARGET_EFAULT;
1875             }
1876             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1877             break;
1878         case IP_ADD_MEMBERSHIP:
1879         case IP_DROP_MEMBERSHIP:
1880             if (optlen < sizeof (struct target_ip_mreq) ||
1881                 optlen > sizeof (struct target_ip_mreqn))
1882                 return -TARGET_EINVAL;
1883 
1884             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1885             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1886             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1887             break;
1888 
1889         case IP_BLOCK_SOURCE:
1890         case IP_UNBLOCK_SOURCE:
1891         case IP_ADD_SOURCE_MEMBERSHIP:
1892         case IP_DROP_SOURCE_MEMBERSHIP:
1893             if (optlen != sizeof (struct target_ip_mreq_source))
1894                 return -TARGET_EINVAL;
1895 
1896             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1897             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1898             unlock_user(ip_mreq_source, optval_addr, 0);
1899             break;
1900 
1901         default:
1902             goto unimplemented;
1903         }
1904         break;
1905     case SOL_IPV6:
1906         switch (optname) {
1907         case IPV6_MTU_DISCOVER:
1908         case IPV6_MTU:
1909         case IPV6_V6ONLY:
1910         case IPV6_RECVPKTINFO:
1911         case IPV6_UNICAST_HOPS:
1912         case IPV6_MULTICAST_HOPS:
1913         case IPV6_MULTICAST_LOOP:
1914         case IPV6_RECVERR:
1915         case IPV6_RECVHOPLIMIT:
1916         case IPV6_2292HOPLIMIT:
1917         case IPV6_CHECKSUM:
1918         case IPV6_ADDRFORM:
1919         case IPV6_2292PKTINFO:
1920         case IPV6_RECVTCLASS:
1921         case IPV6_RECVRTHDR:
1922         case IPV6_2292RTHDR:
1923         case IPV6_RECVHOPOPTS:
1924         case IPV6_2292HOPOPTS:
1925         case IPV6_RECVDSTOPTS:
1926         case IPV6_2292DSTOPTS:
1927         case IPV6_TCLASS:
1928 #ifdef IPV6_RECVPATHMTU
1929         case IPV6_RECVPATHMTU:
1930 #endif
1931 #ifdef IPV6_TRANSPARENT
1932         case IPV6_TRANSPARENT:
1933 #endif
1934 #ifdef IPV6_FREEBIND
1935         case IPV6_FREEBIND:
1936 #endif
1937 #ifdef IPV6_RECVORIGDSTADDR
1938         case IPV6_RECVORIGDSTADDR:
1939 #endif
1940             val = 0;
1941             if (optlen < sizeof(uint32_t)) {
1942                 return -TARGET_EINVAL;
1943             }
1944             if (get_user_u32(val, optval_addr)) {
1945                 return -TARGET_EFAULT;
1946             }
1947             ret = get_errno(setsockopt(sockfd, level, optname,
1948                                        &val, sizeof(val)));
1949             break;
1950         case IPV6_PKTINFO:
1951         {
1952             struct in6_pktinfo pki;
1953 
1954             if (optlen < sizeof(pki)) {
1955                 return -TARGET_EINVAL;
1956             }
1957 
1958             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1959                 return -TARGET_EFAULT;
1960             }
1961 
1962             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1963 
1964             ret = get_errno(setsockopt(sockfd, level, optname,
1965                                        &pki, sizeof(pki)));
1966             break;
1967         }
1968         case IPV6_ADD_MEMBERSHIP:
1969         case IPV6_DROP_MEMBERSHIP:
1970         {
1971             struct ipv6_mreq ipv6mreq;
1972 
1973             if (optlen < sizeof(ipv6mreq)) {
1974                 return -TARGET_EINVAL;
1975             }
1976 
1977             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1978                 return -TARGET_EFAULT;
1979             }
1980 
1981             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1982 
1983             ret = get_errno(setsockopt(sockfd, level, optname,
1984                                        &ipv6mreq, sizeof(ipv6mreq)));
1985             break;
1986         }
1987         default:
1988             goto unimplemented;
1989         }
1990         break;
1991     case SOL_ICMPV6:
1992         switch (optname) {
1993         case ICMPV6_FILTER:
1994         {
1995             struct icmp6_filter icmp6f;
1996 
1997             if (optlen > sizeof(icmp6f)) {
1998                 optlen = sizeof(icmp6f);
1999             }
2000 
2001             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2002                 return -TARGET_EFAULT;
2003             }
2004 
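                 /* The ICMPv6 filter is eight 32-bit mask words; swap each
                  * one to host byte order. */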
2005             for (val = 0; val < 8; val++) {
2006                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2007             }
2008 
2009             ret = get_errno(setsockopt(sockfd, level, optname,
2010                                        &icmp6f, optlen));
2011             break;
2012         }
2013         default:
2014             goto unimplemented;
2015         }
2016         break;
2017     case SOL_RAW:
2018         switch (optname) {
2019         case ICMP_FILTER:
2020         case IPV6_CHECKSUM:
2021             /* These options take a u32 value.  */
2022             if (optlen < sizeof(uint32_t)) {
2023                 return -TARGET_EINVAL;
2024             }
2025 
2026             if (get_user_u32(val, optval_addr)) {
2027                 return -TARGET_EFAULT;
2028             }
2029             ret = get_errno(setsockopt(sockfd, level, optname,
2030                                        &val, sizeof(val)));
2031             break;
2032 
2033         default:
2034             goto unimplemented;
2035         }
2036         break;
2037 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2038     case SOL_ALG:
2039         switch (optname) {
2040         case ALG_SET_KEY:
2041         {
2042             char *alg_key = g_malloc(optlen);
2043 
2044             if (!alg_key) {
2045                 return -TARGET_ENOMEM;
2046             }
2047             if (copy_from_user(alg_key, optval_addr, optlen)) {
2048                 g_free(alg_key);
2049                 return -TARGET_EFAULT;
2050             }
2051             ret = get_errno(setsockopt(sockfd, level, optname,
2052                                        alg_key, optlen));
2053             g_free(alg_key);
2054             break;
2055         }
2056         case ALG_SET_AEAD_AUTHSIZE:
2057         {
2058             ret = get_errno(setsockopt(sockfd, level, optname,
2059                                        NULL, optlen));
2060             break;
2061         }
2062         default:
2063             goto unimplemented;
2064         }
2065         break;
2066 #endif
2067     case TARGET_SOL_SOCKET:
2068         switch (optname) {
2069         case TARGET_SO_RCVTIMEO:
2070         {
2071                 struct timeval tv;
2072 
2073                 optname = SO_RCVTIMEO;
2074 
2075 set_timeout:
2076                 if (optlen != sizeof(struct target_timeval)) {
2077                     return -TARGET_EINVAL;
2078                 }
2079 
2080                 if (copy_from_user_timeval(&tv, optval_addr)) {
2081                     return -TARGET_EFAULT;
2082                 }
2083 
2084                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2085                                 &tv, sizeof(tv)));
2086                 return ret;
2087         }
2088         case TARGET_SO_SNDTIMEO:
2089                 optname = SO_SNDTIMEO;
2090                 goto set_timeout;
2091         case TARGET_SO_ATTACH_FILTER:
2092         {
2093                 struct target_sock_fprog *tfprog;
2094                 struct target_sock_filter *tfilter;
2095                 struct sock_fprog fprog;
2096                 struct sock_filter *filter;
2097                 int i;
2098 
2099                 if (optlen != sizeof(*tfprog)) {
2100                     return -TARGET_EINVAL;
2101                 }
2102                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2103                     return -TARGET_EFAULT;
2104                 }
2105                 if (!lock_user_struct(VERIFY_READ, tfilter,
2106                                       tswapal(tfprog->filter), 0)) {
2107                     unlock_user_struct(tfprog, optval_addr, 1);
2108                     return -TARGET_EFAULT;
2109                 }
2110 
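                     /* Rebuild the BPF program for the host: 'code' and 'k'
                      * need byte swapping, 'jt' and 'jf' are single bytes. */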
2111                 fprog.len = tswap16(tfprog->len);
2112                 filter = g_try_new(struct sock_filter, fprog.len);
2113                 if (filter == NULL) {
2114                     unlock_user_struct(tfilter, tfprog->filter, 1);
2115                     unlock_user_struct(tfprog, optval_addr, 1);
2116                     return -TARGET_ENOMEM;
2117                 }
2118                 for (i = 0; i < fprog.len; i++) {
2119                     filter[i].code = tswap16(tfilter[i].code);
2120                     filter[i].jt = tfilter[i].jt;
2121                     filter[i].jf = tfilter[i].jf;
2122                     filter[i].k = tswap32(tfilter[i].k);
2123                 }
2124                 fprog.filter = filter;
2125 
2126                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2127                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2128                 g_free(filter);
2129 
2130                 unlock_user_struct(tfilter, tfprog->filter, 1);
2131                 unlock_user_struct(tfprog, optval_addr, 1);
2132                 return ret;
2133         }
2134         case TARGET_SO_BINDTODEVICE:
2135         {
2136                 char *dev_ifname, *addr_ifname;
2137 
2138                 if (optlen > IFNAMSIZ - 1) {
2139                     optlen = IFNAMSIZ - 1;
2140                 }
2141                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2142                 if (!dev_ifname) {
2143                     return -TARGET_EFAULT;
2144                 }
2145                 optname = SO_BINDTODEVICE;
2146                 addr_ifname = alloca(IFNAMSIZ);
2147                 memcpy(addr_ifname, dev_ifname, optlen);
2148                 addr_ifname[optlen] = 0;
2149                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2150                                            addr_ifname, optlen));
2151                 unlock_user(dev_ifname, optval_addr, 0);
2152                 return ret;
2153         }
2154         case TARGET_SO_LINGER:
2155         {
2156                 struct linger lg;
2157                 struct target_linger *tlg;
2158 
2159                 if (optlen != sizeof(struct target_linger)) {
2160                     return -TARGET_EINVAL;
2161                 }
2162                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2163                     return -TARGET_EFAULT;
2164                 }
2165                 __get_user(lg.l_onoff, &tlg->l_onoff);
2166                 __get_user(lg.l_linger, &tlg->l_linger);
2167                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2168                                 &lg, sizeof(lg)));
2169                 unlock_user_struct(tlg, optval_addr, 0);
2170                 return ret;
2171         }
2172             /* Options with 'int' argument.  */
2173         case TARGET_SO_DEBUG:
2174                 optname = SO_DEBUG;
2175                 break;
2176         case TARGET_SO_REUSEADDR:
2177                 optname = SO_REUSEADDR;
2178                 break;
2179 #ifdef SO_REUSEPORT
2180         case TARGET_SO_REUSEPORT:
2181                 optname = SO_REUSEPORT;
2182                 break;
2183 #endif
2184         case TARGET_SO_TYPE:
2185                 optname = SO_TYPE;
2186                 break;
2187         case TARGET_SO_ERROR:
2188                 optname = SO_ERROR;
2189                 break;
2190         case TARGET_SO_DONTROUTE:
2191                 optname = SO_DONTROUTE;
2192                 break;
2193         case TARGET_SO_BROADCAST:
2194                 optname = SO_BROADCAST;
2195                 break;
2196         case TARGET_SO_SNDBUF:
2197                 optname = SO_SNDBUF;
2198                 break;
2199         case TARGET_SO_SNDBUFFORCE:
2200                 optname = SO_SNDBUFFORCE;
2201                 break;
2202         case TARGET_SO_RCVBUF:
2203                 optname = SO_RCVBUF;
2204                 break;
2205         case TARGET_SO_RCVBUFFORCE:
2206                 optname = SO_RCVBUFFORCE;
2207                 break;
2208         case TARGET_SO_KEEPALIVE:
2209                 optname = SO_KEEPALIVE;
2210                 break;
2211         case TARGET_SO_OOBINLINE:
2212                 optname = SO_OOBINLINE;
2213                 break;
2214         case TARGET_SO_NO_CHECK:
2215                 optname = SO_NO_CHECK;
2216                 break;
2217         case TARGET_SO_PRIORITY:
2218                 optname = SO_PRIORITY;
2219                 break;
2220 #ifdef SO_BSDCOMPAT
2221         case TARGET_SO_BSDCOMPAT:
2222                 optname = SO_BSDCOMPAT;
2223                 break;
2224 #endif
2225         case TARGET_SO_PASSCRED:
2226                 optname = SO_PASSCRED;
2227                 break;
2228         case TARGET_SO_PASSSEC:
2229                 optname = SO_PASSSEC;
2230                 break;
2231         case TARGET_SO_TIMESTAMP:
2232                 optname = SO_TIMESTAMP;
2233                 break;
2234         case TARGET_SO_RCVLOWAT:
2235                 optname = SO_RCVLOWAT;
2236                 break;
2237         default:
2238             goto unimplemented;
2239         }
2240         if (optlen < sizeof(uint32_t))
2241             return -TARGET_EINVAL;
2242 
2243         if (get_user_u32(val, optval_addr))
2244             return -TARGET_EFAULT;
2245         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2246         break;
2247     default:
2248     unimplemented:
2249         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2250         ret = -TARGET_ENOPROTOOPT;
2251     }
2252     return ret;
2253 }
2254 
2255 /* do_getsockopt() must return target values and target errnos. */
2256 static abi_long do_getsockopt(int sockfd, int level, int optname,
2257                               abi_ulong optval_addr, abi_ulong optlen)
2258 {
2259     abi_long ret;
2260     int len, val;
2261     socklen_t lv;
2262 
2263     switch(level) {
2264     case TARGET_SOL_SOCKET:
2265         level = SOL_SOCKET;
2266         switch (optname) {
2267         /* These don't just return a single integer */
2268         case TARGET_SO_RCVTIMEO:
2269         case TARGET_SO_SNDTIMEO:
2270         case TARGET_SO_PEERNAME:
2271             goto unimplemented;
2272         case TARGET_SO_PEERCRED: {
2273             struct ucred cr;
2274             socklen_t crlen;
2275             struct target_ucred *tcr;
2276 
2277             if (get_user_u32(len, optlen)) {
2278                 return -TARGET_EFAULT;
2279             }
2280             if (len < 0) {
2281                 return -TARGET_EINVAL;
2282             }
2283 
2284             crlen = sizeof(cr);
2285             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2286                                        &cr, &crlen));
2287             if (ret < 0) {
2288                 return ret;
2289             }
2290             if (len > crlen) {
2291                 len = crlen;
2292             }
2293             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2294                 return -TARGET_EFAULT;
2295             }
2296             __put_user(cr.pid, &tcr->pid);
2297             __put_user(cr.uid, &tcr->uid);
2298             __put_user(cr.gid, &tcr->gid);
2299             unlock_user_struct(tcr, optval_addr, 1);
2300             if (put_user_u32(len, optlen)) {
2301                 return -TARGET_EFAULT;
2302             }
2303             break;
2304         }
2305         case TARGET_SO_LINGER:
2306         {
2307             struct linger lg;
2308             socklen_t lglen;
2309             struct target_linger *tlg;
2310 
2311             if (get_user_u32(len, optlen)) {
2312                 return -TARGET_EFAULT;
2313             }
2314             if (len < 0) {
2315                 return -TARGET_EINVAL;
2316             }
2317 
2318             lglen = sizeof(lg);
2319             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2320                                        &lg, &lglen));
2321             if (ret < 0) {
2322                 return ret;
2323             }
2324             if (len > lglen) {
2325                 len = lglen;
2326             }
2327             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2328                 return -TARGET_EFAULT;
2329             }
2330             __put_user(lg.l_onoff, &tlg->l_onoff);
2331             __put_user(lg.l_linger, &tlg->l_linger);
2332             unlock_user_struct(tlg, optval_addr, 1);
2333             if (put_user_u32(len, optlen)) {
2334                 return -TARGET_EFAULT;
2335             }
2336             break;
2337         }
2338         /* Options with 'int' argument.  */
2339         case TARGET_SO_DEBUG:
2340             optname = SO_DEBUG;
2341             goto int_case;
2342         case TARGET_SO_REUSEADDR:
2343             optname = SO_REUSEADDR;
2344             goto int_case;
2345 #ifdef SO_REUSEPORT
2346         case TARGET_SO_REUSEPORT:
2347             optname = SO_REUSEPORT;
2348             goto int_case;
2349 #endif
2350         case TARGET_SO_TYPE:
2351             optname = SO_TYPE;
2352             goto int_case;
2353         case TARGET_SO_ERROR:
2354             optname = SO_ERROR;
2355             goto int_case;
2356         case TARGET_SO_DONTROUTE:
2357             optname = SO_DONTROUTE;
2358             goto int_case;
2359         case TARGET_SO_BROADCAST:
2360             optname = SO_BROADCAST;
2361             goto int_case;
2362         case TARGET_SO_SNDBUF:
2363             optname = SO_SNDBUF;
2364             goto int_case;
2365         case TARGET_SO_RCVBUF:
2366             optname = SO_RCVBUF;
2367             goto int_case;
2368         case TARGET_SO_KEEPALIVE:
2369             optname = SO_KEEPALIVE;
2370             goto int_case;
2371         case TARGET_SO_OOBINLINE:
2372             optname = SO_OOBINLINE;
2373             goto int_case;
2374         case TARGET_SO_NO_CHECK:
2375             optname = SO_NO_CHECK;
2376             goto int_case;
2377         case TARGET_SO_PRIORITY:
2378             optname = SO_PRIORITY;
2379             goto int_case;
2380 #ifdef SO_BSDCOMPAT
2381         case TARGET_SO_BSDCOMPAT:
2382             optname = SO_BSDCOMPAT;
2383             goto int_case;
2384 #endif
2385         case TARGET_SO_PASSCRED:
2386             optname = SO_PASSCRED;
2387             goto int_case;
2388         case TARGET_SO_TIMESTAMP:
2389             optname = SO_TIMESTAMP;
2390             goto int_case;
2391         case TARGET_SO_RCVLOWAT:
2392             optname = SO_RCVLOWAT;
2393             goto int_case;
2394         case TARGET_SO_ACCEPTCONN:
2395             optname = SO_ACCEPTCONN;
2396             goto int_case;
2397         default:
2398             goto int_case;
2399         }
2400         break;
2401     case SOL_TCP:
2402         /* TCP options all take an 'int' value.  */
2403     int_case:
2404         if (get_user_u32(len, optlen))
2405             return -TARGET_EFAULT;
2406         if (len < 0)
2407             return -TARGET_EINVAL;
2408         lv = sizeof(lv);
2409         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2410         if (ret < 0)
2411             return ret;
2412         if (optname == SO_TYPE) {
2413             val = host_to_target_sock_type(val);
2414         }
2415         if (len > lv)
2416             len = lv;
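             /* Store a full 32-bit value or a single byte, depending on how
              * much room the guest asked for. */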
2417         if (len == 4) {
2418             if (put_user_u32(val, optval_addr))
2419                 return -TARGET_EFAULT;
2420         } else {
2421             if (put_user_u8(val, optval_addr))
2422                 return -TARGET_EFAULT;
2423         }
2424         if (put_user_u32(len, optlen))
2425             return -TARGET_EFAULT;
2426         break;
2427     case SOL_IP:
2428         switch(optname) {
2429         case IP_TOS:
2430         case IP_TTL:
2431         case IP_HDRINCL:
2432         case IP_ROUTER_ALERT:
2433         case IP_RECVOPTS:
2434         case IP_RETOPTS:
2435         case IP_PKTINFO:
2436         case IP_MTU_DISCOVER:
2437         case IP_RECVERR:
2438         case IP_RECVTOS:
2439 #ifdef IP_FREEBIND
2440         case IP_FREEBIND:
2441 #endif
2442         case IP_MULTICAST_TTL:
2443         case IP_MULTICAST_LOOP:
2444             if (get_user_u32(len, optlen))
2445                 return -TARGET_EFAULT;
2446             if (len < 0)
2447                 return -TARGET_EINVAL;
2448             lv = sizeof(lv);
2449             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2450             if (ret < 0)
2451                 return ret;
2452             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2453                 len = 1;
2454                 if (put_user_u32(len, optlen)
2455                     || put_user_u8(val, optval_addr))
2456                     return -TARGET_EFAULT;
2457             } else {
2458                 if (len > sizeof(int))
2459                     len = sizeof(int);
2460                 if (put_user_u32(len, optlen)
2461                     || put_user_u32(val, optval_addr))
2462                     return -TARGET_EFAULT;
2463             }
2464             break;
2465         default:
2466             ret = -TARGET_ENOPROTOOPT;
2467             break;
2468         }
2469         break;
2470     case SOL_IPV6:
2471         switch (optname) {
2472         case IPV6_MTU_DISCOVER:
2473         case IPV6_MTU:
2474         case IPV6_V6ONLY:
2475         case IPV6_RECVPKTINFO:
2476         case IPV6_UNICAST_HOPS:
2477         case IPV6_MULTICAST_HOPS:
2478         case IPV6_MULTICAST_LOOP:
2479         case IPV6_RECVERR:
2480         case IPV6_RECVHOPLIMIT:
2481         case IPV6_2292HOPLIMIT:
2482         case IPV6_CHECKSUM:
2483         case IPV6_ADDRFORM:
2484         case IPV6_2292PKTINFO:
2485         case IPV6_RECVTCLASS:
2486         case IPV6_RECVRTHDR:
2487         case IPV6_2292RTHDR:
2488         case IPV6_RECVHOPOPTS:
2489         case IPV6_2292HOPOPTS:
2490         case IPV6_RECVDSTOPTS:
2491         case IPV6_2292DSTOPTS:
2492         case IPV6_TCLASS:
2493 #ifdef IPV6_RECVPATHMTU
2494         case IPV6_RECVPATHMTU:
2495 #endif
2496 #ifdef IPV6_TRANSPARENT
2497         case IPV6_TRANSPARENT:
2498 #endif
2499 #ifdef IPV6_FREEBIND
2500         case IPV6_FREEBIND:
2501 #endif
2502 #ifdef IPV6_RECVORIGDSTADDR
2503         case IPV6_RECVORIGDSTADDR:
2504 #endif
2505             if (get_user_u32(len, optlen))
2506                 return -TARGET_EFAULT;
2507             if (len < 0)
2508                 return -TARGET_EINVAL;
2509             lv = sizeof(lv);
2510             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2511             if (ret < 0)
2512                 return ret;
2513             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2514                 len = 1;
2515                 if (put_user_u32(len, optlen)
2516                     || put_user_u8(val, optval_addr))
2517                     return -TARGET_EFAULT;
2518             } else {
2519                 if (len > sizeof(int))
2520                     len = sizeof(int);
2521                 if (put_user_u32(len, optlen)
2522                     || put_user_u32(val, optval_addr))
2523                     return -TARGET_EFAULT;
2524             }
2525             break;
2526         default:
2527             ret = -TARGET_ENOPROTOOPT;
2528             break;
2529         }
2530         break;
2531     default:
2532     unimplemented:
2533         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2534                  level, optname);
2535         ret = -TARGET_EOPNOTSUPP;
2536         break;
2537     }
2538     return ret;
2539 }
2540 
2541 /* Convert a target low/high pair representing a file offset into the host
2542  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2543  * as the kernel doesn't handle them either.
2544  */
2545 static void target_to_host_low_high(abi_ulong tlow,
2546                                     abi_ulong thigh,
2547                                     unsigned long *hlow,
2548                                     unsigned long *hhigh)
2549 {
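         /* The double shift by half the width avoids shifting by the full
          * type width (undefined behaviour) when the long type is 64 bits. */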
2550     uint64_t off = tlow |
2551         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2552         TARGET_LONG_BITS / 2;
2553 
2554     *hlow = off;
2555     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2556 }
2557 
2558 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2559                                 abi_ulong count, int copy)
2560 {
2561     struct target_iovec *target_vec;
2562     struct iovec *vec;
2563     abi_ulong total_len, max_len;
2564     int i;
2565     int err = 0;
2566     bool bad_address = false;
2567 
2568     if (count == 0) {
2569         errno = 0;
2570         return NULL;
2571     }
2572     if (count > IOV_MAX) {
2573         errno = EINVAL;
2574         return NULL;
2575     }
2576 
2577     vec = g_try_new0(struct iovec, count);
2578     if (vec == NULL) {
2579         errno = ENOMEM;
2580         return NULL;
2581     }
2582 
2583     target_vec = lock_user(VERIFY_READ, target_addr,
2584                            count * sizeof(struct target_iovec), 1);
2585     if (target_vec == NULL) {
2586         err = EFAULT;
2587         goto fail2;
2588     }
2589 
2590     /* ??? If host page size > target page size, this will result in a
2591        value larger than what we can actually support.  */
2592     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2593     total_len = 0;
2594 
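         /* The loop below clamps the combined length to max_len, which
          * matches the kernel's MAX_RW_COUNT limit (INT_MAX rounded down
          * to a page boundary). */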
2595     for (i = 0; i < count; i++) {
2596         abi_ulong base = tswapal(target_vec[i].iov_base);
2597         abi_long len = tswapal(target_vec[i].iov_len);
2598 
2599         if (len < 0) {
2600             err = EINVAL;
2601             goto fail;
2602         } else if (len == 0) {
2603             /* Zero length pointer is ignored.  */
2604             vec[i].iov_base = 0;
2605         } else {
2606             vec[i].iov_base = lock_user(type, base, len, copy);
2607             /* If the first buffer pointer is bad, this is a fault.  But
2608              * subsequent bad buffers will result in a partial write; this
2609              * is realized by filling the vector with null pointers and
2610              * zero lengths. */
2611             if (!vec[i].iov_base) {
2612                 if (i == 0) {
2613                     err = EFAULT;
2614                     goto fail;
2615                 } else {
2616                     bad_address = true;
2617                 }
2618             }
2619             if (bad_address) {
2620                 len = 0;
2621             }
2622             if (len > max_len - total_len) {
2623                 len = max_len - total_len;
2624             }
2625         }
2626         vec[i].iov_len = len;
2627         total_len += len;
2628     }
2629 
2630     unlock_user(target_vec, target_addr, 0);
2631     return vec;
2632 
2633  fail:
2634     while (--i >= 0) {
2635         if (tswapal(target_vec[i].iov_len) > 0) {
2636             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2637         }
2638     }
2639     unlock_user(target_vec, target_addr, 0);
2640  fail2:
2641     g_free(vec);
2642     errno = err;
2643     return NULL;
2644 }
2645 
2646 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2647                          abi_ulong count, int copy)
2648 {
2649     struct target_iovec *target_vec;
2650     int i;
2651 
2652     target_vec = lock_user(VERIFY_READ, target_addr,
2653                            count * sizeof(struct target_iovec), 1);
2654     if (target_vec) {
2655         for (i = 0; i < count; i++) {
2656             abi_ulong base = tswapal(target_vec[i].iov_base);
2657             abi_long len = tswapal(target_vec[i].iov_len);
2658             if (len < 0) {
2659                 break;
2660             }
2661             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2662         }
2663         unlock_user(target_vec, target_addr, 0);
2664     }
2665 
2666     g_free(vec);
2667 }
2668 
2669 static inline int target_to_host_sock_type(int *type)
2670 {
2671     int host_type = 0;
2672     int target_type = *type;
2673 
2674     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2675     case TARGET_SOCK_DGRAM:
2676         host_type = SOCK_DGRAM;
2677         break;
2678     case TARGET_SOCK_STREAM:
2679         host_type = SOCK_STREAM;
2680         break;
2681     default:
2682         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2683         break;
2684     }
2685     if (target_type & TARGET_SOCK_CLOEXEC) {
2686 #if defined(SOCK_CLOEXEC)
2687         host_type |= SOCK_CLOEXEC;
2688 #else
2689         return -TARGET_EINVAL;
2690 #endif
2691     }
2692     if (target_type & TARGET_SOCK_NONBLOCK) {
2693 #if defined(SOCK_NONBLOCK)
2694         host_type |= SOCK_NONBLOCK;
2695 #elif !defined(O_NONBLOCK)
2696         return -TARGET_EINVAL;
2697 #endif
2698     }
2699     *type = host_type;
2700     return 0;
2701 }
2702 
2703 /* Try to emulate socket type flags after socket creation.  */
2704 static int sock_flags_fixup(int fd, int target_type)
2705 {
2706 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2707     if (target_type & TARGET_SOCK_NONBLOCK) {
2708         int flags = fcntl(fd, F_GETFL);
2709         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2710             close(fd);
2711             return -TARGET_EINVAL;
2712         }
2713     }
2714 #endif
2715     return fd;
2716 }
2717 
2718 /* do_socket() must return target values and target errnos. */
2719 static abi_long do_socket(int domain, int type, int protocol)
2720 {
2721     int target_type = type;
2722     int ret;
2723 
2724     ret = target_to_host_sock_type(&type);
2725     if (ret) {
2726         return ret;
2727     }
2728 
2729     if (domain == PF_NETLINK && !(
2730 #ifdef CONFIG_RTNETLINK
2731          protocol == NETLINK_ROUTE ||
2732 #endif
2733          protocol == NETLINK_KOBJECT_UEVENT ||
2734          protocol == NETLINK_AUDIT)) {
2735         return -EPFNOSUPPORT;
2736     }
2737 
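         /* Packet sockets take an ethertype in network byte order as the
          * protocol; tswap16() preserves the on-wire value the guest
          * intended when target and host endianness differ. */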
2738     if (domain == AF_PACKET ||
2739         (domain == AF_INET && type == SOCK_PACKET)) {
2740         protocol = tswap16(protocol);
2741     }
2742 
2743     ret = get_errno(socket(domain, type, protocol));
2744     if (ret >= 0) {
2745         ret = sock_flags_fixup(ret, target_type);
2746         if (type == SOCK_PACKET) {
2747             /* Handle an obsolete case: if the socket type is
2748              * SOCK_PACKET, the socket is bound by device name.
2749              */
2750             fd_trans_register(ret, &target_packet_trans);
2751         } else if (domain == PF_NETLINK) {
2752             switch (protocol) {
2753 #ifdef CONFIG_RTNETLINK
2754             case NETLINK_ROUTE:
2755                 fd_trans_register(ret, &target_netlink_route_trans);
2756                 break;
2757 #endif
2758             case NETLINK_KOBJECT_UEVENT:
2759                 /* nothing to do: messages are strings */
2760                 break;
2761             case NETLINK_AUDIT:
2762                 fd_trans_register(ret, &target_netlink_audit_trans);
2763                 break;
2764             default:
2765                 g_assert_not_reached();
2766             }
2767         }
2768     }
2769     return ret;
2770 }
2771 
2772 /* do_bind() must return target values and target errnos. */
2773 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2774                         socklen_t addrlen)
2775 {
2776     void *addr;
2777     abi_long ret;
2778 
2779     if ((int)addrlen < 0) {
2780         return -TARGET_EINVAL;
2781     }
2782 
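         /* One extra byte, since target_to_host_sockaddr() may lengthen an
          * AF_UNIX address by one byte to take in its terminating NUL. */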
2783     addr = alloca(addrlen+1);
2784 
2785     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2786     if (ret)
2787         return ret;
2788 
2789     return get_errno(bind(sockfd, addr, addrlen));
2790 }
2791 
2792 /* do_connect() must return target values and target errnos. */
2793 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2794                            socklen_t addrlen)
2795 {
2796     void *addr;
2797     abi_long ret;
2798 
2799     if ((int)addrlen < 0) {
2800         return -TARGET_EINVAL;
2801     }
2802 
2803     addr = alloca(addrlen+1);
2804 
2805     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2806     if (ret)
2807         return ret;
2808 
2809     return get_errno(safe_connect(sockfd, addr, addrlen));
2810 }
2811 
2812 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2813 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2814                                       int flags, int send)
2815 {
2816     abi_long ret, len;
2817     struct msghdr msg;
2818     abi_ulong count;
2819     struct iovec *vec;
2820     abi_ulong target_vec;
2821 
2822     if (msgp->msg_name) {
2823         msg.msg_namelen = tswap32(msgp->msg_namelen);
2824         msg.msg_name = alloca(msg.msg_namelen+1);
2825         ret = target_to_host_sockaddr(fd, msg.msg_name,
2826                                       tswapal(msgp->msg_name),
2827                                       msg.msg_namelen);
2828         if (ret == -TARGET_EFAULT) {
2829             /* For connected sockets msg_name and msg_namelen must
2830              * be ignored, so returning EFAULT immediately is wrong.
2831              * Instead, pass a bad msg_name to the host kernel, and
2832              * let it decide whether to return EFAULT or not.
2833              */
2834             msg.msg_name = (void *)-1;
2835         } else if (ret) {
2836             goto out2;
2837         }
2838     } else {
2839         msg.msg_name = NULL;
2840         msg.msg_namelen = 0;
2841     }
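         /* Allocate double the guest's control buffer length: host cmsg
          * headers and payloads may be larger than the target's (see
          * target_to_host_cmsg). */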
2842     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2843     msg.msg_control = alloca(msg.msg_controllen);
2844     memset(msg.msg_control, 0, msg.msg_controllen);
2845 
2846     msg.msg_flags = tswap32(msgp->msg_flags);
2847 
2848     count = tswapal(msgp->msg_iovlen);
2849     target_vec = tswapal(msgp->msg_iov);
2850 
2851     if (count > IOV_MAX) {
2852         /* sendmsg/recvmsg return a different errno for this condition than
2853          * readv/writev, so we must catch it here before lock_iovec() does.
2854          */
2855         ret = -TARGET_EMSGSIZE;
2856         goto out2;
2857     }
2858 
2859     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2860                      target_vec, count, send);
2861     if (vec == NULL) {
2862         ret = -host_to_target_errno(errno);
2863         goto out2;
2864     }
2865     msg.msg_iovlen = count;
2866     msg.msg_iov = vec;
2867 
2868     if (send) {
2869         if (fd_trans_target_to_host_data(fd)) {
2870             void *host_msg;
2871 
2872             host_msg = g_malloc(msg.msg_iov->iov_len);
2873             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2874             ret = fd_trans_target_to_host_data(fd)(host_msg,
2875                                                    msg.msg_iov->iov_len);
2876             if (ret >= 0) {
2877                 msg.msg_iov->iov_base = host_msg;
2878                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2879             }
2880             g_free(host_msg);
2881         } else {
2882             ret = target_to_host_cmsg(&msg, msgp);
2883             if (ret == 0) {
2884                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2885             }
2886         }
2887     } else {
2888         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2889         if (!is_error(ret)) {
2890             len = ret;
2891             if (fd_trans_host_to_target_data(fd)) {
2892                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2893                                                MIN(msg.msg_iov->iov_len, len));
2894             } else {
2895                 ret = host_to_target_cmsg(msgp, &msg);
2896             }
2897             if (!is_error(ret)) {
2898                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2899                 msgp->msg_flags = tswap32(msg.msg_flags);
2900                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2901                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2902                                     msg.msg_name, msg.msg_namelen);
2903                     if (ret) {
2904                         goto out;
2905                     }
2906                 }
2907 
2908                 ret = len;
2909             }
2910         }
2911     }
2912 
2913 out:
2914     unlock_iovec(vec, target_vec, count, !send);
2915 out2:
2916     return ret;
2917 }
2918 
2919 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2920                                int flags, int send)
2921 {
2922     abi_long ret;
2923     struct target_msghdr *msgp;
2924 
2925     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2926                           msgp,
2927                           target_msg,
2928                           send ? 1 : 0)) {
2929         return -TARGET_EFAULT;
2930     }
2931     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2932     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2933     return ret;
2934 }
2935 
2936 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2937  * so it might not have this *mmsg-specific flag either.
2938  */
2939 #ifndef MSG_WAITFORONE
2940 #define MSG_WAITFORONE 0x10000
2941 #endif
2942 
2943 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2944                                 unsigned int vlen, unsigned int flags,
2945                                 int send)
2946 {
2947     struct target_mmsghdr *mmsgp;
2948     abi_long ret = 0;
2949     int i;
2950 
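         /* The kernel silently clamps vlen to UIO_MAXIOV rather than
          * failing; do the same. */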
2951     if (vlen > UIO_MAXIOV) {
2952         vlen = UIO_MAXIOV;
2953     }
2954 
2955     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2956     if (!mmsgp) {
2957         return -TARGET_EFAULT;
2958     }
2959 
2960     for (i = 0; i < vlen; i++) {
2961         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2962         if (is_error(ret)) {
2963             break;
2964         }
2965         mmsgp[i].msg_len = tswap32(ret);
2966         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2967         if (flags & MSG_WAITFORONE) {
2968             flags |= MSG_DONTWAIT;
2969         }
2970     }
2971 
2972     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2973 
2974     /* Return number of datagrams sent if we sent any at all;
2975      * otherwise return the error.
2976      */
2977     if (i) {
2978         return i;
2979     }
2980     return ret;
2981 }
2982 
2983 /* do_accept4() must return target values and target errnos. */
2984 static abi_long do_accept4(int fd, abi_ulong target_addr,
2985                            abi_ulong target_addrlen_addr, int flags)
2986 {
2987     socklen_t addrlen, ret_addrlen;
2988     void *addr;
2989     abi_long ret;
2990     int host_flags;
2991 
2992     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2993 
2994     if (target_addr == 0) {
2995         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2996     }
2997 
2998     /* Linux returns EINVAL if the addrlen pointer is invalid. */
2999     if (get_user_u32(addrlen, target_addrlen_addr))
3000         return -TARGET_EINVAL;
3001 
3002     if ((int)addrlen < 0) {
3003         return -TARGET_EINVAL;
3004     }
3005 
3006     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3007         return -TARGET_EINVAL;
3008 
3009     addr = alloca(addrlen);
3010 
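         /* Keep the host's reported address length separate so the guest
          * sees the true size even when its buffer was smaller. */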
3011     ret_addrlen = addrlen;
3012     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3013     if (!is_error(ret)) {
3014         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3015         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3016             ret = -TARGET_EFAULT;
3017         }
3018     }
3019     return ret;
3020 }
3021 
3022 /* do_getpeername() must return target values and target errnos. */
3023 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3024                                abi_ulong target_addrlen_addr)
3025 {
3026     socklen_t addrlen, ret_addrlen;
3027     void *addr;
3028     abi_long ret;
3029 
3030     if (get_user_u32(addrlen, target_addrlen_addr))
3031         return -TARGET_EFAULT;
3032 
3033     if ((int)addrlen < 0) {
3034         return -TARGET_EINVAL;
3035     }
3036 
3037     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3038         return -TARGET_EFAULT;
3039 
3040     addr = alloca(addrlen);
3041 
3042     ret_addrlen = addrlen;
3043     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3044     if (!is_error(ret)) {
3045         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3046         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3047             ret = -TARGET_EFAULT;
3048         }
3049     }
3050     return ret;
3051 }
3052 
3053 /* do_getsockname() must return target values and target errnos. */
3054 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3055                                abi_ulong target_addrlen_addr)
3056 {
3057     socklen_t addrlen, ret_addrlen;
3058     void *addr;
3059     abi_long ret;
3060 
3061     if (get_user_u32(addrlen, target_addrlen_addr))
3062         return -TARGET_EFAULT;
3063 
3064     if ((int)addrlen < 0) {
3065         return -TARGET_EINVAL;
3066     }
3067 
3068     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3069         return -TARGET_EFAULT;
3070 
3071     addr = alloca(addrlen);
3072 
3073     ret_addrlen = addrlen;
3074     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3075     if (!is_error(ret)) {
3076         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3077         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3078             ret = -TARGET_EFAULT;
3079         }
3080     }
3081     return ret;
3082 }
3083 
3084 /* do_socketpair() must return target values and target errnos. */
3085 static abi_long do_socketpair(int domain, int type, int protocol,
3086                               abi_ulong target_tab_addr)
3087 {
3088     int tab[2];
3089     abi_long ret;
3090 
3091     target_to_host_sock_type(&type);
3092 
3093     ret = get_errno(socketpair(domain, type, protocol, tab));
3094     if (!is_error(ret)) {
3095         if (put_user_s32(tab[0], target_tab_addr)
3096             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3097             ret = -TARGET_EFAULT;
3098     }
3099     return ret;
3100 }
3101 
3102 /* do_sendto() must return target values and target errnos. */
3103 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3104                           abi_ulong target_addr, socklen_t addrlen)
3105 {
3106     void *addr;
3107     void *host_msg;
3108     void *copy_msg = NULL;
3109     abi_long ret;
3110 
3111     if ((int)addrlen < 0) {
3112         return -TARGET_EINVAL;
3113     }
3114 
3115     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3116     if (!host_msg)
3117         return -TARGET_EFAULT;
3118     if (fd_trans_target_to_host_data(fd)) {
3119         copy_msg = host_msg;
3120         host_msg = g_malloc(len);
3121         memcpy(host_msg, copy_msg, len);
3122         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3123         if (ret < 0) {
3124             goto fail;
3125         }
3126     }
3127     if (target_addr) {
3128         addr = alloca(addrlen+1);
3129         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3130         if (ret) {
3131             goto fail;
3132         }
3133         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3134     } else {
3135         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3136     }
3137 fail:
3138     if (copy_msg) {
3139         g_free(host_msg);
3140         host_msg = copy_msg;
3141     }
3142     unlock_user(host_msg, msg, 0);
3143     return ret;
3144 }
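/*
 * Illustrative note: the fd_trans_target_to_host_data() hook used in
 * do_sendto() above is a per-fd callback registered via fd-trans.h
 * (e.g. for netlink sockets whose payload needs byte-order fixups).
 * The payload is first duplicated into a scratch buffer so the hook can
 * rewrite it freely, while the locked guest buffer (kept in copy_msg)
 * is later unlocked unmodified.
 */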
3145 
3146 /* do_recvfrom() must return target values and target errnos. */
3147 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3148                             abi_ulong target_addr,
3149                             abi_ulong target_addrlen)
3150 {
3151     socklen_t addrlen, ret_addrlen;
3152     void *addr;
3153     void *host_msg;
3154     abi_long ret;
3155 
3156     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3157     if (!host_msg)
3158         return -TARGET_EFAULT;
3159     if (target_addr) {
3160         if (get_user_u32(addrlen, target_addrlen)) {
3161             ret = -TARGET_EFAULT;
3162             goto fail;
3163         }
3164         if ((int)addrlen < 0) {
3165             ret = -TARGET_EINVAL;
3166             goto fail;
3167         }
3168         addr = alloca(addrlen);
3169         ret_addrlen = addrlen;
3170         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3171                                       addr, &ret_addrlen));
3172     } else {
3173         addr = NULL; /* To keep compiler quiet.  */
3174         addrlen = 0; /* To keep compiler quiet.  */
3175         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3176     }
3177     if (!is_error(ret)) {
3178         if (fd_trans_host_to_target_data(fd)) {
3179             abi_long trans;
3180             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3181             if (is_error(trans)) {
3182                 ret = trans;
3183                 goto fail;
3184             }
3185         }
3186         if (target_addr) {
3187             host_to_target_sockaddr(target_addr, addr,
3188                                     MIN(addrlen, ret_addrlen));
3189             if (put_user_u32(ret_addrlen, target_addrlen)) {
3190                 ret = -TARGET_EFAULT;
3191                 goto fail;
3192             }
3193         }
3194         unlock_user(host_msg, msg, len);
3195     } else {
3196 fail:
3197         unlock_user(host_msg, msg, 0);
3198     }
3199     return ret;
3200 }
3201 
3202 #ifdef TARGET_NR_socketcall
3203 /* do_socketcall() must return target values and target errnos. */
3204 static abi_long do_socketcall(int num, abi_ulong vptr)
3205 {
3206     static const unsigned nargs[] = { /* number of arguments per operation */
3207         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3208         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3209         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3210         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3211         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3212         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3213         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3214         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3215         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3216         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3217         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3218         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3219         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3220         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3221         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3222         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3223         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3224         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3225         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3226         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3227     };
3228     abi_long a[6]; /* max 6 args */
3229     unsigned i;
3230 
3231     /* check the range of the first argument num */
3232     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3233     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3234         return -TARGET_EINVAL;
3235     }
3236     /* ensure we have space for args */
3237     if (nargs[num] > ARRAY_SIZE(a)) {
3238         return -TARGET_EINVAL;
3239     }
3240     /* collect the arguments in a[] according to nargs[] */
3241     for (i = 0; i < nargs[num]; ++i) {
3242         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3243             return -TARGET_EFAULT;
3244         }
3245     }
3246     /* now that we have the args, invoke the appropriate underlying function */
3247     switch (num) {
3248     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3249         return do_socket(a[0], a[1], a[2]);
3250     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3251         return do_bind(a[0], a[1], a[2]);
3252     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3253         return do_connect(a[0], a[1], a[2]);
3254     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3255         return get_errno(listen(a[0], a[1]));
3256     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3257         return do_accept4(a[0], a[1], a[2], 0);
3258     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3259         return do_getsockname(a[0], a[1], a[2]);
3260     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3261         return do_getpeername(a[0], a[1], a[2]);
3262     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3263         return do_socketpair(a[0], a[1], a[2], a[3]);
3264     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3265         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3266     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3267         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3268     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3269         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3270     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3271         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3272     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3273         return get_errno(shutdown(a[0], a[1]));
3274     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3275         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3276     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3277         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3278     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3279         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3280     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3281         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3282     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3283         return do_accept4(a[0], a[1], a[2], a[3]);
3284     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3285         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3286     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3287         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3288     default:
3289         gemu_log("Unsupported socketcall: %d\n", num);
3290         return -TARGET_EINVAL;
3291     }
3292 }
3293 #endif
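/*
 * Illustrative sketch (assumed guest-side behaviour, not original code):
 * on targets that funnel all socket operations through the single
 * socketcall entry point, the guest libc packs the arguments into an
 * array in guest memory and passes its address as 'vptr'.  A guest
 * connect() therefore reaches do_socketcall() roughly as
 *
 *     abi_long a[3] = { sockfd, addr, addrlen };   // lives in guest memory
 *     do_socketcall(TARGET_SYS_CONNECT, <guest address of a>);
 *
 * which is why nargs[] records how many abi_longs to fetch with
 * get_user_ual() before dispatching to do_connect().
 */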
3294 
3295 #define N_SHM_REGIONS	32
3296 
3297 static struct shm_region {
3298     abi_ulong start;
3299     abi_ulong size;
3300     bool in_use;
3301 } shm_regions[N_SHM_REGIONS];
3302 
3303 #ifndef TARGET_SEMID64_DS
3304 /* asm-generic version of this struct */
3305 struct target_semid64_ds
3306 {
3307   struct target_ipc_perm sem_perm;
3308   abi_ulong sem_otime;
3309 #if TARGET_ABI_BITS == 32
3310   abi_ulong __unused1;
3311 #endif
3312   abi_ulong sem_ctime;
3313 #if TARGET_ABI_BITS == 32
3314   abi_ulong __unused2;
3315 #endif
3316   abi_ulong sem_nsems;
3317   abi_ulong __unused3;
3318   abi_ulong __unused4;
3319 };
3320 #endif
3321 
3322 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3323                                                abi_ulong target_addr)
3324 {
3325     struct target_ipc_perm *target_ip;
3326     struct target_semid64_ds *target_sd;
3327 
3328     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3329         return -TARGET_EFAULT;
3330     target_ip = &(target_sd->sem_perm);
3331     host_ip->__key = tswap32(target_ip->__key);
3332     host_ip->uid = tswap32(target_ip->uid);
3333     host_ip->gid = tswap32(target_ip->gid);
3334     host_ip->cuid = tswap32(target_ip->cuid);
3335     host_ip->cgid = tswap32(target_ip->cgid);
3336 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3337     host_ip->mode = tswap32(target_ip->mode);
3338 #else
3339     host_ip->mode = tswap16(target_ip->mode);
3340 #endif
3341 #if defined(TARGET_PPC)
3342     host_ip->__seq = tswap32(target_ip->__seq);
3343 #else
3344     host_ip->__seq = tswap16(target_ip->__seq);
3345 #endif
3346     unlock_user_struct(target_sd, target_addr, 0);
3347     return 0;
3348 }
3349 
3350 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3351                                                struct ipc_perm *host_ip)
3352 {
3353     struct target_ipc_perm *target_ip;
3354     struct target_semid64_ds *target_sd;
3355 
3356     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3357         return -TARGET_EFAULT;
3358     target_ip = &(target_sd->sem_perm);
3359     target_ip->__key = tswap32(host_ip->__key);
3360     target_ip->uid = tswap32(host_ip->uid);
3361     target_ip->gid = tswap32(host_ip->gid);
3362     target_ip->cuid = tswap32(host_ip->cuid);
3363     target_ip->cgid = tswap32(host_ip->cgid);
3364 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3365     target_ip->mode = tswap32(host_ip->mode);
3366 #else
3367     target_ip->mode = tswap16(host_ip->mode);
3368 #endif
3369 #if defined(TARGET_PPC)
3370     target_ip->__seq = tswap32(host_ip->__seq);
3371 #else
3372     target_ip->__seq = tswap16(host_ip->__seq);
3373 #endif
3374     unlock_user_struct(target_sd, target_addr, 1);
3375     return 0;
3376 }
3377 
3378 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3379                                                abi_ulong target_addr)
3380 {
3381     struct target_semid64_ds *target_sd;
3382 
3383     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3384         return -TARGET_EFAULT;
3385     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3386         return -TARGET_EFAULT;
3387     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3388     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3389     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3390     unlock_user_struct(target_sd, target_addr, 0);
3391     return 0;
3392 }
3393 
3394 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3395                                                struct semid_ds *host_sd)
3396 {
3397     struct target_semid64_ds *target_sd;
3398 
3399     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3400         return -TARGET_EFAULT;
3401     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3402         return -TARGET_EFAULT;
3403     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3404     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3405     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3406     unlock_user_struct(target_sd, target_addr, 1);
3407     return 0;
3408 }
3409 
3410 struct target_seminfo {
3411     int semmap;
3412     int semmni;
3413     int semmns;
3414     int semmnu;
3415     int semmsl;
3416     int semopm;
3417     int semume;
3418     int semusz;
3419     int semvmx;
3420     int semaem;
3421 };
3422 
3423 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3424                                               struct seminfo *host_seminfo)
3425 {
3426     struct target_seminfo *target_seminfo;
3427     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3428         return -TARGET_EFAULT;
3429     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3430     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3431     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3432     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3433     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3434     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3435     __put_user(host_seminfo->semume, &target_seminfo->semume);
3436     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3437     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3438     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3439     unlock_user_struct(target_seminfo, target_addr, 1);
3440     return 0;
3441 }
3442 
3443 union semun {
3444 	int val;
3445 	struct semid_ds *buf;
3446 	unsigned short *array;
3447 	struct seminfo *__buf;
3448 };
3449 
3450 union target_semun {
3451 	int val;
3452 	abi_ulong buf;
3453 	abi_ulong array;
3454 	abi_ulong __buf;
3455 };
3456 
3457 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3458                                                abi_ulong target_addr)
3459 {
3460     int nsems;
3461     unsigned short *array;
3462     union semun semun;
3463     struct semid_ds semid_ds;
3464     int i, ret;
3465 
3466     semun.buf = &semid_ds;
3467 
3468     ret = semctl(semid, 0, IPC_STAT, semun);
3469     if (ret == -1)
3470         return get_errno(ret);
3471 
3472     nsems = semid_ds.sem_nsems;
3473 
3474     *host_array = g_try_new(unsigned short, nsems);
3475     if (!*host_array) {
3476         return -TARGET_ENOMEM;
3477     }
3478     array = lock_user(VERIFY_READ, target_addr,
3479                       nsems*sizeof(unsigned short), 1);
3480     if (!array) {
3481         g_free(*host_array);
3482         return -TARGET_EFAULT;
3483     }
3484 
3485     for(i=0; i<nsems; i++) {
3486         __get_user((*host_array)[i], &array[i]);
3487     }
3488     unlock_user(array, target_addr, 0);
3489 
3490     return 0;
3491 }
3492 
3493 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3494                                                unsigned short **host_array)
3495 {
3496     int nsems;
3497     unsigned short *array;
3498     union semun semun;
3499     struct semid_ds semid_ds;
3500     int i, ret;
3501 
3502     semun.buf = &semid_ds;
3503 
3504     ret = semctl(semid, 0, IPC_STAT, semun);
3505     if (ret == -1)
3506         return get_errno(ret);
3507 
3508     nsems = semid_ds.sem_nsems;
3509 
3510     array = lock_user(VERIFY_WRITE, target_addr,
3511                       nsems*sizeof(unsigned short), 0);
3512     if (!array)
3513         return -TARGET_EFAULT;
3514 
3515     for(i=0; i<nsems; i++) {
3516         __put_user((*host_array)[i], &array[i]);
3517     }
3518     g_free(*host_array);
3519     unlock_user(array, target_addr, 1);
3520 
3521     return 0;
3522 }
3523 
3524 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3525                                  abi_ulong target_arg)
3526 {
3527     union target_semun target_su = { .buf = target_arg };
3528     union semun arg;
3529     struct semid_ds dsarg;
3530     unsigned short *array = NULL;
3531     struct seminfo seminfo;
3532     abi_long ret = -TARGET_EINVAL;
3533     abi_long err;
3534     cmd &= 0xff;
3535 
3536     switch( cmd ) {
3537 	case GETVAL:
3538 	case SETVAL:
3539             /* In 64 bit cross-endian situations, we will erroneously pick up
3540              * the wrong half of the union for the "val" element.  To rectify
3541              * this, the entire 8-byte structure is byteswapped, followed by
3542              * a swap of the 4 byte val field. In other cases, the data is
3543              * already in proper host byte order. */
3544             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3545                 target_su.buf = tswapal(target_su.buf);
3546                 arg.val = tswap32(target_su.val);
3547             } else {
3548                 arg.val = target_su.val;
3549             }
3550             ret = get_errno(semctl(semid, semnum, cmd, arg));
3551             break;
3552 	case GETALL:
3553 	case SETALL:
3554             err = target_to_host_semarray(semid, &array, target_su.array);
3555             if (err)
3556                 return err;
3557             arg.array = array;
3558             ret = get_errno(semctl(semid, semnum, cmd, arg));
3559             err = host_to_target_semarray(semid, target_su.array, &array);
3560             if (err)
3561                 return err;
3562             break;
3563 	case IPC_STAT:
3564 	case IPC_SET:
3565 	case SEM_STAT:
3566             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3567             if (err)
3568                 return err;
3569             arg.buf = &dsarg;
3570             ret = get_errno(semctl(semid, semnum, cmd, arg));
3571             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3572             if (err)
3573                 return err;
3574             break;
3575 	case IPC_INFO:
3576 	case SEM_INFO:
3577             arg.__buf = &seminfo;
3578             ret = get_errno(semctl(semid, semnum, cmd, arg));
3579             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3580             if (err)
3581                 return err;
3582             break;
3583 	case IPC_RMID:
3584 	case GETPID:
3585 	case GETNCNT:
3586 	case GETZCNT:
3587             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3588             break;
3589     }
3590 
3591     return ret;
3592 }
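/*
 * Worked example for the GETVAL/SETVAL byte-swapping above (assuming a
 * 64-bit big-endian guest on a little-endian host): the guest stores the
 * 32-bit 'val' in the first four bytes of the 8-byte semun slot, but the
 * value arrives here already swapped to host order as a whole abi_ulong,
 * so reading target_su.val directly would pick up the other half of the
 * union.  tswapal() restores the guest byte layout in host memory, after
 * which tswap32() yields the intended 32-bit value.  When the union
 * members are the same size (32-bit targets), no extra swapping is needed.
 */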
3593 
3594 struct target_sembuf {
3595     unsigned short sem_num;
3596     short sem_op;
3597     short sem_flg;
3598 };
3599 
3600 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3601                                              abi_ulong target_addr,
3602                                              unsigned nsops)
3603 {
3604     struct target_sembuf *target_sembuf;
3605     int i;
3606 
3607     target_sembuf = lock_user(VERIFY_READ, target_addr,
3608                               nsops*sizeof(struct target_sembuf), 1);
3609     if (!target_sembuf)
3610         return -TARGET_EFAULT;
3611 
3612     for(i=0; i<nsops; i++) {
3613         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3614         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3615         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3616     }
3617 
3618     unlock_user(target_sembuf, target_addr, 0);
3619 
3620     return 0;
3621 }
3622 
3623 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3624 {
3625     struct sembuf sops[nsops];
3626     abi_long ret;
3627 
3628     if (target_to_host_sembuf(sops, ptr, nsops))
3629         return -TARGET_EFAULT;
3630 
3631     ret = -TARGET_ENOSYS;
3632 #ifdef __NR_semtimedop
3633     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3634 #endif
3635 #ifdef __NR_ipc
3636     if (ret == -TARGET_ENOSYS) {
3637         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3638     }
3639 #endif
3640     return ret;
3641 }
3642 
3643 struct target_msqid_ds
3644 {
3645     struct target_ipc_perm msg_perm;
3646     abi_ulong msg_stime;
3647 #if TARGET_ABI_BITS == 32
3648     abi_ulong __unused1;
3649 #endif
3650     abi_ulong msg_rtime;
3651 #if TARGET_ABI_BITS == 32
3652     abi_ulong __unused2;
3653 #endif
3654     abi_ulong msg_ctime;
3655 #if TARGET_ABI_BITS == 32
3656     abi_ulong __unused3;
3657 #endif
3658     abi_ulong __msg_cbytes;
3659     abi_ulong msg_qnum;
3660     abi_ulong msg_qbytes;
3661     abi_ulong msg_lspid;
3662     abi_ulong msg_lrpid;
3663     abi_ulong __unused4;
3664     abi_ulong __unused5;
3665 };
3666 
3667 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3668                                                abi_ulong target_addr)
3669 {
3670     struct target_msqid_ds *target_md;
3671 
3672     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3673         return -TARGET_EFAULT;
3674     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3675         return -TARGET_EFAULT;
3676     host_md->msg_stime = tswapal(target_md->msg_stime);
3677     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3678     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3679     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3680     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3681     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3682     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3683     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3684     unlock_user_struct(target_md, target_addr, 0);
3685     return 0;
3686 }
3687 
3688 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3689                                                struct msqid_ds *host_md)
3690 {
3691     struct target_msqid_ds *target_md;
3692 
3693     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3694         return -TARGET_EFAULT;
3695     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3696         return -TARGET_EFAULT;
3697     target_md->msg_stime = tswapal(host_md->msg_stime);
3698     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3699     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3700     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3701     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3702     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3703     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3704     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3705     unlock_user_struct(target_md, target_addr, 1);
3706     return 0;
3707 }
3708 
3709 struct target_msginfo {
3710     int msgpool;
3711     int msgmap;
3712     int msgmax;
3713     int msgmnb;
3714     int msgmni;
3715     int msgssz;
3716     int msgtql;
3717     unsigned short int msgseg;
3718 };
3719 
3720 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3721                                               struct msginfo *host_msginfo)
3722 {
3723     struct target_msginfo *target_msginfo;
3724     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3725         return -TARGET_EFAULT;
3726     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3727     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3728     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3729     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3730     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3731     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3732     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3733     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3734     unlock_user_struct(target_msginfo, target_addr, 1);
3735     return 0;
3736 }
3737 
3738 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3739 {
3740     struct msqid_ds dsarg;
3741     struct msginfo msginfo;
3742     abi_long ret = -TARGET_EINVAL;
3743 
3744     cmd &= 0xff;
3745 
3746     switch (cmd) {
3747     case IPC_STAT:
3748     case IPC_SET:
3749     case MSG_STAT:
3750         if (target_to_host_msqid_ds(&dsarg,ptr))
3751             return -TARGET_EFAULT;
3752         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3753         if (host_to_target_msqid_ds(ptr,&dsarg))
3754             return -TARGET_EFAULT;
3755         break;
3756     case IPC_RMID:
3757         ret = get_errno(msgctl(msgid, cmd, NULL));
3758         break;
3759     case IPC_INFO:
3760     case MSG_INFO:
3761         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3762         if (host_to_target_msginfo(ptr, &msginfo))
3763             return -TARGET_EFAULT;
3764         break;
3765     }
3766 
3767     return ret;
3768 }
3769 
3770 struct target_msgbuf {
3771     abi_long mtype;
3772     char	mtext[1];
3773 };
3774 
3775 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3776                                  ssize_t msgsz, int msgflg)
3777 {
3778     struct target_msgbuf *target_mb;
3779     struct msgbuf *host_mb;
3780     abi_long ret = 0;
3781 
3782     if (msgsz < 0) {
3783         return -TARGET_EINVAL;
3784     }
3785 
3786     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3787         return -TARGET_EFAULT;
3788     host_mb = g_try_malloc(msgsz + sizeof(long));
3789     if (!host_mb) {
3790         unlock_user_struct(target_mb, msgp, 0);
3791         return -TARGET_ENOMEM;
3792     }
3793     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3794     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3795     ret = -TARGET_ENOSYS;
3796 #ifdef __NR_msgsnd
3797     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3798 #endif
3799 #ifdef __NR_ipc
3800     if (ret == -TARGET_ENOSYS) {
3801         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3802                                  host_mb, 0));
3803     }
3804 #endif
3805     g_free(host_mb);
3806     unlock_user_struct(target_mb, msgp, 0);
3807 
3808     return ret;
3809 }
3810 
3811 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3812                                  ssize_t msgsz, abi_long msgtyp,
3813                                  int msgflg)
3814 {
3815     struct target_msgbuf *target_mb;
3816     char *target_mtext;
3817     struct msgbuf *host_mb;
3818     abi_long ret = 0;
3819 
3820     if (msgsz < 0) {
3821         return -TARGET_EINVAL;
3822     }
3823 
3824     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3825         return -TARGET_EFAULT;
3826 
3827     host_mb = g_try_malloc(msgsz + sizeof(long));
3828     if (!host_mb) {
3829         ret = -TARGET_ENOMEM;
3830         goto end;
3831     }
3832     ret = -TARGET_ENOSYS;
3833 #ifdef __NR_msgrcv
3834     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3835 #endif
3836 #ifdef __NR_ipc
3837     if (ret == -TARGET_ENOSYS) {
3838         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3839                         msgflg, host_mb, msgtyp));
3840     }
3841 #endif
3842 
3843     if (ret > 0) {
3844         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3845         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3846         if (!target_mtext) {
3847             ret = -TARGET_EFAULT;
3848             goto end;
3849         }
3850         memcpy(target_mb->mtext, host_mb->mtext, ret);
3851         unlock_user(target_mtext, target_mtext_addr, ret);
3852     }
3853 
3854     target_mb->mtype = tswapal(host_mb->mtype);
3855 
3856 end:
3857     if (target_mb)
3858         unlock_user_struct(target_mb, msgp, 1);
3859     g_free(host_mb);
3860     return ret;
3861 }
3862 
3863 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3864                                                abi_ulong target_addr)
3865 {
3866     struct target_shmid_ds *target_sd;
3867 
3868     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3869         return -TARGET_EFAULT;
3870     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3871         return -TARGET_EFAULT;
3872     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3873     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3874     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3875     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3876     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3877     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3878     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3879     unlock_user_struct(target_sd, target_addr, 0);
3880     return 0;
3881 }
3882 
3883 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3884                                                struct shmid_ds *host_sd)
3885 {
3886     struct target_shmid_ds *target_sd;
3887 
3888     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3889         return -TARGET_EFAULT;
3890     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3891         return -TARGET_EFAULT;
3892     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3893     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3894     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3895     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3896     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3897     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3898     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3899     unlock_user_struct(target_sd, target_addr, 1);
3900     return 0;
3901 }
3902 
3903 struct target_shminfo {
3904     abi_ulong shmmax;
3905     abi_ulong shmmin;
3906     abi_ulong shmmni;
3907     abi_ulong shmseg;
3908     abi_ulong shmall;
3909 };
3910 
3911 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3912                                               struct shminfo *host_shminfo)
3913 {
3914     struct target_shminfo *target_shminfo;
3915     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3916         return -TARGET_EFAULT;
3917     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3918     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3919     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3920     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3921     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3922     unlock_user_struct(target_shminfo, target_addr, 1);
3923     return 0;
3924 }
3925 
3926 struct target_shm_info {
3927     int used_ids;
3928     abi_ulong shm_tot;
3929     abi_ulong shm_rss;
3930     abi_ulong shm_swp;
3931     abi_ulong swap_attempts;
3932     abi_ulong swap_successes;
3933 };
3934 
3935 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3936                                                struct shm_info *host_shm_info)
3937 {
3938     struct target_shm_info *target_shm_info;
3939     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3940         return -TARGET_EFAULT;
3941     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3942     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3943     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3944     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3945     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3946     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3947     unlock_user_struct(target_shm_info, target_addr, 1);
3948     return 0;
3949 }
3950 
3951 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3952 {
3953     struct shmid_ds dsarg;
3954     struct shminfo shminfo;
3955     struct shm_info shm_info;
3956     abi_long ret = -TARGET_EINVAL;
3957 
3958     cmd &= 0xff;
3959 
3960     switch(cmd) {
3961     case IPC_STAT:
3962     case IPC_SET:
3963     case SHM_STAT:
3964         if (target_to_host_shmid_ds(&dsarg, buf))
3965             return -TARGET_EFAULT;
3966         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3967         if (host_to_target_shmid_ds(buf, &dsarg))
3968             return -TARGET_EFAULT;
3969         break;
3970     case IPC_INFO:
3971         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3972         if (host_to_target_shminfo(buf, &shminfo))
3973             return -TARGET_EFAULT;
3974         break;
3975     case SHM_INFO:
3976         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3977         if (host_to_target_shm_info(buf, &shm_info))
3978             return -TARGET_EFAULT;
3979         break;
3980     case IPC_RMID:
3981     case SHM_LOCK:
3982     case SHM_UNLOCK:
3983         ret = get_errno(shmctl(shmid, cmd, NULL));
3984         break;
3985     }
3986 
3987     return ret;
3988 }
3989 
3990 #ifndef TARGET_FORCE_SHMLBA
3991 /* For most architectures, SHMLBA is the same as the page size;
3992  * some architectures have larger values, in which case they should
3993  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3994  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3995  * and defining its own value for SHMLBA.
3996  *
3997  * The kernel also permits SHMLBA to be set by the architecture to a
3998  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3999  * this means that addresses are rounded to the large size if
4000  * SHM_RND is set but addresses not aligned to that size are not rejected
4001  * as long as they are at least page-aligned. Since the only architecture
4002  * which uses this is ia64 this code doesn't provide for that oddity.
4003  */
4004 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4005 {
4006     return TARGET_PAGE_SIZE;
4007 }
4008 #endif
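/*
 * Hypothetical sketch (not taken from any particular port): an
 * architecture whose SHMLBA exceeds the page size would instead define
 * TARGET_FORCE_SHMLBA in its target headers and supply its own helper,
 * for instance
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;    // illustrative value only
 *     }
 *
 * so that do_shmat() below rounds and validates guest addresses against
 * the larger alignment.
 */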
4009 
4010 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4011                                  int shmid, abi_ulong shmaddr, int shmflg)
4012 {
4013     abi_long raddr;
4014     void *host_raddr;
4015     struct shmid_ds shm_info;
4016     int i,ret;
4017     abi_ulong shmlba;
4018 
4019     /* find out the length of the shared memory segment */
4020     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4021     if (is_error(ret)) {
4022         /* can't get length, bail out */
4023         return ret;
4024     }
4025 
4026     shmlba = target_shmlba(cpu_env);
4027 
4028     if (shmaddr & (shmlba - 1)) {
4029         if (shmflg & SHM_RND) {
4030             shmaddr &= ~(shmlba - 1);
4031         } else {
4032             return -TARGET_EINVAL;
4033         }
4034     }
4035     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4036         return -TARGET_EINVAL;
4037     }
4038 
4039     mmap_lock();
4040 
4041     if (shmaddr)
4042         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4043     else {
4044         abi_ulong mmap_start;
4045 
4046         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4047         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4048 
4049         if (mmap_start == -1) {
4050             errno = ENOMEM;
4051             host_raddr = (void *)-1;
4052         } else
4053             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4054     }
4055 
4056     if (host_raddr == (void *)-1) {
4057         mmap_unlock();
4058         return get_errno((long)host_raddr);
4059     }
4060     raddr=h2g((unsigned long)host_raddr);
4061 
4062     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4063                    PAGE_VALID | PAGE_READ |
4064                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4065 
4066     for (i = 0; i < N_SHM_REGIONS; i++) {
4067         if (!shm_regions[i].in_use) {
4068             shm_regions[i].in_use = true;
4069             shm_regions[i].start = raddr;
4070             shm_regions[i].size = shm_info.shm_segsz;
4071             break;
4072         }
4073     }
4074 
4075     mmap_unlock();
4076     return raddr;
4077 
4078 }
4079 
4080 static inline abi_long do_shmdt(abi_ulong shmaddr)
4081 {
4082     int i;
4083     abi_long rv;
4084 
4085     mmap_lock();
4086 
4087     for (i = 0; i < N_SHM_REGIONS; ++i) {
4088         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4089             shm_regions[i].in_use = false;
4090             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4091             break;
4092         }
4093     }
4094     rv = get_errno(shmdt(g2h(shmaddr)));
4095 
4096     mmap_unlock();
4097 
4098     return rv;
4099 }
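/*
 * Illustrative note: shm_regions[] is a small fixed table (N_SHM_REGIONS
 * entries) that remembers the guest address and size of each live attach
 * so that do_shmdt() can clear the page flags for exactly the range that
 * was mapped.  If the table is full, the attach itself still succeeds,
 * but a later detach of that segment cannot restore the page flags.
 */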
4100 
4101 #ifdef TARGET_NR_ipc
4102 /* ??? This only works with linear mappings.  */
4103 /* do_ipc() must return target values and target errnos. */
4104 static abi_long do_ipc(CPUArchState *cpu_env,
4105                        unsigned int call, abi_long first,
4106                        abi_long second, abi_long third,
4107                        abi_long ptr, abi_long fifth)
4108 {
4109     int version;
4110     abi_long ret = 0;
4111 
4112     version = call >> 16;
4113     call &= 0xffff;
4114 
4115     switch (call) {
4116     case IPCOP_semop:
4117         ret = do_semop(first, ptr, second);
4118         break;
4119 
4120     case IPCOP_semget:
4121         ret = get_errno(semget(first, second, third));
4122         break;
4123 
4124     case IPCOP_semctl: {
4125         /* The semun argument to semctl is passed by value, so dereference the
4126          * ptr argument. */
4127         abi_ulong atptr;
4128         get_user_ual(atptr, ptr);
4129         ret = do_semctl(first, second, third, atptr);
4130         break;
4131     }
4132 
4133     case IPCOP_msgget:
4134         ret = get_errno(msgget(first, second));
4135         break;
4136 
4137     case IPCOP_msgsnd:
4138         ret = do_msgsnd(first, ptr, second, third);
4139         break;
4140 
4141     case IPCOP_msgctl:
4142         ret = do_msgctl(first, second, ptr);
4143         break;
4144 
4145     case IPCOP_msgrcv:
4146         switch (version) {
4147         case 0:
4148             {
4149                 struct target_ipc_kludge {
4150                     abi_long msgp;
4151                     abi_long msgtyp;
4152                 } *tmp;
4153 
4154                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4155                     ret = -TARGET_EFAULT;
4156                     break;
4157                 }
4158 
4159                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4160 
4161                 unlock_user_struct(tmp, ptr, 0);
4162                 break;
4163             }
4164         default:
4165             ret = do_msgrcv(first, ptr, second, fifth, third);
4166         }
4167         break;
4168 
4169     case IPCOP_shmat:
4170         switch (version) {
4171         default:
4172         {
4173             abi_ulong raddr;
4174             raddr = do_shmat(cpu_env, first, ptr, second);
4175             if (is_error(raddr))
4176                 return get_errno(raddr);
4177             if (put_user_ual(raddr, third))
4178                 return -TARGET_EFAULT;
4179             break;
4180         }
4181         case 1:
4182             ret = -TARGET_EINVAL;
4183             break;
4184         }
4185         break;
4186     case IPCOP_shmdt:
4187         ret = do_shmdt(ptr);
4188         break;
4189 
4190     case IPCOP_shmget:
4191         /* IPC_* flag values are the same on all linux platforms */
4192         ret = get_errno(shmget(first, second, third));
4193         break;
4194 
4195         /* IPC_* and SHM_* command values are the same on all linux platforms */
4196     case IPCOP_shmctl:
4197         ret = do_shmctl(first, second, ptr);
4198         break;
4199     default:
4200         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4201         ret = -TARGET_ENOSYS;
4202         break;
4203     }
4204     return ret;
4205 }
4206 #endif
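/*
 * Illustrative note: the ipc() multiplexer packs a version number into
 * the upper 16 bits of the call word (IPCOP_CALL(version, op) expands to
 * (version << 16) | op), which is why do_ipc() splits it with
 * "version = call >> 16; call &= 0xffff".  Old libcs issue msgrcv with
 * version 0 and pass msgp/msgtyp indirectly through a small structure,
 * which the target_ipc_kludge path above unpacks.
 */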
4207 
4208 /* kernel structure types definitions */
4209 
4210 #define STRUCT(name, ...) STRUCT_ ## name,
4211 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4212 enum {
4213 #include "syscall_types.h"
4214 STRUCT_MAX
4215 };
4216 #undef STRUCT
4217 #undef STRUCT_SPECIAL
4218 
4219 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4220 #define STRUCT_SPECIAL(name)
4221 #include "syscall_types.h"
4222 #undef STRUCT
4223 #undef STRUCT_SPECIAL
4224 
4225 typedef struct IOCTLEntry IOCTLEntry;
4226 
4227 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4228                              int fd, int cmd, abi_long arg);
4229 
4230 struct IOCTLEntry {
4231     int target_cmd;
4232     unsigned int host_cmd;
4233     const char *name;
4234     int access;
4235     do_ioctl_fn *do_ioctl;
4236     const argtype arg_type[5];
4237 };
4238 
4239 #define IOC_R 0x0001
4240 #define IOC_W 0x0002
4241 #define IOC_RW (IOC_R | IOC_W)
4242 
4243 #define MAX_STRUCT_SIZE 4096
4244 
4245 #ifdef CONFIG_FIEMAP
4246 /* So fiemap access checks don't overflow on 32 bit systems.
4247  * This is very slightly smaller than the limit imposed by
4248  * the underlying kernel.
4249  */
4250 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4251                             / sizeof(struct fiemap_extent))
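/*
 * Back-of-the-envelope check of the bound above (assuming the usual
 * 56-byte struct fiemap_extent and a 32-bit unsigned int): UINT_MAX is
 * about 4.29e9, so FIEMAP_MAX_EXTENTS works out to roughly 76 million
 * extents.  Capping fm_extent_count there keeps the later
 * "sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent)"
 * computation from wrapping a 32-bit outbufsz.
 */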
4252 
4253 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4254                                        int fd, int cmd, abi_long arg)
4255 {
4256     /* The parameter for this ioctl is a struct fiemap followed
4257      * by an array of struct fiemap_extent whose size is set
4258      * in fiemap->fm_extent_count. The array is filled in by the
4259      * ioctl.
4260      */
4261     int target_size_in, target_size_out;
4262     struct fiemap *fm;
4263     const argtype *arg_type = ie->arg_type;
4264     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4265     void *argptr, *p;
4266     abi_long ret;
4267     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4268     uint32_t outbufsz;
4269     int free_fm = 0;
4270 
4271     assert(arg_type[0] == TYPE_PTR);
4272     assert(ie->access == IOC_RW);
4273     arg_type++;
4274     target_size_in = thunk_type_size(arg_type, 0);
4275     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4276     if (!argptr) {
4277         return -TARGET_EFAULT;
4278     }
4279     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4280     unlock_user(argptr, arg, 0);
4281     fm = (struct fiemap *)buf_temp;
4282     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4283         return -TARGET_EINVAL;
4284     }
4285 
4286     outbufsz = sizeof (*fm) +
4287         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4288 
4289     if (outbufsz > MAX_STRUCT_SIZE) {
4290         /* We can't fit all the extents into the fixed size buffer.
4291          * Allocate one that is large enough and use it instead.
4292          */
4293         fm = g_try_malloc(outbufsz);
4294         if (!fm) {
4295             return -TARGET_ENOMEM;
4296         }
4297         memcpy(fm, buf_temp, sizeof(struct fiemap));
4298         free_fm = 1;
4299     }
4300     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4301     if (!is_error(ret)) {
4302         target_size_out = target_size_in;
4303         /* An extent_count of 0 means we were only counting the extents
4304          * so there are no structs to copy
4305          */
4306         if (fm->fm_extent_count != 0) {
4307             target_size_out += fm->fm_mapped_extents * extent_size;
4308         }
4309         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4310         if (!argptr) {
4311             ret = -TARGET_EFAULT;
4312         } else {
4313             /* Convert the struct fiemap */
4314             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4315             if (fm->fm_extent_count != 0) {
4316                 p = argptr + target_size_in;
4317                 /* ...and then all the struct fiemap_extents */
4318                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4319                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4320                                   THUNK_TARGET);
4321                     p += extent_size;
4322                 }
4323             }
4324             unlock_user(argptr, arg, target_size_out);
4325         }
4326     }
4327     if (free_fm) {
4328         g_free(fm);
4329     }
4330     return ret;
4331 }
4332 #endif
4333 
4334 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4335                                 int fd, int cmd, abi_long arg)
4336 {
4337     const argtype *arg_type = ie->arg_type;
4338     int target_size;
4339     void *argptr;
4340     int ret;
4341     struct ifconf *host_ifconf;
4342     uint32_t outbufsz;
4343     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4344     int target_ifreq_size;
4345     int nb_ifreq;
4346     int free_buf = 0;
4347     int i;
4348     int target_ifc_len;
4349     abi_long target_ifc_buf;
4350     int host_ifc_len;
4351     char *host_ifc_buf;
4352 
4353     assert(arg_type[0] == TYPE_PTR);
4354     assert(ie->access == IOC_RW);
4355 
4356     arg_type++;
4357     target_size = thunk_type_size(arg_type, 0);
4358 
4359     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4360     if (!argptr)
4361         return -TARGET_EFAULT;
4362     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4363     unlock_user(argptr, arg, 0);
4364 
4365     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4366     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4367     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4368 
4369     if (target_ifc_buf != 0) {
4370         target_ifc_len = host_ifconf->ifc_len;
4371         nb_ifreq = target_ifc_len / target_ifreq_size;
4372         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4373 
4374         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4375         if (outbufsz > MAX_STRUCT_SIZE) {
4376             /*
4377              * We can't fit all the ifreq entries into the fixed size buffer.
4378              * Allocate one that is large enough and use it instead.
4379              */
4380             host_ifconf = malloc(outbufsz);
4381             if (!host_ifconf) {
4382                 return -TARGET_ENOMEM;
4383             }
4384             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4385             free_buf = 1;
4386         }
4387         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4388 
4389         host_ifconf->ifc_len = host_ifc_len;
4390     } else {
4391       host_ifc_buf = NULL;
4392     }
4393     host_ifconf->ifc_buf = host_ifc_buf;
4394 
4395     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4396     if (!is_error(ret)) {
4397         /* convert host ifc_len to target ifc_len */
4398 
4399         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4400         target_ifc_len = nb_ifreq * target_ifreq_size;
4401         host_ifconf->ifc_len = target_ifc_len;
4402 
4403         /* restore target ifc_buf */
4404 
4405         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4406 
4407         /* copy struct ifconf to target user */
4408 
4409         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4410         if (!argptr)
4411             return -TARGET_EFAULT;
4412         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4413         unlock_user(argptr, arg, target_size);
4414 
4415         if (target_ifc_buf != 0) {
4416             /* copy ifreq[] to target user */
4417             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4418             for (i = 0; i < nb_ifreq ; i++) {
4419                 thunk_convert(argptr + i * target_ifreq_size,
4420                               host_ifc_buf + i * sizeof(struct ifreq),
4421                               ifreq_arg_type, THUNK_TARGET);
4422             }
4423             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4424         }
4425     }
4426 
4427     if (free_buf) {
4428         free(host_ifconf);
4429     }
4430 
4431     return ret;
4432 }
4433 
4434 #if defined(CONFIG_USBFS)
4435 #if HOST_LONG_BITS > 64
4436 #error USBDEVFS thunks do not support >64 bit hosts yet.
4437 #endif
4438 struct live_urb {
4439     uint64_t target_urb_adr;
4440     uint64_t target_buf_adr;
4441     char *target_buf_ptr;
4442     struct usbdevfs_urb host_urb;
4443 };
4444 
4445 static GHashTable *usbdevfs_urb_hashtable(void)
4446 {
4447     static GHashTable *urb_hashtable;
4448 
4449     if (!urb_hashtable) {
4450         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4451     }
4452     return urb_hashtable;
4453 }
4454 
4455 static void urb_hashtable_insert(struct live_urb *urb)
4456 {
4457     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4458     g_hash_table_insert(urb_hashtable, urb, urb);
4459 }
4460 
4461 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4462 {
4463     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4464     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4465 }
4466 
4467 static void urb_hashtable_remove(struct live_urb *urb)
4468 {
4469     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4470     g_hash_table_remove(urb_hashtable, urb);
4471 }
4472 
4473 static abi_long
4474 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4475                           int fd, int cmd, abi_long arg)
4476 {
4477     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4478     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4479     struct live_urb *lurb;
4480     void *argptr;
4481     uint64_t hurb;
4482     int target_size;
4483     uintptr_t target_urb_adr;
4484     abi_long ret;
4485 
4486     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4487 
4488     memset(buf_temp, 0, sizeof(uint64_t));
4489     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4490     if (is_error(ret)) {
4491         return ret;
4492     }
4493 
4494     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4495     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4496     if (!lurb->target_urb_adr) {
4497         return -TARGET_EFAULT;
4498     }
4499     urb_hashtable_remove(lurb);
4500     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4501         lurb->host_urb.buffer_length);
4502     lurb->target_buf_ptr = NULL;
4503 
4504     /* restore the guest buffer pointer */
4505     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4506 
4507     /* update the guest urb struct */
4508     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4509     if (!argptr) {
4510         g_free(lurb);
4511         return -TARGET_EFAULT;
4512     }
4513     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4514     unlock_user(argptr, lurb->target_urb_adr, target_size);
4515 
4516     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4517     /* write back the urb handle */
4518     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4519     if (!argptr) {
4520         g_free(lurb);
4521         return -TARGET_EFAULT;
4522     }
4523 
4524     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4525     target_urb_adr = lurb->target_urb_adr;
4526     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4527     unlock_user(argptr, arg, target_size);
4528 
4529     g_free(lurb);
4530     return ret;
4531 }
4532 
4533 static abi_long
4534 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4535                              uint8_t *buf_temp __attribute__((unused)),
4536                              int fd, int cmd, abi_long arg)
4537 {
4538     struct live_urb *lurb;
4539 
4540     /* map target address back to host URB with metadata. */
4541     lurb = urb_hashtable_lookup(arg);
4542     if (!lurb) {
4543         return -TARGET_EFAULT;
4544     }
4545     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4546 }
4547 
4548 static abi_long
4549 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4550                             int fd, int cmd, abi_long arg)
4551 {
4552     const argtype *arg_type = ie->arg_type;
4553     int target_size;
4554     abi_long ret;
4555     void *argptr;
4556     int rw_dir;
4557     struct live_urb *lurb;
4558 
4559     /*
4560      * Each submitted URB needs to map to a unique ID for the
4561      * kernel, and that unique ID needs to be a pointer to
4562      * host memory.  Hence, we need to malloc for each URB.
4563      * Isochronous transfers have a variable length struct.
4564      */
4565     arg_type++;
4566     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4567 
4568     /* construct host copy of urb and metadata */
4569     lurb = g_try_malloc0(sizeof(struct live_urb));
4570     if (!lurb) {
4571         return -TARGET_ENOMEM;
4572     }
4573 
4574     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4575     if (!argptr) {
4576         g_free(lurb);
4577         return -TARGET_EFAULT;
4578     }
4579     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4580     unlock_user(argptr, arg, 0);
4581 
4582     lurb->target_urb_adr = arg;
4583     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4584 
4585     /* buffer space used depends on endpoint type so lock the entire buffer */
4586     /* control type urbs should check the buffer contents for true direction */
4587     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4588     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4589         lurb->host_urb.buffer_length, 1);
4590     if (lurb->target_buf_ptr == NULL) {
4591         g_free(lurb);
4592         return -TARGET_EFAULT;
4593     }
4594 
4595     /* update buffer pointer in host copy */
4596     lurb->host_urb.buffer = lurb->target_buf_ptr;
4597 
4598     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4599     if (is_error(ret)) {
4600         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4601         g_free(lurb);
4602     } else {
4603         urb_hashtable_insert(lurb);
4604     }
4605 
4606     return ret;
4607 }
4608 #endif /* CONFIG_USBFS */
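/*
 * Illustrative summary of the URB round trip implemented above:
 * SUBMITURB allocates one struct live_urb per guest URB, records the
 * guest urb address and buffer in it, inserts it into the hash table and
 * hands &lurb->host_urb to the kernel.  REAPURB recovers the live_urb
 * from the pointer the kernel returns via offsetof(), copies the results
 * back into the guest urb and frees it, while DISCARDURB simply looks the
 * guest address up in the same table.
 */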
4609 
4610 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4611                             int cmd, abi_long arg)
4612 {
4613     void *argptr;
4614     struct dm_ioctl *host_dm;
4615     abi_long guest_data;
4616     uint32_t guest_data_size;
4617     int target_size;
4618     const argtype *arg_type = ie->arg_type;
4619     abi_long ret;
4620     void *big_buf = NULL;
4621     char *host_data;
4622 
4623     arg_type++;
4624     target_size = thunk_type_size(arg_type, 0);
4625     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4626     if (!argptr) {
4627         ret = -TARGET_EFAULT;
4628         goto out;
4629     }
4630     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4631     unlock_user(argptr, arg, 0);
4632 
4633     /* buf_temp is too small, so fetch things into a bigger buffer */
4634     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4635     memcpy(big_buf, buf_temp, target_size);
4636     buf_temp = big_buf;
4637     host_dm = big_buf;
4638 
4639     guest_data = arg + host_dm->data_start;
4640     if ((guest_data - arg) < 0) {
4641         ret = -TARGET_EINVAL;
4642         goto out;
4643     }
4644     guest_data_size = host_dm->data_size - host_dm->data_start;
4645     host_data = (char*)host_dm + host_dm->data_start;
4646 
4647     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4648     if (!argptr) {
4649         ret = -TARGET_EFAULT;
4650         goto out;
4651     }
4652 
4653     switch (ie->host_cmd) {
4654     case DM_REMOVE_ALL:
4655     case DM_LIST_DEVICES:
4656     case DM_DEV_CREATE:
4657     case DM_DEV_REMOVE:
4658     case DM_DEV_SUSPEND:
4659     case DM_DEV_STATUS:
4660     case DM_DEV_WAIT:
4661     case DM_TABLE_STATUS:
4662     case DM_TABLE_CLEAR:
4663     case DM_TABLE_DEPS:
4664     case DM_LIST_VERSIONS:
4665         /* no input data */
4666         break;
4667     case DM_DEV_RENAME:
4668     case DM_DEV_SET_GEOMETRY:
4669         /* data contains only strings */
4670         memcpy(host_data, argptr, guest_data_size);
4671         break;
4672     case DM_TARGET_MSG:
4673         memcpy(host_data, argptr, guest_data_size);
4674         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4675         break;
4676     case DM_TABLE_LOAD:
4677     {
4678         void *gspec = argptr;
4679         void *cur_data = host_data;
4680         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4681         int spec_size = thunk_type_size(arg_type, 0);
4682         int i;
4683 
4684         for (i = 0; i < host_dm->target_count; i++) {
4685             struct dm_target_spec *spec = cur_data;
4686             uint32_t next;
4687             int slen;
4688 
4689             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4690             slen = strlen((char*)gspec + spec_size) + 1;
4691             next = spec->next;
4692             spec->next = sizeof(*spec) + slen;
4693             strcpy((char*)&spec[1], gspec + spec_size);
4694             gspec += next;
4695             cur_data += spec->next;
4696         }
4697         break;
4698     }
4699     default:
4700         ret = -TARGET_EINVAL;
4701         unlock_user(argptr, guest_data, 0);
4702         goto out;
4703     }
4704     unlock_user(argptr, guest_data, 0);
4705 
4706     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4707     if (!is_error(ret)) {
4708         guest_data = arg + host_dm->data_start;
4709         guest_data_size = host_dm->data_size - host_dm->data_start;
4710         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4711         switch (ie->host_cmd) {
4712         case DM_REMOVE_ALL:
4713         case DM_DEV_CREATE:
4714         case DM_DEV_REMOVE:
4715         case DM_DEV_RENAME:
4716         case DM_DEV_SUSPEND:
4717         case DM_DEV_STATUS:
4718         case DM_TABLE_LOAD:
4719         case DM_TABLE_CLEAR:
4720         case DM_TARGET_MSG:
4721         case DM_DEV_SET_GEOMETRY:
4722             /* no return data */
4723             break;
4724         case DM_LIST_DEVICES:
4725         {
4726             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4727             uint32_t remaining_data = guest_data_size;
4728             void *cur_data = argptr;
4729             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4730             int nl_size = 12; /* can't use thunk_size due to alignment */
4731 
4732             while (1) {
4733                 uint32_t next = nl->next;
4734                 if (next) {
4735                     nl->next = nl_size + (strlen(nl->name) + 1);
4736                 }
4737                 if (remaining_data < nl->next) {
4738                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4739                     break;
4740                 }
4741                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4742                 strcpy(cur_data + nl_size, nl->name);
4743                 cur_data += nl->next;
4744                 remaining_data -= nl->next;
4745                 if (!next) {
4746                     break;
4747                 }
4748                 nl = (void*)nl + next;
4749             }
4750             break;
4751         }
4752         case DM_DEV_WAIT:
4753         case DM_TABLE_STATUS:
4754         {
4755             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4756             void *cur_data = argptr;
4757             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4758             int spec_size = thunk_type_size(arg_type, 0);
4759             int i;
4760 
4761             for (i = 0; i < host_dm->target_count; i++) {
4762                 uint32_t next = spec->next;
4763                 int slen = strlen((char*)&spec[1]) + 1;
4764                 spec->next = (cur_data - argptr) + spec_size + slen;
4765                 if (guest_data_size < spec->next) {
4766                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4767                     break;
4768                 }
4769                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4770                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4771                 cur_data = argptr + spec->next;
4772                 spec = (void*)host_dm + host_dm->data_start + next;
4773             }
4774             break;
4775         }
4776         case DM_TABLE_DEPS:
4777         {
4778             void *hdata = (void*)host_dm + host_dm->data_start;
4779             int count = *(uint32_t*)hdata;
4780             uint64_t *hdev = hdata + 8;
4781             uint64_t *gdev = argptr + 8;
4782             int i;
4783 
4784             *(uint32_t*)argptr = tswap32(count);
4785             for (i = 0; i < count; i++) {
4786                 *gdev = tswap64(*hdev);
4787                 gdev++;
4788                 hdev++;
4789             }
4790             break;
4791         }
4792         case DM_LIST_VERSIONS:
4793         {
4794             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4795             uint32_t remaining_data = guest_data_size;
4796             void *cur_data = argptr;
4797             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4798             int vers_size = thunk_type_size(arg_type, 0);
4799 
4800             while (1) {
4801                 uint32_t next = vers->next;
4802                 if (next) {
4803                     vers->next = vers_size + (strlen(vers->name) + 1);
4804                 }
4805                 if (remaining_data < vers->next) {
4806                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4807                     break;
4808                 }
4809                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4810                 strcpy(cur_data + vers_size, vers->name);
4811                 cur_data += vers->next;
4812                 remaining_data -= vers->next;
4813                 if (!next) {
4814                     break;
4815                 }
4816                 vers = (void*)vers + next;
4817             }
4818             break;
4819         }
4820         default:
4821             unlock_user(argptr, guest_data, 0);
4822             ret = -TARGET_EINVAL;
4823             goto out;
4824         }
4825         unlock_user(argptr, guest_data, guest_data_size);
4826 
4827         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4828         if (!argptr) {
4829             ret = -TARGET_EFAULT;
4830             goto out;
4831         }
4832         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4833         unlock_user(argptr, arg, target_size);
4834     }
4835 out:
4836     g_free(big_buf);
4837     return ret;
4838 }
4839 
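/*
 * BLKPG carries a struct blkpg_ioctl_arg whose data field points at a
 * struct blkpg_partition; convert both levels and point the data field
 * at a host-side copy before issuing the ioctl.
 */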
4840 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4841                                int cmd, abi_long arg)
4842 {
4843     void *argptr;
4844     int target_size;
4845     const argtype *arg_type = ie->arg_type;
4846     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4847     abi_long ret;
4848 
4849     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4850     struct blkpg_partition host_part;
4851 
4852     /* Read and convert blkpg */
4853     arg_type++;
4854     target_size = thunk_type_size(arg_type, 0);
4855     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4856     if (!argptr) {
4857         ret = -TARGET_EFAULT;
4858         goto out;
4859     }
4860     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4861     unlock_user(argptr, arg, 0);
4862 
4863     switch (host_blkpg->op) {
4864     case BLKPG_ADD_PARTITION:
4865     case BLKPG_DEL_PARTITION:
4866         /* payload is struct blkpg_partition */
4867         break;
4868     default:
4869         /* Unknown opcode */
4870         ret = -TARGET_EINVAL;
4871         goto out;
4872     }
4873 
4874     /* Read and convert blkpg->data */
4875     arg = (abi_long)(uintptr_t)host_blkpg->data;
4876     target_size = thunk_type_size(part_arg_type, 0);
4877     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4878     if (!argptr) {
4879         ret = -TARGET_EFAULT;
4880         goto out;
4881     }
4882     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4883     unlock_user(argptr, arg, 0);
4884 
4885     /* Swizzle the data pointer to our local copy and call! */
4886     host_blkpg->data = &host_part;
4887     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4888 
4889 out:
4890     return ret;
4891 }
4892 
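/*
 * Routing-table ioctls operating on struct rtentry: the struct embeds an
 * rt_dev string pointer, so convert it field by field and lock the guest
 * string separately rather than relying on a plain thunk conversion.
 */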
4893 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4894                                 int fd, int cmd, abi_long arg)
4895 {
4896     const argtype *arg_type = ie->arg_type;
4897     const StructEntry *se;
4898     const argtype *field_types;
4899     const int *dst_offsets, *src_offsets;
4900     int target_size;
4901     void *argptr;
4902     abi_ulong *target_rt_dev_ptr = NULL;
4903     unsigned long *host_rt_dev_ptr = NULL;
4904     abi_long ret;
4905     int i;
4906 
4907     assert(ie->access == IOC_W);
4908     assert(*arg_type == TYPE_PTR);
4909     arg_type++;
4910     assert(*arg_type == TYPE_STRUCT);
4911     target_size = thunk_type_size(arg_type, 0);
4912     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4913     if (!argptr) {
4914         return -TARGET_EFAULT;
4915     }
4916     arg_type++;
4917     assert(*arg_type == (int)STRUCT_rtentry);
4918     se = struct_entries + *arg_type++;
4919     assert(se->convert[0] == NULL);
4920     /* convert the struct field by field here so we can catch the rt_dev string */
4921     field_types = se->field_types;
4922     dst_offsets = se->field_offsets[THUNK_HOST];
4923     src_offsets = se->field_offsets[THUNK_TARGET];
4924     for (i = 0; i < se->nb_fields; i++) {
4925         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4926             assert(*field_types == TYPE_PTRVOID);
4927             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4928             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4929             if (*target_rt_dev_ptr != 0) {
4930                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4931                                                   tswapal(*target_rt_dev_ptr));
4932                 if (!*host_rt_dev_ptr) {
4933                     unlock_user(argptr, arg, 0);
4934                     return -TARGET_EFAULT;
4935                 }
4936             } else {
4937                 *host_rt_dev_ptr = 0;
4938             }
4939             field_types++;
4940             continue;
4941         }
4942         field_types = thunk_convert(buf_temp + dst_offsets[i],
4943                                     argptr + src_offsets[i],
4944                                     field_types, THUNK_HOST);
4945     }
4946     unlock_user(argptr, arg, 0);
4947 
4948     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4949 
4950     assert(host_rt_dev_ptr != NULL);
4951     assert(target_rt_dev_ptr != NULL);
4952     if (*host_rt_dev_ptr != 0) {
4953         unlock_user((void *)*host_rt_dev_ptr,
4954                     *target_rt_dev_ptr, 0);
4955     }
4956     return ret;
4957 }
4958 
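/* KDSIGACCEPT takes a signal number, so translate it to host numbering. */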
4959 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4960                                      int fd, int cmd, abi_long arg)
4961 {
4962     int sig = target_to_host_signal(arg);
4963     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4964 }
4965 
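/*
 * Socket timestamp ioctls: run the host SIOCGSTAMP/SIOCGSTAMPNS and copy
 * the result out in either the old or the 64-bit time layout, depending
 * on which target command the guest used.
 */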
4966 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4967                                     int fd, int cmd, abi_long arg)
4968 {
4969     struct timeval tv;
4970     abi_long ret;
4971 
4972     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4973     if (is_error(ret)) {
4974         return ret;
4975     }
4976 
4977     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4978         if (copy_to_user_timeval(arg, &tv)) {
4979             return -TARGET_EFAULT;
4980         }
4981     } else {
4982         if (copy_to_user_timeval64(arg, &tv)) {
4983             return -TARGET_EFAULT;
4984         }
4985     }
4986 
4987     return ret;
4988 }
4989 
4990 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4991                                       int fd, int cmd, abi_long arg)
4992 {
4993     struct timespec ts;
4994     abi_long ret;
4995 
4996     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
4997     if (is_error(ret)) {
4998         return ret;
4999     }
5000 
5001     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5002         if (host_to_target_timespec(arg, &ts)) {
5003             return -TARGET_EFAULT;
5004         }
5005     } else {
5006         if (host_to_target_timespec64(arg, &ts)) {
5007             return -TARGET_EFAULT;
5008         }
5009     }
5010 
5011     return ret;
5012 }
5013 
5014 #ifdef TIOCGPTPEER
5015 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5016                                      int fd, int cmd, abi_long arg)
5017 {
5018     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5019     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5020 }
5021 #endif
5022 
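/*
 * Table of emulated ioctls, generated from ioctls.h: plain IOCTL()
 * entries are converted generically from their argument type description,
 * IOCTL_SPECIAL() entries dispatch to a do_ioctl_*() helper, and
 * IOCTL_IGNORE() entries are recognised commands that fail with
 * -TARGET_ENOSYS.
 */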
5023 static IOCTLEntry ioctl_entries[] = {
5024 #define IOCTL(cmd, access, ...) \
5025     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5026 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5027     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5028 #define IOCTL_IGNORE(cmd) \
5029     { TARGET_ ## cmd, 0, #cmd },
5030 #include "ioctls.h"
5031     { 0, 0, },
5032 };
5033 
5034 /* ??? Implement proper locking for ioctls.  */
5035 /* do_ioctl() must return target values and target errnos. */
5036 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5037 {
5038     const IOCTLEntry *ie;
5039     const argtype *arg_type;
5040     abi_long ret;
5041     uint8_t buf_temp[MAX_STRUCT_SIZE];
5042     int target_size;
5043     void *argptr;
5044 
5045     ie = ioctl_entries;
5046     for(;;) {
5047         if (ie->target_cmd == 0) {
5048             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5049             return -TARGET_ENOSYS;
5050         }
5051         if (ie->target_cmd == cmd)
5052             break;
5053         ie++;
5054     }
5055     arg_type = ie->arg_type;
5056     if (ie->do_ioctl) {
5057         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5058     } else if (!ie->host_cmd) {
5059         /* Some architectures define BSD ioctls in their headers
5060            that are not implemented in Linux.  */
5061         return -TARGET_ENOSYS;
5062     }
5063 
5064     switch(arg_type[0]) {
5065     case TYPE_NULL:
5066         /* no argument */
5067         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5068         break;
5069     case TYPE_PTRVOID:
5070     case TYPE_INT:
5071         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5072         break;
5073     case TYPE_PTR:
5074         arg_type++;
5075         target_size = thunk_type_size(arg_type, 0);
5076         switch(ie->access) {
5077         case IOC_R:
5078             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5079             if (!is_error(ret)) {
5080                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5081                 if (!argptr)
5082                     return -TARGET_EFAULT;
5083                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5084                 unlock_user(argptr, arg, target_size);
5085             }
5086             break;
5087         case IOC_W:
5088             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5089             if (!argptr)
5090                 return -TARGET_EFAULT;
5091             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5092             unlock_user(argptr, arg, 0);
5093             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5094             break;
5095         default:
5096         case IOC_RW:
5097             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5098             if (!argptr)
5099                 return -TARGET_EFAULT;
5100             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5101             unlock_user(argptr, arg, 0);
5102             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5103             if (!is_error(ret)) {
5104                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5105                 if (!argptr)
5106                     return -TARGET_EFAULT;
5107                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5108                 unlock_user(argptr, arg, target_size);
5109             }
5110             break;
5111         }
5112         break;
5113     default:
5114         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5115                  (long)cmd, arg_type[0]);
5116         ret = -TARGET_ENOSYS;
5117         break;
5118     }
5119     return ret;
5120 }
5121 
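/*
 * Termios flag translation tables: each entry pairs a target mask/value
 * with the corresponding host mask/value, for use with
 * target_to_host_bitmask() and host_to_target_bitmask().
 */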
5122 static const bitmask_transtbl iflag_tbl[] = {
5123         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5124         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5125         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5126         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5127         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5128         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5129         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5130         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5131         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5132         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5133         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5134         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5135         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5136         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5137         { 0, 0, 0, 0 }
5138 };
5139 
5140 static const bitmask_transtbl oflag_tbl[] = {
5141 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5142 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5143 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5144 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5145 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5146 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5147 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5148 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5149 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5150 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5151 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5152 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5153 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5154 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5155 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5156 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5157 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5158 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5159 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5160 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5161 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5162 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5163 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5164 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5165 	{ 0, 0, 0, 0 }
5166 };
5167 
5168 static const bitmask_transtbl cflag_tbl[] = {
5169 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5170 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5171 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5172 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5173 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5174 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5175 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5176 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5177 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5178 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5179 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5180 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5181 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5182 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5183 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5184 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5185 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5186 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5187 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5188 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5189 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5190 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5191 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5192 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5193 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5194 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5195 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5196 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5197 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5198 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5199 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5200 	{ 0, 0, 0, 0 }
5201 };
5202 
5203 static const bitmask_transtbl lflag_tbl[] = {
5204 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5205 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5206 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5207 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5208 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5209 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5210 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5211 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5212 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5213 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5214 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5215 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5216 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5217 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5218 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5219 	{ 0, 0, 0, 0 }
5220 };
5221 
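/*
 * Convert a guest struct target_termios to the host layout: translate
 * the flag words through the tables above and remap the control
 * character array entry by entry.
 */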
5222 static void target_to_host_termios (void *dst, const void *src)
5223 {
5224     struct host_termios *host = dst;
5225     const struct target_termios *target = src;
5226 
5227     host->c_iflag =
5228         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5229     host->c_oflag =
5230         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5231     host->c_cflag =
5232         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5233     host->c_lflag =
5234         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5235     host->c_line = target->c_line;
5236 
5237     memset(host->c_cc, 0, sizeof(host->c_cc));
5238     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5239     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5240     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5241     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5242     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5243     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5244     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5245     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5246     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5247     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5248     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5249     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5250     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5251     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5252     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5253     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5254     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5255 }
5256 
5257 static void host_to_target_termios (void *dst, const void *src)
5258 {
5259     struct target_termios *target = dst;
5260     const struct host_termios *host = src;
5261 
5262     target->c_iflag =
5263         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5264     target->c_oflag =
5265         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5266     target->c_cflag =
5267         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5268     target->c_lflag =
5269         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5270     target->c_line = host->c_line;
5271 
5272     memset(target->c_cc, 0, sizeof(target->c_cc));
5273     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5274     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5275     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5276     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5277     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5278     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5279     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5280     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5281     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5282     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5283     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5284     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5285     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5286     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5287     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5288     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5289     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5290 }
5291 
5292 static const StructEntry struct_termios_def = {
5293     .convert = { host_to_target_termios, target_to_host_termios },
5294     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5295     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5296 };
5297 
5298 static bitmask_transtbl mmap_flags_tbl[] = {
5299     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5300     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5301     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5302     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5303       MAP_ANONYMOUS, MAP_ANONYMOUS },
5304     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5305       MAP_GROWSDOWN, MAP_GROWSDOWN },
5306     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5307       MAP_DENYWRITE, MAP_DENYWRITE },
5308     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5309       MAP_EXECUTABLE, MAP_EXECUTABLE },
5310     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5311     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5312       MAP_NORESERVE, MAP_NORESERVE },
5313     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5314     /* MAP_STACK has been ignored by the kernel for quite some time.
5315        Recognize it for the target insofar as we do not want to pass
5316        it through to the host.  */
5317     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5318     { 0, 0, 0, 0 }
5319 };
5320 
5321 #if defined(TARGET_I386)
5322 
5323 /* NOTE: there is really one LDT for all the threads */
5324 static uint8_t *ldt_table;
5325 
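/* modify_ldt(func == 0): copy the emulated LDT back to guest memory,
   truncated to bytecount. */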
5326 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5327 {
5328     int size;
5329     void *p;
5330 
5331     if (!ldt_table)
5332         return 0;
5333     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5334     if (size > bytecount)
5335         size = bytecount;
5336     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5337     if (!p)
5338         return -TARGET_EFAULT;
5339     /* ??? Should this by byteswapped?  */
5340     memcpy(p, ldt_table, size);
5341     unlock_user(p, ptr, size);
5342     return size;
5343 }
5344 
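/* modify_ldt write: build the two 32-bit descriptor words from the
   guest-supplied ldt_info and install them in the emulated LDT,
   allocating the LDT on first use. */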
5345 /* XXX: add locking support */
5346 static abi_long write_ldt(CPUX86State *env,
5347                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5348 {
5349     struct target_modify_ldt_ldt_s ldt_info;
5350     struct target_modify_ldt_ldt_s *target_ldt_info;
5351     int seg_32bit, contents, read_exec_only, limit_in_pages;
5352     int seg_not_present, useable, lm;
5353     uint32_t *lp, entry_1, entry_2;
5354 
5355     if (bytecount != sizeof(ldt_info))
5356         return -TARGET_EINVAL;
5357     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5358         return -TARGET_EFAULT;
5359     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5360     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5361     ldt_info.limit = tswap32(target_ldt_info->limit);
5362     ldt_info.flags = tswap32(target_ldt_info->flags);
5363     unlock_user_struct(target_ldt_info, ptr, 0);
5364 
5365     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5366         return -TARGET_EINVAL;
5367     seg_32bit = ldt_info.flags & 1;
5368     contents = (ldt_info.flags >> 1) & 3;
5369     read_exec_only = (ldt_info.flags >> 3) & 1;
5370     limit_in_pages = (ldt_info.flags >> 4) & 1;
5371     seg_not_present = (ldt_info.flags >> 5) & 1;
5372     useable = (ldt_info.flags >> 6) & 1;
5373 #ifdef TARGET_ABI32
5374     lm = 0;
5375 #else
5376     lm = (ldt_info.flags >> 7) & 1;
5377 #endif
5378     if (contents == 3) {
5379         if (oldmode)
5380             return -TARGET_EINVAL;
5381         if (seg_not_present == 0)
5382             return -TARGET_EINVAL;
5383     }
5384     /* allocate the LDT */
5385     if (!ldt_table) {
5386         env->ldt.base = target_mmap(0,
5387                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5388                                     PROT_READ|PROT_WRITE,
5389                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5390         if (env->ldt.base == -1)
5391             return -TARGET_ENOMEM;
5392         memset(g2h(env->ldt.base), 0,
5393                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5394         env->ldt.limit = 0xffff;
5395         ldt_table = g2h(env->ldt.base);
5396     }
5397 
5398     /* NOTE: same code as Linux kernel */
5399     /* Allow LDTs to be cleared by the user. */
5400     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5401         if (oldmode ||
5402             (contents == 0		&&
5403              read_exec_only == 1	&&
5404              seg_32bit == 0		&&
5405              limit_in_pages == 0	&&
5406              seg_not_present == 1	&&
5407              useable == 0 )) {
5408             entry_1 = 0;
5409             entry_2 = 0;
5410             goto install;
5411         }
5412     }
5413 
5414     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5415         (ldt_info.limit & 0x0ffff);
5416     entry_2 = (ldt_info.base_addr & 0xff000000) |
5417         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5418         (ldt_info.limit & 0xf0000) |
5419         ((read_exec_only ^ 1) << 9) |
5420         (contents << 10) |
5421         ((seg_not_present ^ 1) << 15) |
5422         (seg_32bit << 22) |
5423         (limit_in_pages << 23) |
5424         (lm << 21) |
5425         0x7000;
5426     if (!oldmode)
5427         entry_2 |= (useable << 20);
5428 
5429     /* Install the new entry ...  */
5430 install:
5431     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5432     lp[0] = tswap32(entry_1);
5433     lp[1] = tswap32(entry_2);
5434     return 0;
5435 }
5436 
5437 /* specific and weird i386 syscalls */
5438 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5439                               unsigned long bytecount)
5440 {
5441     abi_long ret;
5442 
5443     switch (func) {
5444     case 0:
5445         ret = read_ldt(ptr, bytecount);
5446         break;
5447     case 1:
5448         ret = write_ldt(env, ptr, bytecount, 1);
5449         break;
5450     case 0x11:
5451         ret = write_ldt(env, ptr, bytecount, 0);
5452         break;
5453     default:
5454         ret = -TARGET_ENOSYS;
5455         break;
5456     }
5457     return ret;
5458 }
5459 
5460 #if defined(TARGET_I386) && defined(TARGET_ABI32)
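/*
 * set_thread_area: pick a free TLS entry in the emulated GDT if the guest
 * asked for -1, then encode the guest-supplied base/limit/flags into the
 * descriptor words.
 */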
5461 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5462 {
5463     uint64_t *gdt_table = g2h(env->gdt.base);
5464     struct target_modify_ldt_ldt_s ldt_info;
5465     struct target_modify_ldt_ldt_s *target_ldt_info;
5466     int seg_32bit, contents, read_exec_only, limit_in_pages;
5467     int seg_not_present, useable, lm;
5468     uint32_t *lp, entry_1, entry_2;
5469     int i;
5470 
5471     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5472     if (!target_ldt_info)
5473         return -TARGET_EFAULT;
5474     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5475     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5476     ldt_info.limit = tswap32(target_ldt_info->limit);
5477     ldt_info.flags = tswap32(target_ldt_info->flags);
5478     if (ldt_info.entry_number == -1) {
5479         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5480             if (gdt_table[i] == 0) {
5481                 ldt_info.entry_number = i;
5482                 target_ldt_info->entry_number = tswap32(i);
5483                 break;
5484             }
5485         }
5486     }
5487     unlock_user_struct(target_ldt_info, ptr, 1);
5488 
5489     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5490         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5491            return -TARGET_EINVAL;
5492     seg_32bit = ldt_info.flags & 1;
5493     contents = (ldt_info.flags >> 1) & 3;
5494     read_exec_only = (ldt_info.flags >> 3) & 1;
5495     limit_in_pages = (ldt_info.flags >> 4) & 1;
5496     seg_not_present = (ldt_info.flags >> 5) & 1;
5497     useable = (ldt_info.flags >> 6) & 1;
5498 #ifdef TARGET_ABI32
5499     lm = 0;
5500 #else
5501     lm = (ldt_info.flags >> 7) & 1;
5502 #endif
5503 
5504     if (contents == 3) {
5505         if (seg_not_present == 0)
5506             return -TARGET_EINVAL;
5507     }
5508 
5509     /* NOTE: same code as Linux kernel */
5510     /* Allow LDTs to be cleared by the user. */
5511     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5512         if ((contents == 0             &&
5513              read_exec_only == 1       &&
5514              seg_32bit == 0            &&
5515              limit_in_pages == 0       &&
5516              seg_not_present == 1      &&
5517              useable == 0 )) {
5518             entry_1 = 0;
5519             entry_2 = 0;
5520             goto install;
5521         }
5522     }
5523 
5524     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5525         (ldt_info.limit & 0x0ffff);
5526     entry_2 = (ldt_info.base_addr & 0xff000000) |
5527         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5528         (ldt_info.limit & 0xf0000) |
5529         ((read_exec_only ^ 1) << 9) |
5530         (contents << 10) |
5531         ((seg_not_present ^ 1) << 15) |
5532         (seg_32bit << 22) |
5533         (limit_in_pages << 23) |
5534         (useable << 20) |
5535         (lm << 21) |
5536         0x7000;
5537 
5538     /* Install the new entry ...  */
5539 install:
5540     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5541     lp[0] = tswap32(entry_1);
5542     lp[1] = tswap32(entry_2);
5543     return 0;
5544 }
5545 
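/* get_thread_area: decode the selected TLS descriptor from the emulated
   GDT back into the guest's ldt_info layout. */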
5546 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5547 {
5548     struct target_modify_ldt_ldt_s *target_ldt_info;
5549     uint64_t *gdt_table = g2h(env->gdt.base);
5550     uint32_t base_addr, limit, flags;
5551     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5552     int seg_not_present, useable, lm;
5553     uint32_t *lp, entry_1, entry_2;
5554 
5555     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5556     if (!target_ldt_info)
5557         return -TARGET_EFAULT;
5558     idx = tswap32(target_ldt_info->entry_number);
5559     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5560         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5561         unlock_user_struct(target_ldt_info, ptr, 1);
5562         return -TARGET_EINVAL;
5563     }
5564     lp = (uint32_t *)(gdt_table + idx);
5565     entry_1 = tswap32(lp[0]);
5566     entry_2 = tswap32(lp[1]);
5567 
5568     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5569     contents = (entry_2 >> 10) & 3;
5570     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5571     seg_32bit = (entry_2 >> 22) & 1;
5572     limit_in_pages = (entry_2 >> 23) & 1;
5573     useable = (entry_2 >> 20) & 1;
5574 #ifdef TARGET_ABI32
5575     lm = 0;
5576 #else
5577     lm = (entry_2 >> 21) & 1;
5578 #endif
5579     flags = (seg_32bit << 0) | (contents << 1) |
5580         (read_exec_only << 3) | (limit_in_pages << 4) |
5581         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5582     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5583     base_addr = (entry_1 >> 16) |
5584         (entry_2 & 0xff000000) |
5585         ((entry_2 & 0xff) << 16);
5586     target_ldt_info->base_addr = tswapal(base_addr);
5587     target_ldt_info->limit = tswap32(limit);
5588     target_ldt_info->flags = tswap32(flags);
5589     unlock_user_struct(target_ldt_info, ptr, 1);
5590     return 0;
5591 }
5592 #endif /* TARGET_I386 && TARGET_ABI32 */
5593 
5594 #ifndef TARGET_ABI32
5595 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5596 {
5597     abi_long ret = 0;
5598     abi_ulong val;
5599     int idx;
5600 
5601     switch(code) {
5602     case TARGET_ARCH_SET_GS:
5603     case TARGET_ARCH_SET_FS:
5604         if (code == TARGET_ARCH_SET_GS)
5605             idx = R_GS;
5606         else
5607             idx = R_FS;
5608         cpu_x86_load_seg(env, idx, 0);
5609         env->segs[idx].base = addr;
5610         break;
5611     case TARGET_ARCH_GET_GS:
5612     case TARGET_ARCH_GET_FS:
5613         if (code == TARGET_ARCH_GET_GS)
5614             idx = R_GS;
5615         else
5616             idx = R_FS;
5617         val = env->segs[idx].base;
5618         if (put_user(val, addr, abi_ulong))
5619             ret = -TARGET_EFAULT;
5620         break;
5621     default:
5622         ret = -TARGET_EINVAL;
5623         break;
5624     }
5625     return ret;
5626 }
5627 #endif
5628 
5629 #endif /* defined(TARGET_I386) */
5630 
5631 #define NEW_STACK_SIZE 0x40000
5632 
5633 
5634 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5635 typedef struct {
5636     CPUArchState *env;
5637     pthread_mutex_t mutex;
5638     pthread_cond_t cond;
5639     pthread_t thread;
5640     uint32_t tid;
5641     abi_ulong child_tidptr;
5642     abi_ulong parent_tidptr;
5643     sigset_t sigmask;
5644 } new_thread_info;
5645 
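/*
 * Start routine for threads created via the CLONE_VM path of do_fork():
 * register the new host thread with RCU and TCG, publish the TID to the
 * requested locations, signal the parent and enter the guest CPU loop.
 */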
5646 static void *clone_func(void *arg)
5647 {
5648     new_thread_info *info = arg;
5649     CPUArchState *env;
5650     CPUState *cpu;
5651     TaskState *ts;
5652 
5653     rcu_register_thread();
5654     tcg_register_thread();
5655     env = info->env;
5656     cpu = env_cpu(env);
5657     thread_cpu = cpu;
5658     ts = (TaskState *)cpu->opaque;
5659     info->tid = sys_gettid();
5660     task_settid(ts);
5661     if (info->child_tidptr)
5662         put_user_u32(info->tid, info->child_tidptr);
5663     if (info->parent_tidptr)
5664         put_user_u32(info->tid, info->parent_tidptr);
5665     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5666     /* Enable signals.  */
5667     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5668     /* Signal to the parent that we're ready.  */
5669     pthread_mutex_lock(&info->mutex);
5670     pthread_cond_broadcast(&info->cond);
5671     pthread_mutex_unlock(&info->mutex);
5672     /* Wait until the parent has finished initializing the tls state.  */
5673     pthread_mutex_lock(&clone_lock);
5674     pthread_mutex_unlock(&clone_lock);
5675     cpu_loop(env);
5676     /* never exits */
5677     return NULL;
5678 }
5679 
5680 /* do_fork() Must return host values and target errnos (unlike most
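/* Clones with CLONE_VM are implemented as host pthreads sharing the
   emulator process; clones without it fall back to a host fork(). */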
5681    do_*() functions). */
5682 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5683                    abi_ulong parent_tidptr, target_ulong newtls,
5684                    abi_ulong child_tidptr)
5685 {
5686     CPUState *cpu = env_cpu(env);
5687     int ret;
5688     TaskState *ts;
5689     CPUState *new_cpu;
5690     CPUArchState *new_env;
5691     sigset_t sigmask;
5692 
5693     flags &= ~CLONE_IGNORED_FLAGS;
5694 
5695     /* Emulate vfork() with fork() */
5696     if (flags & CLONE_VFORK)
5697         flags &= ~(CLONE_VFORK | CLONE_VM);
5698 
5699     if (flags & CLONE_VM) {
5700         TaskState *parent_ts = (TaskState *)cpu->opaque;
5701         new_thread_info info;
5702         pthread_attr_t attr;
5703 
5704         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5705             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5706             return -TARGET_EINVAL;
5707         }
5708 
5709         ts = g_new0(TaskState, 1);
5710         init_task_state(ts);
5711 
5712         /* Grab a mutex so that thread setup appears atomic.  */
5713         pthread_mutex_lock(&clone_lock);
5714 
5715         /* we create a new CPU instance. */
5716         new_env = cpu_copy(env);
5717         /* Init regs that differ from the parent.  */
5718         cpu_clone_regs(new_env, newsp);
5719         new_cpu = env_cpu(new_env);
5720         new_cpu->opaque = ts;
5721         ts->bprm = parent_ts->bprm;
5722         ts->info = parent_ts->info;
5723         ts->signal_mask = parent_ts->signal_mask;
5724 
5725         if (flags & CLONE_CHILD_CLEARTID) {
5726             ts->child_tidptr = child_tidptr;
5727         }
5728 
5729         if (flags & CLONE_SETTLS) {
5730             cpu_set_tls (new_env, newtls);
5731         }
5732 
5733         memset(&info, 0, sizeof(info));
5734         pthread_mutex_init(&info.mutex, NULL);
5735         pthread_mutex_lock(&info.mutex);
5736         pthread_cond_init(&info.cond, NULL);
5737         info.env = new_env;
5738         if (flags & CLONE_CHILD_SETTID) {
5739             info.child_tidptr = child_tidptr;
5740         }
5741         if (flags & CLONE_PARENT_SETTID) {
5742             info.parent_tidptr = parent_tidptr;
5743         }
5744 
5745         ret = pthread_attr_init(&attr);
5746         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5747         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5748         /* It is not safe to deliver signals until the child has finished
5749            initializing, so temporarily block all signals.  */
5750         sigfillset(&sigmask);
5751         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5752         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5753 
5754         /* If this is our first additional thread, we need to ensure we
5755          * generate code for parallel execution and flush old translations.
5756          */
5757         if (!parallel_cpus) {
5758             parallel_cpus = true;
5759             tb_flush(cpu);
5760         }
5761 
5762         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5763         /* TODO: Free new CPU state if thread creation failed.  */
5764 
5765         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5766         pthread_attr_destroy(&attr);
5767         if (ret == 0) {
5768             /* Wait for the child to initialize.  */
5769             pthread_cond_wait(&info.cond, &info.mutex);
5770             ret = info.tid;
5771         } else {
5772             ret = -1;
5773         }
5774         pthread_mutex_unlock(&info.mutex);
5775         pthread_cond_destroy(&info.cond);
5776         pthread_mutex_destroy(&info.mutex);
5777         pthread_mutex_unlock(&clone_lock);
5778     } else {
5779         /* if no CLONE_VM, we consider it is a fork */
5780         if (flags & CLONE_INVALID_FORK_FLAGS) {
5781             return -TARGET_EINVAL;
5782         }
5783 
5784         /* We can't support custom termination signals */
5785         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5786             return -TARGET_EINVAL;
5787         }
5788 
5789         if (block_signals()) {
5790             return -TARGET_ERESTARTSYS;
5791         }
5792 
5793         fork_start();
5794         ret = fork();
5795         if (ret == 0) {
5796             /* Child Process.  */
5797             cpu_clone_regs(env, newsp);
5798             fork_end(1);
5799             /* There is a race condition here.  The parent process could
5800                theoretically read the TID in the child process before the child
5801                tid is set.  This would require using either ptrace
5802                (not implemented) or having *_tidptr to point at a shared memory
5803                mapping.  We can't repeat the spinlock hack used above because
5804                the child process gets its own copy of the lock.  */
5805             if (flags & CLONE_CHILD_SETTID)
5806                 put_user_u32(sys_gettid(), child_tidptr);
5807             if (flags & CLONE_PARENT_SETTID)
5808                 put_user_u32(sys_gettid(), parent_tidptr);
5809             ts = (TaskState *)cpu->opaque;
5810             if (flags & CLONE_SETTLS)
5811                 cpu_set_tls (env, newtls);
5812             if (flags & CLONE_CHILD_CLEARTID)
5813                 ts->child_tidptr = child_tidptr;
5814         } else {
5815             fork_end(0);
5816         }
5817     }
5818     return ret;
5819 }
5820 
5821 /* warning: doesn't handle Linux-specific flags... */
5822 static int target_to_host_fcntl_cmd(int cmd)
5823 {
5824     int ret;
5825 
5826     switch(cmd) {
5827     case TARGET_F_DUPFD:
5828     case TARGET_F_GETFD:
5829     case TARGET_F_SETFD:
5830     case TARGET_F_GETFL:
5831     case TARGET_F_SETFL:
5832         ret = cmd;
5833         break;
5834     case TARGET_F_GETLK:
5835         ret = F_GETLK64;
5836         break;
5837     case TARGET_F_SETLK:
5838         ret = F_SETLK64;
5839         break;
5840     case TARGET_F_SETLKW:
5841         ret = F_SETLKW64;
5842         break;
5843     case TARGET_F_GETOWN:
5844         ret = F_GETOWN;
5845         break;
5846     case TARGET_F_SETOWN:
5847         ret = F_SETOWN;
5848         break;
5849     case TARGET_F_GETSIG:
5850         ret = F_GETSIG;
5851         break;
5852     case TARGET_F_SETSIG:
5853         ret = F_SETSIG;
5854         break;
5855 #if TARGET_ABI_BITS == 32
5856     case TARGET_F_GETLK64:
5857         ret = F_GETLK64;
5858         break;
5859     case TARGET_F_SETLK64:
5860         ret = F_SETLK64;
5861         break;
5862     case TARGET_F_SETLKW64:
5863         ret = F_SETLKW64;
5864         break;
5865 #endif
5866     case TARGET_F_SETLEASE:
5867         ret = F_SETLEASE;
5868         break;
5869     case TARGET_F_GETLEASE:
5870         ret = F_GETLEASE;
5871         break;
5872 #ifdef F_DUPFD_CLOEXEC
5873     case TARGET_F_DUPFD_CLOEXEC:
5874         ret = F_DUPFD_CLOEXEC;
5875         break;
5876 #endif
5877     case TARGET_F_NOTIFY:
5878         ret = F_NOTIFY;
5879         break;
5880 #ifdef F_GETOWN_EX
5881     case TARGET_F_GETOWN_EX:
5882         ret = F_GETOWN_EX;
5883         break;
5884 #endif
5885 #ifdef F_SETOWN_EX
5886     case TARGET_F_SETOWN_EX:
5887         ret = F_SETOWN_EX;
5888         break;
5889 #endif
5890 #ifdef F_SETPIPE_SZ
5891     case TARGET_F_SETPIPE_SZ:
5892         ret = F_SETPIPE_SZ;
5893         break;
5894     case TARGET_F_GETPIPE_SZ:
5895         ret = F_GETPIPE_SZ;
5896         break;
5897 #endif
5898     default:
5899         ret = -TARGET_EINVAL;
5900         break;
5901     }
5902 
5903 #if defined(__powerpc64__)
5904     /* On PPC64, glibc headers have F_*LK* defined to 12, 13 and 14, which
5905      * are not supported by the kernel. The glibc fcntl call actually adjusts
5906      * them to 5, 6 and 7 before making the syscall(). Since we make the
5907      * syscall directly, adjust to what is supported by the kernel.
5908      */
5909     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5910         ret -= F_GETLK64 - 5;
5911     }
5912 #endif
5913 
5914     return ret;
5915 }
5916 
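/*
 * flock lock types (F_RDLCK etc.) may have different values on target and
 * host; TRANSTBL_CONVERT expands the same list for each direction.
 * Unknown guest values fail with -TARGET_EINVAL, unknown host values are
 * passed through unchanged.
 */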
5917 #define FLOCK_TRANSTBL \
5918     switch (type) { \
5919     TRANSTBL_CONVERT(F_RDLCK); \
5920     TRANSTBL_CONVERT(F_WRLCK); \
5921     TRANSTBL_CONVERT(F_UNLCK); \
5922     TRANSTBL_CONVERT(F_EXLCK); \
5923     TRANSTBL_CONVERT(F_SHLCK); \
5924     }
5925 
5926 static int target_to_host_flock(int type)
5927 {
5928 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5929     FLOCK_TRANSTBL
5930 #undef  TRANSTBL_CONVERT
5931     return -TARGET_EINVAL;
5932 }
5933 
5934 static int host_to_target_flock(int type)
5935 {
5936 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5937     FLOCK_TRANSTBL
5938 #undef  TRANSTBL_CONVERT
5939     /* if we don't know how to convert the value coming from the host,
5940      * copy it to the target field as-is
5941      */
5942     return type;
5943 }
5944 
5945 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5946                                             abi_ulong target_flock_addr)
5947 {
5948     struct target_flock *target_fl;
5949     int l_type;
5950 
5951     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5952         return -TARGET_EFAULT;
5953     }
5954 
5955     __get_user(l_type, &target_fl->l_type);
5956     l_type = target_to_host_flock(l_type);
5957     if (l_type < 0) {
5958         return l_type;
5959     }
5960     fl->l_type = l_type;
5961     __get_user(fl->l_whence, &target_fl->l_whence);
5962     __get_user(fl->l_start, &target_fl->l_start);
5963     __get_user(fl->l_len, &target_fl->l_len);
5964     __get_user(fl->l_pid, &target_fl->l_pid);
5965     unlock_user_struct(target_fl, target_flock_addr, 0);
5966     return 0;
5967 }
5968 
5969 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5970                                           const struct flock64 *fl)
5971 {
5972     struct target_flock *target_fl;
5973     short l_type;
5974 
5975     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5976         return -TARGET_EFAULT;
5977     }
5978 
5979     l_type = host_to_target_flock(fl->l_type);
5980     __put_user(l_type, &target_fl->l_type);
5981     __put_user(fl->l_whence, &target_fl->l_whence);
5982     __put_user(fl->l_start, &target_fl->l_start);
5983     __put_user(fl->l_len, &target_fl->l_len);
5984     __put_user(fl->l_pid, &target_fl->l_pid);
5985     unlock_user_struct(target_fl, target_flock_addr, 1);
5986     return 0;
5987 }
5988 
5989 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5990 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5991 
5992 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5993 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5994                                                    abi_ulong target_flock_addr)
5995 {
5996     struct target_oabi_flock64 *target_fl;
5997     int l_type;
5998 
5999     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6000         return -TARGET_EFAULT;
6001     }
6002 
6003     __get_user(l_type, &target_fl->l_type);
6004     l_type = target_to_host_flock(l_type);
6005     if (l_type < 0) {
6006         return l_type;
6007     }
6008     fl->l_type = l_type;
6009     __get_user(fl->l_whence, &target_fl->l_whence);
6010     __get_user(fl->l_start, &target_fl->l_start);
6011     __get_user(fl->l_len, &target_fl->l_len);
6012     __get_user(fl->l_pid, &target_fl->l_pid);
6013     unlock_user_struct(target_fl, target_flock_addr, 0);
6014     return 0;
6015 }
6016 
6017 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6018                                                  const struct flock64 *fl)
6019 {
6020     struct target_oabi_flock64 *target_fl;
6021     short l_type;
6022 
6023     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6024         return -TARGET_EFAULT;
6025     }
6026 
6027     l_type = host_to_target_flock(fl->l_type);
6028     __put_user(l_type, &target_fl->l_type);
6029     __put_user(fl->l_whence, &target_fl->l_whence);
6030     __put_user(fl->l_start, &target_fl->l_start);
6031     __put_user(fl->l_len, &target_fl->l_len);
6032     __put_user(fl->l_pid, &target_fl->l_pid);
6033     unlock_user_struct(target_fl, target_flock_addr, 1);
6034     return 0;
6035 }
6036 #endif
6037 
6038 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6039                                               abi_ulong target_flock_addr)
6040 {
6041     struct target_flock64 *target_fl;
6042     int l_type;
6043 
6044     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6045         return -TARGET_EFAULT;
6046     }
6047 
6048     __get_user(l_type, &target_fl->l_type);
6049     l_type = target_to_host_flock(l_type);
6050     if (l_type < 0) {
6051         return l_type;
6052     }
6053     fl->l_type = l_type;
6054     __get_user(fl->l_whence, &target_fl->l_whence);
6055     __get_user(fl->l_start, &target_fl->l_start);
6056     __get_user(fl->l_len, &target_fl->l_len);
6057     __get_user(fl->l_pid, &target_fl->l_pid);
6058     unlock_user_struct(target_fl, target_flock_addr, 0);
6059     return 0;
6060 }
6061 
6062 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6063                                             const struct flock64 *fl)
6064 {
6065     struct target_flock64 *target_fl;
6066     short l_type;
6067 
6068     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6069         return -TARGET_EFAULT;
6070     }
6071 
6072     l_type = host_to_target_flock(fl->l_type);
6073     __put_user(l_type, &target_fl->l_type);
6074     __put_user(fl->l_whence, &target_fl->l_whence);
6075     __put_user(fl->l_start, &target_fl->l_start);
6076     __put_user(fl->l_len, &target_fl->l_len);
6077     __put_user(fl->l_pid, &target_fl->l_pid);
6078     unlock_user_struct(target_fl, target_flock_addr, 1);
6079     return 0;
6080 }
6081 
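/*
 * fcntl(): translate the command and any struct flock / f_owner_ex
 * argument, issue the host call, and convert the result (including the
 * O_* status flags for F_GETFL) back to the target encoding.
 */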
6082 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6083 {
6084     struct flock64 fl64;
6085 #ifdef F_GETOWN_EX
6086     struct f_owner_ex fox;
6087     struct target_f_owner_ex *target_fox;
6088 #endif
6089     abi_long ret;
6090     int host_cmd = target_to_host_fcntl_cmd(cmd);
6091 
6092     if (host_cmd == -TARGET_EINVAL)
6093 	    return host_cmd;
6094 
6095     switch(cmd) {
6096     case TARGET_F_GETLK:
6097         ret = copy_from_user_flock(&fl64, arg);
6098         if (ret) {
6099             return ret;
6100         }
6101         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6102         if (ret == 0) {
6103             ret = copy_to_user_flock(arg, &fl64);
6104         }
6105         break;
6106 
6107     case TARGET_F_SETLK:
6108     case TARGET_F_SETLKW:
6109         ret = copy_from_user_flock(&fl64, arg);
6110         if (ret) {
6111             return ret;
6112         }
6113         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6114         break;
6115 
6116     case TARGET_F_GETLK64:
6117         ret = copy_from_user_flock64(&fl64, arg);
6118         if (ret) {
6119             return ret;
6120         }
6121         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6122         if (ret == 0) {
6123             ret = copy_to_user_flock64(arg, &fl64);
6124         }
6125         break;
6126     case TARGET_F_SETLK64:
6127     case TARGET_F_SETLKW64:
6128         ret = copy_from_user_flock64(&fl64, arg);
6129         if (ret) {
6130             return ret;
6131         }
6132         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6133         break;
6134 
6135     case TARGET_F_GETFL:
6136         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6137         if (ret >= 0) {
6138             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6139         }
6140         break;
6141 
6142     case TARGET_F_SETFL:
6143         ret = get_errno(safe_fcntl(fd, host_cmd,
6144                                    target_to_host_bitmask(arg,
6145                                                           fcntl_flags_tbl)));
6146         break;
6147 
6148 #ifdef F_GETOWN_EX
6149     case TARGET_F_GETOWN_EX:
6150         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6151         if (ret >= 0) {
6152             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6153                 return -TARGET_EFAULT;
6154             target_fox->type = tswap32(fox.type);
6155             target_fox->pid = tswap32(fox.pid);
6156             unlock_user_struct(target_fox, arg, 1);
6157         }
6158         break;
6159 #endif
6160 
6161 #ifdef F_SETOWN_EX
6162     case TARGET_F_SETOWN_EX:
6163         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6164             return -TARGET_EFAULT;
6165         fox.type = tswap32(target_fox->type);
6166         fox.pid = tswap32(target_fox->pid);
6167         unlock_user_struct(target_fox, arg, 0);
6168         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6169         break;
6170 #endif
6171 
6172     case TARGET_F_SETOWN:
6173     case TARGET_F_GETOWN:
6174     case TARGET_F_SETSIG:
6175     case TARGET_F_GETSIG:
6176     case TARGET_F_SETLEASE:
6177     case TARGET_F_GETLEASE:
6178     case TARGET_F_SETPIPE_SZ:
6179     case TARGET_F_GETPIPE_SZ:
6180         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6181         break;
6182 
6183     default:
6184         ret = get_errno(safe_fcntl(fd, cmd, arg));
6185         break;
6186     }
6187     return ret;
6188 }
6189 
6190 #ifdef USE_UID16
6191 
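/* On targets with 16-bit uid_t/gid_t the IDs have to be squeezed into 16 bits
 * on the way to the guest and widened again on the way back: host IDs above
 * 65535 are reported as 65534 (the kernel's default overflow UID/GID), and a
 * guest value of 0xffff is turned back into -1 so it keeps its special
 * "leave unchanged" meaning for calls such as setresuid().
 */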
6192 static inline int high2lowuid(int uid)
6193 {
6194     if (uid > 65535)
6195         return 65534;
6196     else
6197         return uid;
6198 }
6199 
6200 static inline int high2lowgid(int gid)
6201 {
6202     if (gid > 65535)
6203         return 65534;
6204     else
6205         return gid;
6206 }
6207 
6208 static inline int low2highuid(int uid)
6209 {
6210     if ((int16_t)uid == -1)
6211         return -1;
6212     else
6213         return uid;
6214 }
6215 
6216 static inline int low2highgid(int gid)
6217 {
6218     if ((int16_t)gid == -1)
6219         return -1;
6220     else
6221         return gid;
6222 }
6223 static inline int tswapid(int id)
6224 {
6225     return tswap16(id);
6226 }
6227 
6228 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6229 
6230 #else /* !USE_UID16 */
6231 static inline int high2lowuid(int uid)
6232 {
6233     return uid;
6234 }
6235 static inline int high2lowgid(int gid)
6236 {
6237     return gid;
6238 }
6239 static inline int low2highuid(int uid)
6240 {
6241     return uid;
6242 }
6243 static inline int low2highgid(int gid)
6244 {
6245     return gid;
6246 }
6247 static inline int tswapid(int id)
6248 {
6249     return tswap32(id);
6250 }
6251 
6252 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6253 
6254 #endif /* USE_UID16 */
6255 
6256 /* We must do direct syscalls for setting UID/GID, because we want to
6257  * implement the Linux system call semantics of "change only for this thread",
6258  * not the libc/POSIX semantics of "change for all threads in process".
6259  * (See http://ewontfix.com/17/ for more details.)
6260  * We use the 32-bit version of the syscalls if present; if it is not
6261  * then either the host architecture supports 32-bit UIDs natively with
6262  * the standard syscall, or the 16-bit UID is the best we can do.
6263  */
6264 #ifdef __NR_setuid32
6265 #define __NR_sys_setuid __NR_setuid32
6266 #else
6267 #define __NR_sys_setuid __NR_setuid
6268 #endif
6269 #ifdef __NR_setgid32
6270 #define __NR_sys_setgid __NR_setgid32
6271 #else
6272 #define __NR_sys_setgid __NR_setgid
6273 #endif
6274 #ifdef __NR_setresuid32
6275 #define __NR_sys_setresuid __NR_setresuid32
6276 #else
6277 #define __NR_sys_setresuid __NR_setresuid
6278 #endif
6279 #ifdef __NR_setresgid32
6280 #define __NR_sys_setresgid __NR_setresgid32
6281 #else
6282 #define __NR_sys_setresgid __NR_setresgid
6283 #endif
6284 
6285 _syscall1(int, sys_setuid, uid_t, uid)
6286 _syscall1(int, sys_setgid, gid_t, gid)
6287 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6288 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6289 
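/* One-time initialisation of the syscall layer: register the structure layouts
 * used by the thunk conversion code, build the target-to-host errno table from
 * its host-to-target counterpart, patch the size field of ioctl requests whose
 * size must be computed from the thunk type description, and (when host and
 * target architectures match) cross-check target against host ioctl numbers.
 */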
6290 void syscall_init(void)
6291 {
6292     IOCTLEntry *ie;
6293     const argtype *arg_type;
6294     int size;
6295     int i;
6296 
6297     thunk_init(STRUCT_MAX);
6298 
6299 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6300 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6301 #include "syscall_types.h"
6302 #undef STRUCT
6303 #undef STRUCT_SPECIAL
6304 
6305     /* Build target_to_host_errno_table[] from
6306      * host_to_target_errno_table[]. */
6307     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6308         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6309     }
6310 
6311     /* we patch the ioctl size if necessary. We rely on the fact that
6312        no ioctl has all the bits at '1' in the size field */
6313     ie = ioctl_entries;
6314     while (ie->target_cmd != 0) {
6315         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6316             TARGET_IOC_SIZEMASK) {
6317             arg_type = ie->arg_type;
6318             if (arg_type[0] != TYPE_PTR) {
6319                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6320                         ie->target_cmd);
6321                 exit(1);
6322             }
6323             arg_type++;
6324             size = thunk_type_size(arg_type, 0);
6325             ie->target_cmd = (ie->target_cmd &
6326                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6327                 (size << TARGET_IOC_SIZESHIFT);
6328         }
6329 
6330         /* automatic consistency check if same arch */
6331 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6332     (defined(__x86_64__) && defined(TARGET_X86_64))
6333         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6334             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6335                     ie->name, ie->target_cmd, ie->host_cmd);
6336         }
6337 #endif
6338         ie++;
6339     }
6340 }
6341 
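/* On 32-bit ABIs a 64-bit file offset is passed as a pair of registers; which
 * register holds the high word depends on the target endianness.  Some ABIs
 * additionally require the pair to start on an even register (reported by
 * regpairs_aligned()), in which case the words arrive one argument later; see
 * the truncate64/ftruncate64 wrappers below.
 */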
6342 #if TARGET_ABI_BITS == 32
6343 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6344 {
6345 #ifdef TARGET_WORDS_BIGENDIAN
6346     return ((uint64_t)word0 << 32) | word1;
6347 #else
6348     return ((uint64_t)word1 << 32) | word0;
6349 #endif
6350 }
6351 #else /* TARGET_ABI_BITS == 32 */
6352 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6353 {
6354     return word0;
6355 }
6356 #endif /* TARGET_ABI_BITS != 32 */
6357 
6358 #ifdef TARGET_NR_truncate64
6359 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6360                                          abi_long arg2,
6361                                          abi_long arg3,
6362                                          abi_long arg4)
6363 {
6364     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6365         arg2 = arg3;
6366         arg3 = arg4;
6367     }
6368     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6369 }
6370 #endif
6371 
6372 #ifdef TARGET_NR_ftruncate64
6373 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6374                                           abi_long arg2,
6375                                           abi_long arg3,
6376                                           abi_long arg4)
6377 {
6378     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6379         arg2 = arg3;
6380         arg3 = arg4;
6381     }
6382     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6383 }
6384 #endif
6385 
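/* Convert a struct itimerspec between guest and host representations,
 * byte-swapping the tv_sec/tv_nsec fields of both the interval and the value;
 * used by the timer and timerfd settime/gettime syscalls.
 */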
6386 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6387                                                  abi_ulong target_addr)
6388 {
6389     struct target_itimerspec *target_itspec;
6390 
6391     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6392         return -TARGET_EFAULT;
6393     }
6394 
6395     host_itspec->it_interval.tv_sec =
6396                             tswapal(target_itspec->it_interval.tv_sec);
6397     host_itspec->it_interval.tv_nsec =
6398                             tswapal(target_itspec->it_interval.tv_nsec);
6399     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6400     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6401 
6402     unlock_user_struct(target_itspec, target_addr, 1);
6403     return 0;
6404 }
6405 
6406 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6407                                                struct itimerspec *host_its)
6408 {
6409     struct target_itimerspec *target_itspec;
6410 
6411     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6412         return -TARGET_EFAULT;
6413     }
6414 
6415     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6416     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6417 
6418     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6419     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6420 
6421     unlock_user_struct(target_itspec, target_addr, 0);
6422     return 0;
6423 }
6424 
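/* Field-by-field conversion of struct timex between guest and host, for the
 * adjtimex() family of syscalls.
 */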
6425 static inline abi_long target_to_host_timex(struct timex *host_tx,
6426                                             abi_long target_addr)
6427 {
6428     struct target_timex *target_tx;
6429 
6430     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6431         return -TARGET_EFAULT;
6432     }
6433 
6434     __get_user(host_tx->modes, &target_tx->modes);
6435     __get_user(host_tx->offset, &target_tx->offset);
6436     __get_user(host_tx->freq, &target_tx->freq);
6437     __get_user(host_tx->maxerror, &target_tx->maxerror);
6438     __get_user(host_tx->esterror, &target_tx->esterror);
6439     __get_user(host_tx->status, &target_tx->status);
6440     __get_user(host_tx->constant, &target_tx->constant);
6441     __get_user(host_tx->precision, &target_tx->precision);
6442     __get_user(host_tx->tolerance, &target_tx->tolerance);
6443     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6444     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6445     __get_user(host_tx->tick, &target_tx->tick);
6446     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6447     __get_user(host_tx->jitter, &target_tx->jitter);
6448     __get_user(host_tx->shift, &target_tx->shift);
6449     __get_user(host_tx->stabil, &target_tx->stabil);
6450     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6451     __get_user(host_tx->calcnt, &target_tx->calcnt);
6452     __get_user(host_tx->errcnt, &target_tx->errcnt);
6453     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6454     __get_user(host_tx->tai, &target_tx->tai);
6455 
6456     unlock_user_struct(target_tx, target_addr, 0);
6457     return 0;
6458 }
6459 
6460 static inline abi_long host_to_target_timex(abi_long target_addr,
6461                                             struct timex *host_tx)
6462 {
6463     struct target_timex *target_tx;
6464 
6465     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6466         return -TARGET_EFAULT;
6467     }
6468 
6469     __put_user(host_tx->modes, &target_tx->modes);
6470     __put_user(host_tx->offset, &target_tx->offset);
6471     __put_user(host_tx->freq, &target_tx->freq);
6472     __put_user(host_tx->maxerror, &target_tx->maxerror);
6473     __put_user(host_tx->esterror, &target_tx->esterror);
6474     __put_user(host_tx->status, &target_tx->status);
6475     __put_user(host_tx->constant, &target_tx->constant);
6476     __put_user(host_tx->precision, &target_tx->precision);
6477     __put_user(host_tx->tolerance, &target_tx->tolerance);
6478     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6479     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6480     __put_user(host_tx->tick, &target_tx->tick);
6481     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6482     __put_user(host_tx->jitter, &target_tx->jitter);
6483     __put_user(host_tx->shift, &target_tx->shift);
6484     __put_user(host_tx->stabil, &target_tx->stabil);
6485     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6486     __put_user(host_tx->calcnt, &target_tx->calcnt);
6487     __put_user(host_tx->errcnt, &target_tx->errcnt);
6488     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6489     __put_user(host_tx->tai, &target_tx->tai);
6490 
6491     unlock_user_struct(target_tx, target_addr, 1);
6492     return 0;
6493 }
6494 
6495 
6496 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6497                                                abi_ulong target_addr)
6498 {
6499     struct target_sigevent *target_sevp;
6500 
6501     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6502         return -TARGET_EFAULT;
6503     }
6504 
6505     /* This union is awkward on 64 bit systems because it has a 32 bit
6506      * integer and a pointer in it; we follow the conversion approach
6507      * used for handling sigval types in signal.c so the guest should get
6508      * the correct value back even if we did a 64 bit byteswap and it's
6509      * using the 32 bit integer.
6510      */
6511     host_sevp->sigev_value.sival_ptr =
6512         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6513     host_sevp->sigev_signo =
6514         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6515     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6516     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6517 
6518     unlock_user_struct(target_sevp, target_addr, 1);
6519     return 0;
6520 }
6521 
6522 #if defined(TARGET_NR_mlockall)
6523 static inline int target_to_host_mlockall_arg(int arg)
6524 {
6525     int result = 0;
6526 
6527     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6528         result |= MCL_CURRENT;
6529     }
6530     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6531         result |= MCL_FUTURE;
6532     }
6533     return result;
6534 }
6535 #endif
6536 
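/* Convert a host struct stat into the target's stat64 layout (or its plain
 * stat layout if the target has no separate stat64).  32-bit ARM EABI guests
 * use their own target_eabi_stat64 layout and are handled separately.
 */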
6537 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6538      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6539      defined(TARGET_NR_newfstatat))
6540 static inline abi_long host_to_target_stat64(void *cpu_env,
6541                                              abi_ulong target_addr,
6542                                              struct stat *host_st)
6543 {
6544 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6545     if (((CPUARMState *)cpu_env)->eabi) {
6546         struct target_eabi_stat64 *target_st;
6547 
6548         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6549             return -TARGET_EFAULT;
6550         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6551         __put_user(host_st->st_dev, &target_st->st_dev);
6552         __put_user(host_st->st_ino, &target_st->st_ino);
6553 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6554         __put_user(host_st->st_ino, &target_st->__st_ino);
6555 #endif
6556         __put_user(host_st->st_mode, &target_st->st_mode);
6557         __put_user(host_st->st_nlink, &target_st->st_nlink);
6558         __put_user(host_st->st_uid, &target_st->st_uid);
6559         __put_user(host_st->st_gid, &target_st->st_gid);
6560         __put_user(host_st->st_rdev, &target_st->st_rdev);
6561         __put_user(host_st->st_size, &target_st->st_size);
6562         __put_user(host_st->st_blksize, &target_st->st_blksize);
6563         __put_user(host_st->st_blocks, &target_st->st_blocks);
6564         __put_user(host_st->st_atime, &target_st->target_st_atime);
6565         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6566         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6567 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6568         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6569         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6570         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6571 #endif
6572         unlock_user_struct(target_st, target_addr, 1);
6573     } else
6574 #endif
6575     {
6576 #if defined(TARGET_HAS_STRUCT_STAT64)
6577         struct target_stat64 *target_st;
6578 #else
6579         struct target_stat *target_st;
6580 #endif
6581 
6582         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6583             return -TARGET_EFAULT;
6584         memset(target_st, 0, sizeof(*target_st));
6585         __put_user(host_st->st_dev, &target_st->st_dev);
6586         __put_user(host_st->st_ino, &target_st->st_ino);
6587 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6588         __put_user(host_st->st_ino, &target_st->__st_ino);
6589 #endif
6590         __put_user(host_st->st_mode, &target_st->st_mode);
6591         __put_user(host_st->st_nlink, &target_st->st_nlink);
6592         __put_user(host_st->st_uid, &target_st->st_uid);
6593         __put_user(host_st->st_gid, &target_st->st_gid);
6594         __put_user(host_st->st_rdev, &target_st->st_rdev);
6595         /* XXX: better use of kernel struct */
6596         __put_user(host_st->st_size, &target_st->st_size);
6597         __put_user(host_st->st_blksize, &target_st->st_blksize);
6598         __put_user(host_st->st_blocks, &target_st->st_blocks);
6599         __put_user(host_st->st_atime, &target_st->target_st_atime);
6600         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6601         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6602 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6603         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6604         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6605         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6606 #endif
6607         unlock_user_struct(target_st, target_addr, 1);
6608     }
6609 
6610     return 0;
6611 }
6612 #endif
6613 
6614 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6615 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6616                                             abi_ulong target_addr)
6617 {
6618     struct target_statx *target_stx;
6619 
6620     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6621         return -TARGET_EFAULT;
6622     }
6623     memset(target_stx, 0, sizeof(*target_stx));
6624 
6625     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6626     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6627     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6628     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6629     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6630     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6631     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6632     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6633     __put_user(host_stx->stx_size, &target_stx->stx_size);
6634     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6635     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6636     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6637     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6638     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6639     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6640     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6641     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6642     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6643     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6644     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6645     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6646     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6647     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6648 
6649     unlock_user_struct(target_stx, target_addr, 1);
6650 
6651     return 0;
6652 }
6653 #endif
6654 
6655 
6656 /* ??? Using host futex calls even when target atomic operations
6657    are not really atomic probably breaks things.  However, implementing
6658    futexes locally would make futexes shared between multiple processes
6659    tricky; in any case they're probably useless because guest atomic
6660    operations won't work either.  */
6661 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6662                     target_ulong uaddr2, int val3)
6663 {
6664     struct timespec ts, *pts;
6665     int base_op;
6666 
6667     /* ??? We assume FUTEX_* constants are the same on both host
6668        and target.  */
6669 #ifdef FUTEX_CMD_MASK
6670     base_op = op & FUTEX_CMD_MASK;
6671 #else
6672     base_op = op;
6673 #endif
6674     switch (base_op) {
6675     case FUTEX_WAIT:
6676     case FUTEX_WAIT_BITSET:
6677         if (timeout) {
6678             pts = &ts;
6679             target_to_host_timespec(pts, timeout);
6680         } else {
6681             pts = NULL;
6682         }
6683         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6684                          pts, NULL, val3));
6685     case FUTEX_WAKE:
6686         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6687     case FUTEX_FD:
6688         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6689     case FUTEX_REQUEUE:
6690     case FUTEX_CMP_REQUEUE:
6691     case FUTEX_WAKE_OP:
6692         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6693            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6694            But the prototype takes a `struct timespec *'; insert casts
6695            to satisfy the compiler.  We do not need to tswap TIMEOUT
6696            since it's not compared to guest memory.  */
6697         pts = (struct timespec *)(uintptr_t) timeout;
6698         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6699                                     g2h(uaddr2),
6700                                     (base_op == FUTEX_CMP_REQUEUE
6701                                      ? tswap32(val3)
6702                                      : val3)));
6703     default:
6704         return -TARGET_ENOSYS;
6705     }
6706 }
6707 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
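/* Emulate name_to_handle_at(2): read the guest's handle_bytes field, call the
 * host syscall with a scratch struct file_handle, copy the opaque handle back
 * to the guest byte-swapping only handle_bytes and handle_type, and store the
 * returned mount ID.
 */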
6708 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6709                                      abi_long handle, abi_long mount_id,
6710                                      abi_long flags)
6711 {
6712     struct file_handle *target_fh;
6713     struct file_handle *fh;
6714     int mid = 0;
6715     abi_long ret;
6716     char *name;
6717     unsigned int size, total_size;
6718 
6719     if (get_user_s32(size, handle)) {
6720         return -TARGET_EFAULT;
6721     }
6722 
6723     name = lock_user_string(pathname);
6724     if (!name) {
6725         return -TARGET_EFAULT;
6726     }
6727 
6728     total_size = sizeof(struct file_handle) + size;
6729     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6730     if (!target_fh) {
6731         unlock_user(name, pathname, 0);
6732         return -TARGET_EFAULT;
6733     }
6734 
6735     fh = g_malloc0(total_size);
6736     fh->handle_bytes = size;
6737 
6738     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6739     unlock_user(name, pathname, 0);
6740 
6741     /* man name_to_handle_at(2):
6742      * Other than the use of the handle_bytes field, the caller should treat
6743      * the file_handle structure as an opaque data type
6744      */
6745 
6746     memcpy(target_fh, fh, total_size);
6747     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6748     target_fh->handle_type = tswap32(fh->handle_type);
6749     g_free(fh);
6750     unlock_user(target_fh, handle, total_size);
6751 
6752     if (put_user_s32(mid, mount_id)) {
6753         return -TARGET_EFAULT;
6754     }
6755 
6756     return ret;
6757 
6758 }
6759 #endif
6760 
6761 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6762 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6763                                      abi_long flags)
6764 {
6765     struct file_handle *target_fh;
6766     struct file_handle *fh;
6767     unsigned int size, total_size;
6768     abi_long ret;
6769 
6770     if (get_user_s32(size, handle)) {
6771         return -TARGET_EFAULT;
6772     }
6773 
6774     total_size = sizeof(struct file_handle) + size;
6775     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6776     if (!target_fh) {
6777         return -TARGET_EFAULT;
6778     }
6779 
6780     fh = g_memdup(target_fh, total_size);
6781     fh->handle_bytes = size;
6782     fh->handle_type = tswap32(target_fh->handle_type);
6783 
6784     ret = get_errno(open_by_handle_at(mount_fd, fh,
6785                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6786 
6787     g_free(fh);
6788 
6789     unlock_user(target_fh, handle, total_size);
6790 
6791     return ret;
6792 }
6793 #endif
6794 
6795 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6796 
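/* Emulate signalfd()/signalfd4(): convert the guest signal mask and the
 * O_NONBLOCK/O_CLOEXEC flags to host values, create the descriptor, and
 * register an fd translator so that signalfd_siginfo records read from it are
 * converted back to the guest layout.
 */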
6797 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6798 {
6799     int host_flags;
6800     target_sigset_t *target_mask;
6801     sigset_t host_mask;
6802     abi_long ret;
6803 
6804     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6805         return -TARGET_EINVAL;
6806     }
6807     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6808         return -TARGET_EFAULT;
6809     }
6810 
6811     target_to_host_sigset(&host_mask, target_mask);
6812 
6813     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6814 
6815     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6816     if (ret >= 0) {
6817         fd_trans_register(ret, &target_signalfd_trans);
6818     }
6819 
6820     unlock_user_struct(target_mask, mask, 0);
6821 
6822     return ret;
6823 }
6824 #endif
6825 
6826 /* Map host to target signal numbers for the wait family of syscalls.
6827    Assume all other status bits are the same.  */
6828 int host_to_target_waitstatus(int status)
6829 {
6830     if (WIFSIGNALED(status)) {
6831         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6832     }
6833     if (WIFSTOPPED(status)) {
6834         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6835                | (status & 0xff);
6836     }
6837     return status;
6838 }
6839 
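/* Fake /proc/self/cmdline: write the guest's argv strings, each including its
 * terminating NUL, in the same format as the real file.
 */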
6840 static int open_self_cmdline(void *cpu_env, int fd)
6841 {
6842     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6843     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6844     int i;
6845 
6846     for (i = 0; i < bprm->argc; i++) {
6847         size_t len = strlen(bprm->argv[i]) + 1;
6848 
6849         if (write(fd, bprm->argv[i], len) != len) {
6850             return -1;
6851         }
6852     }
6853 
6854     return 0;
6855 }
6856 
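/* Fake /proc/self/maps: parse the host's own maps file, keep only ranges that
 * are valid guest memory, translate the addresses with h2g() and label the
 * guest stack region with "[stack]".
 */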
6857 static int open_self_maps(void *cpu_env, int fd)
6858 {
6859     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6860     TaskState *ts = cpu->opaque;
6861     FILE *fp;
6862     char *line = NULL;
6863     size_t len = 0;
6864     ssize_t read;
6865 
6866     fp = fopen("/proc/self/maps", "r");
6867     if (fp == NULL) {
6868         return -1;
6869     }
6870 
6871     while ((read = getline(&line, &len, fp)) != -1) {
6872         int fields, dev_maj, dev_min, inode;
6873         uint64_t min, max, offset;
6874         char flag_r, flag_w, flag_x, flag_p;
6875         char path[513] = "";
6876         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6877                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6878                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6879 
6880         if ((fields < 10) || (fields > 11)) {
6881             continue;
6882         }
6883         if (h2g_valid(min)) {
6884             int flags = page_get_flags(h2g(min));
6885             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6886             if (page_check_range(h2g(min), max - min, flags) == -1) {
6887                 continue;
6888             }
6889             if (h2g(min) == ts->info->stack_limit) {
6890                 pstrcpy(path, sizeof(path), "      [stack]");
6891             }
6892             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6893                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6894                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6895                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6896                     path[0] ? "         " : "", path);
6897         }
6898     }
6899 
6900     free(line);
6901     fclose(fp);
6902 
6903     return 0;
6904 }
6905 
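/* Fake /proc/self/stat: emit 44 space-separated fields, of which only pid
 * (field 1), comm (field 2) and startstack (field 28) carry real values;
 * everything else is reported as 0.
 */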
6906 static int open_self_stat(void *cpu_env, int fd)
6907 {
6908     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6909     TaskState *ts = cpu->opaque;
6910     abi_ulong start_stack = ts->info->start_stack;
6911     int i;
6912 
6913     for (i = 0; i < 44; i++) {
6914       char buf[128];
6915       int len;
6916       uint64_t val = 0;
6917 
6918       if (i == 0) {
6919         /* pid */
6920         val = getpid();
6921         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6922       } else if (i == 1) {
6923         /* app name */
6924         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6925       } else if (i == 27) {
6926         /* stack bottom */
6927         val = start_stack;
6928         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6929       } else {
6930         /* for the rest, there is MasterCard */
6931         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6932       }
6933 
6934       len = strlen(buf);
6935       if (write(fd, buf, len) != len) {
6936           return -1;
6937       }
6938     }
6939 
6940     return 0;
6941 }
6942 
6943 static int open_self_auxv(void *cpu_env, int fd)
6944 {
6945     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6946     TaskState *ts = cpu->opaque;
6947     abi_ulong auxv = ts->info->saved_auxv;
6948     abi_ulong len = ts->info->auxv_len;
6949     char *ptr;
6950 
6951     /*
6952      * The auxiliary vector is stored on the target process stack.
6953      * Read the whole auxv vector and copy it to the file.
6954      */
6955     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6956     if (ptr != NULL) {
6957         while (len > 0) {
6958             ssize_t r;
6959             r = write(fd, ptr, len);
6960             if (r <= 0) {
6961                 break;
6962             }
6963             len -= r;
6964             ptr += r;
6965         }
6966         lseek(fd, 0, SEEK_SET);
6967         unlock_user(ptr, auxv, len);
6968     }
6969 
6970     return 0;
6971 }
6972 
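/* Return 1 if filename refers to this process's own /proc entry, i.e.
 * "/proc/self/<entry>" or "/proc/<our pid>/<entry>", and 0 otherwise.
 */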
6973 static int is_proc_myself(const char *filename, const char *entry)
6974 {
6975     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6976         filename += strlen("/proc/");
6977         if (!strncmp(filename, "self/", strlen("self/"))) {
6978             filename += strlen("self/");
6979         } else if (*filename >= '1' && *filename <= '9') {
6980             char myself[80];
6981             snprintf(myself, sizeof(myself), "%d/", getpid());
6982             if (!strncmp(filename, myself, strlen(myself))) {
6983                 filename += strlen(myself);
6984             } else {
6985                 return 0;
6986             }
6987         } else {
6988             return 0;
6989         }
6990         if (!strcmp(filename, entry)) {
6991             return 1;
6992         }
6993     }
6994     return 0;
6995 }
6996 
6997 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6998     defined(TARGET_SPARC) || defined(TARGET_M68K)
6999 static int is_proc(const char *filename, const char *entry)
7000 {
7001     return strcmp(filename, entry) == 0;
7002 }
7003 #endif
7004 
7005 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
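/* Fake /proc/net/route for cross-endian guests: the destination, gateway and
 * mask columns are raw 32-bit values printed in host byte order, so they must
 * be byte-swapped before the guest parses them.
 */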
7006 static int open_net_route(void *cpu_env, int fd)
7007 {
7008     FILE *fp;
7009     char *line = NULL;
7010     size_t len = 0;
7011     ssize_t read;
7012 
7013     fp = fopen("/proc/net/route", "r");
7014     if (fp == NULL) {
7015         return -1;
7016     }
7017 
7018     /* read header */
7019 
7020     read = getline(&line, &len, fp);
7021     dprintf(fd, "%s", line);
7022 
7023     /* read routes */
7024 
7025     while ((read = getline(&line, &len, fp)) != -1) {
7026         char iface[16];
7027         uint32_t dest, gw, mask;
7028         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7029         int fields;
7030 
7031         fields = sscanf(line,
7032                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7033                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7034                         &mask, &mtu, &window, &irtt);
7035         if (fields != 11) {
7036             continue;
7037         }
7038         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7039                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7040                 metric, tswap32(mask), mtu, window, irtt);
7041     }
7042 
7043     free(line);
7044     fclose(fp);
7045 
7046     return 0;
7047 }
7048 #endif
7049 
7050 #if defined(TARGET_SPARC)
7051 static int open_cpuinfo(void *cpu_env, int fd)
7052 {
7053     dprintf(fd, "type\t\t: sun4u\n");
7054     return 0;
7055 }
7056 #endif
7057 
7058 #if defined(TARGET_M68K)
7059 static int open_hardware(void *cpu_env, int fd)
7060 {
7061     dprintf(fd, "Model:\t\tqemu-m68k\n");
7062     return 0;
7063 }
7064 #endif
7065 
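/* open()/openat() emulation.  A few /proc files must not be passed through to
 * the host because their contents would describe the host process rather than
 * the guest: those listed in fakes[] are generated into an unlinked temporary
 * file instead, and /proc/self/exe is redirected to the guest executable.
 */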
7066 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7067 {
7068     struct fake_open {
7069         const char *filename;
7070         int (*fill)(void *cpu_env, int fd);
7071         int (*cmp)(const char *s1, const char *s2);
7072     };
7073     const struct fake_open *fake_open;
7074     static const struct fake_open fakes[] = {
7075         { "maps", open_self_maps, is_proc_myself },
7076         { "stat", open_self_stat, is_proc_myself },
7077         { "auxv", open_self_auxv, is_proc_myself },
7078         { "cmdline", open_self_cmdline, is_proc_myself },
7079 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7080         { "/proc/net/route", open_net_route, is_proc },
7081 #endif
7082 #if defined(TARGET_SPARC)
7083         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7084 #endif
7085 #if defined(TARGET_M68K)
7086         { "/proc/hardware", open_hardware, is_proc },
7087 #endif
7088         { NULL, NULL, NULL }
7089     };
7090 
7091     if (is_proc_myself(pathname, "exe")) {
7092         int execfd = qemu_getauxval(AT_EXECFD);
7093         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7094     }
7095 
7096     for (fake_open = fakes; fake_open->filename; fake_open++) {
7097         if (fake_open->cmp(pathname, fake_open->filename)) {
7098             break;
7099         }
7100     }
7101 
7102     if (fake_open->filename) {
7103         const char *tmpdir;
7104         char filename[PATH_MAX];
7105         int fd, r;
7106 
7107         /* create a temporary file to hold the faked contents */
7108         tmpdir = getenv("TMPDIR");
7109         if (!tmpdir)
7110             tmpdir = "/tmp";
7111         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7112         fd = mkstemp(filename);
7113         if (fd < 0) {
7114             return fd;
7115         }
7116         unlink(filename);
7117 
7118         if ((r = fake_open->fill(cpu_env, fd))) {
7119             int e = errno;
7120             close(fd);
7121             errno = e;
7122             return r;
7123         }
7124         lseek(fd, 0, SEEK_SET);
7125 
7126         return fd;
7127     }
7128 
7129     return safe_openat(dirfd, path(pathname), flags, mode);
7130 }
7131 
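/* Guest-visible POSIX timer IDs are an index into g_posix_timers encoded with
 * the TIMER_MAGIC tag in the upper bits, so that stray values can be rejected;
 * get_timer_id() below validates the tag and recovers the index.
 */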
7132 #define TIMER_MAGIC 0x0caf0000
7133 #define TIMER_MAGIC_MASK 0xffff0000
7134 
7135 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7136 static target_timer_t get_timer_id(abi_long arg)
7137 {
7138     target_timer_t timerid = arg;
7139 
7140     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7141         return -TARGET_EINVAL;
7142     }
7143 
7144     timerid &= 0xffff;
7145 
7146     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7147         return -TARGET_EINVAL;
7148     }
7149 
7150     return timerid;
7151 }
7152 
7153 static int target_to_host_cpu_mask(unsigned long *host_mask,
7154                                    size_t host_size,
7155                                    abi_ulong target_addr,
7156                                    size_t target_size)
7157 {
7158     unsigned target_bits = sizeof(abi_ulong) * 8;
7159     unsigned host_bits = sizeof(*host_mask) * 8;
7160     abi_ulong *target_mask;
7161     unsigned i, j;
7162 
7163     assert(host_size >= target_size);
7164 
7165     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7166     if (!target_mask) {
7167         return -TARGET_EFAULT;
7168     }
7169     memset(host_mask, 0, host_size);
7170 
7171     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7172         unsigned bit = i * target_bits;
7173         abi_ulong val;
7174 
7175         __get_user(val, &target_mask[i]);
7176         for (j = 0; j < target_bits; j++, bit++) {
7177             if (val & (1UL << j)) {
7178                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7179             }
7180         }
7181     }
7182 
7183     unlock_user(target_mask, target_addr, 0);
7184     return 0;
7185 }
7186 
7187 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7188                                    size_t host_size,
7189                                    abi_ulong target_addr,
7190                                    size_t target_size)
7191 {
7192     unsigned target_bits = sizeof(abi_ulong) * 8;
7193     unsigned host_bits = sizeof(*host_mask) * 8;
7194     abi_ulong *target_mask;
7195     unsigned i, j;
7196 
7197     assert(host_size >= target_size);
7198 
7199     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7200     if (!target_mask) {
7201         return -TARGET_EFAULT;
7202     }
7203 
7204     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7205         unsigned bit = i * target_bits;
7206         abi_ulong val = 0;
7207 
7208         for (j = 0; j < target_bits; j++, bit++) {
7209             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7210                 val |= 1UL << j;
7211             }
7212         }
7213         __put_user(val, &target_mask[i]);
7214     }
7215 
7216     unlock_user(target_mask, target_addr, target_size);
7217     return 0;
7218 }
7219 
7220 /* This is an internal helper for do_syscall so that it is easier
7221  * to have a single return point, so that actions, such as logging
7222  * of syscall results, can be performed.
7223  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7224  */
7225 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7226                             abi_long arg2, abi_long arg3, abi_long arg4,
7227                             abi_long arg5, abi_long arg6, abi_long arg7,
7228                             abi_long arg8)
7229 {
7230     CPUState *cpu = env_cpu(cpu_env);
7231     abi_long ret;
7232 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7233     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7234     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7235     || defined(TARGET_NR_statx)
7236     struct stat st;
7237 #endif
7238 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7239     || defined(TARGET_NR_fstatfs)
7240     struct statfs stfs;
7241 #endif
7242     void *p;
7243 
7244     switch(num) {
7245     case TARGET_NR_exit:
7246         /* In old applications this may be used to implement _exit(2).
7247            However, in threaded applications it is used for thread termination,
7248            and _exit_group is used for application termination.
7249            Do thread termination if we have more than one thread.  */
7250 
7251         if (block_signals()) {
7252             return -TARGET_ERESTARTSYS;
7253         }
7254 
7255         cpu_list_lock();
7256 
7257         if (CPU_NEXT(first_cpu)) {
7258             TaskState *ts;
7259 
7260             /* Remove the CPU from the list.  */
7261             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7262 
7263             cpu_list_unlock();
7264 
7265             ts = cpu->opaque;
7266             if (ts->child_tidptr) {
7267                 put_user_u32(0, ts->child_tidptr);
7268                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7269                           NULL, NULL, 0);
7270             }
7271             thread_cpu = NULL;
7272             object_unref(OBJECT(cpu));
7273             g_free(ts);
7274             rcu_unregister_thread();
7275             pthread_exit(NULL);
7276         }
7277 
7278         cpu_list_unlock();
7279         preexit_cleanup(cpu_env, arg1);
7280         _exit(arg1);
7281         return 0; /* avoid warning */
7282     case TARGET_NR_read:
7283         if (arg2 == 0 && arg3 == 0) {
7284             return get_errno(safe_read(arg1, 0, 0));
7285         } else {
7286             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7287                 return -TARGET_EFAULT;
7288             ret = get_errno(safe_read(arg1, p, arg3));
7289             if (ret >= 0 &&
7290                 fd_trans_host_to_target_data(arg1)) {
7291                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7292             }
7293             unlock_user(p, arg2, ret);
7294         }
7295         return ret;
7296     case TARGET_NR_write:
7297         if (arg2 == 0 && arg3 == 0) {
7298             return get_errno(safe_write(arg1, 0, 0));
7299         }
7300         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7301             return -TARGET_EFAULT;
7302         if (fd_trans_target_to_host_data(arg1)) {
7303             void *copy = g_malloc(arg3);
7304             memcpy(copy, p, arg3);
7305             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7306             if (ret >= 0) {
7307                 ret = get_errno(safe_write(arg1, copy, ret));
7308             }
7309             g_free(copy);
7310         } else {
7311             ret = get_errno(safe_write(arg1, p, arg3));
7312         }
7313         unlock_user(p, arg2, 0);
7314         return ret;
7315 
7316 #ifdef TARGET_NR_open
7317     case TARGET_NR_open:
7318         if (!(p = lock_user_string(arg1)))
7319             return -TARGET_EFAULT;
7320         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7321                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7322                                   arg3));
7323         fd_trans_unregister(ret);
7324         unlock_user(p, arg1, 0);
7325         return ret;
7326 #endif
7327     case TARGET_NR_openat:
7328         if (!(p = lock_user_string(arg2)))
7329             return -TARGET_EFAULT;
7330         ret = get_errno(do_openat(cpu_env, arg1, p,
7331                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7332                                   arg4));
7333         fd_trans_unregister(ret);
7334         unlock_user(p, arg2, 0);
7335         return ret;
7336 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7337     case TARGET_NR_name_to_handle_at:
7338         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7339         return ret;
7340 #endif
7341 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7342     case TARGET_NR_open_by_handle_at:
7343         ret = do_open_by_handle_at(arg1, arg2, arg3);
7344         fd_trans_unregister(ret);
7345         return ret;
7346 #endif
7347     case TARGET_NR_close:
7348         fd_trans_unregister(arg1);
7349         return get_errno(close(arg1));
7350 
7351     case TARGET_NR_brk:
7352         return do_brk(arg1);
7353 #ifdef TARGET_NR_fork
7354     case TARGET_NR_fork:
7355         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7356 #endif
7357 #ifdef TARGET_NR_waitpid
7358     case TARGET_NR_waitpid:
7359         {
7360             int status;
7361             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7362             if (!is_error(ret) && arg2 && ret
7363                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7364                 return -TARGET_EFAULT;
7365         }
7366         return ret;
7367 #endif
7368 #ifdef TARGET_NR_waitid
7369     case TARGET_NR_waitid:
7370         {
7371             siginfo_t info;
7372             info.si_pid = 0;
7373             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7374             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7375                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7376                     return -TARGET_EFAULT;
7377                 host_to_target_siginfo(p, &info);
7378                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7379             }
7380         }
7381         return ret;
7382 #endif
7383 #ifdef TARGET_NR_creat /* not on alpha */
7384     case TARGET_NR_creat:
7385         if (!(p = lock_user_string(arg1)))
7386             return -TARGET_EFAULT;
7387         ret = get_errno(creat(p, arg2));
7388         fd_trans_unregister(ret);
7389         unlock_user(p, arg1, 0);
7390         return ret;
7391 #endif
7392 #ifdef TARGET_NR_link
7393     case TARGET_NR_link:
7394         {
7395             void * p2;
7396             p = lock_user_string(arg1);
7397             p2 = lock_user_string(arg2);
7398             if (!p || !p2)
7399                 ret = -TARGET_EFAULT;
7400             else
7401                 ret = get_errno(link(p, p2));
7402             unlock_user(p2, arg2, 0);
7403             unlock_user(p, arg1, 0);
7404         }
7405         return ret;
7406 #endif
7407 #if defined(TARGET_NR_linkat)
7408     case TARGET_NR_linkat:
7409         {
7410             void * p2 = NULL;
7411             if (!arg2 || !arg4)
7412                 return -TARGET_EFAULT;
7413             p  = lock_user_string(arg2);
7414             p2 = lock_user_string(arg4);
7415             if (!p || !p2)
7416                 ret = -TARGET_EFAULT;
7417             else
7418                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7419             unlock_user(p, arg2, 0);
7420             unlock_user(p2, arg4, 0);
7421         }
7422         return ret;
7423 #endif
7424 #ifdef TARGET_NR_unlink
7425     case TARGET_NR_unlink:
7426         if (!(p = lock_user_string(arg1)))
7427             return -TARGET_EFAULT;
7428         ret = get_errno(unlink(p));
7429         unlock_user(p, arg1, 0);
7430         return ret;
7431 #endif
7432 #if defined(TARGET_NR_unlinkat)
7433     case TARGET_NR_unlinkat:
7434         if (!(p = lock_user_string(arg2)))
7435             return -TARGET_EFAULT;
7436         ret = get_errno(unlinkat(arg1, p, arg3));
7437         unlock_user(p, arg2, 0);
7438         return ret;
7439 #endif
7440     case TARGET_NR_execve:
7441         {
7442             char **argp, **envp;
7443             int argc, envc;
7444             abi_ulong gp;
7445             abi_ulong guest_argp;
7446             abi_ulong guest_envp;
7447             abi_ulong addr;
7448             char **q;
7449             int total_size = 0;
7450 
7451             argc = 0;
7452             guest_argp = arg2;
7453             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7454                 if (get_user_ual(addr, gp))
7455                     return -TARGET_EFAULT;
7456                 if (!addr)
7457                     break;
7458                 argc++;
7459             }
7460             envc = 0;
7461             guest_envp = arg3;
7462             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7463                 if (get_user_ual(addr, gp))
7464                     return -TARGET_EFAULT;
7465                 if (!addr)
7466                     break;
7467                 envc++;
7468             }
7469 
7470             argp = g_new0(char *, argc + 1);
7471             envp = g_new0(char *, envc + 1);
7472 
7473             for (gp = guest_argp, q = argp; gp;
7474                   gp += sizeof(abi_ulong), q++) {
7475                 if (get_user_ual(addr, gp))
7476                     goto execve_efault;
7477                 if (!addr)
7478                     break;
7479                 if (!(*q = lock_user_string(addr)))
7480                     goto execve_efault;
7481                 total_size += strlen(*q) + 1;
7482             }
7483             *q = NULL;
7484 
7485             for (gp = guest_envp, q = envp; gp;
7486                   gp += sizeof(abi_ulong), q++) {
7487                 if (get_user_ual(addr, gp))
7488                     goto execve_efault;
7489                 if (!addr)
7490                     break;
7491                 if (!(*q = lock_user_string(addr)))
7492                     goto execve_efault;
7493                 total_size += strlen(*q) + 1;
7494             }
7495             *q = NULL;
7496 
7497             if (!(p = lock_user_string(arg1)))
7498                 goto execve_efault;
7499             /* Although execve() is not an interruptible syscall it is
7500              * a special case where we must use the safe_syscall wrapper:
7501              * if we allow a signal to happen before we make the host
7502              * syscall then we will 'lose' it, because at the point of
7503              * execve the process leaves QEMU's control. So we use the
7504              * safe syscall wrapper to ensure that we either take the
7505              * signal as a guest signal, or else it does not happen
7506              * before the execve completes and makes it the other
7507              * program's problem.
7508              */
7509             ret = get_errno(safe_execve(p, argp, envp));
7510             unlock_user(p, arg1, 0);
7511 
7512             goto execve_end;
7513 
7514         execve_efault:
7515             ret = -TARGET_EFAULT;
7516 
7517         execve_end:
7518             for (gp = guest_argp, q = argp; *q;
7519                   gp += sizeof(abi_ulong), q++) {
7520                 if (get_user_ual(addr, gp)
7521                     || !addr)
7522                     break;
7523                 unlock_user(*q, addr, 0);
7524             }
7525             for (gp = guest_envp, q = envp; *q;
7526                   gp += sizeof(abi_ulong), q++) {
7527                 if (get_user_ual(addr, gp)
7528                     || !addr)
7529                     break;
7530                 unlock_user(*q, addr, 0);
7531             }
7532 
7533             g_free(argp);
7534             g_free(envp);
7535         }
7536         return ret;
7537     case TARGET_NR_chdir:
7538         if (!(p = lock_user_string(arg1)))
7539             return -TARGET_EFAULT;
7540         ret = get_errno(chdir(p));
7541         unlock_user(p, arg1, 0);
7542         return ret;
7543 #ifdef TARGET_NR_time
7544     case TARGET_NR_time:
7545         {
7546             time_t host_time;
7547             ret = get_errno(time(&host_time));
7548             if (!is_error(ret)
7549                 && arg1
7550                 && put_user_sal(host_time, arg1))
7551                 return -TARGET_EFAULT;
7552         }
7553         return ret;
7554 #endif
7555 #ifdef TARGET_NR_mknod
7556     case TARGET_NR_mknod:
7557         if (!(p = lock_user_string(arg1)))
7558             return -TARGET_EFAULT;
7559         ret = get_errno(mknod(p, arg2, arg3));
7560         unlock_user(p, arg1, 0);
7561         return ret;
7562 #endif
7563 #if defined(TARGET_NR_mknodat)
7564     case TARGET_NR_mknodat:
7565         if (!(p = lock_user_string(arg2)))
7566             return -TARGET_EFAULT;
7567         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7568         unlock_user(p, arg2, 0);
7569         return ret;
7570 #endif
7571 #ifdef TARGET_NR_chmod
7572     case TARGET_NR_chmod:
7573         if (!(p = lock_user_string(arg1)))
7574             return -TARGET_EFAULT;
7575         ret = get_errno(chmod(p, arg2));
7576         unlock_user(p, arg1, 0);
7577         return ret;
7578 #endif
7579 #ifdef TARGET_NR_lseek
7580     case TARGET_NR_lseek:
7581         return get_errno(lseek(arg1, arg2, arg3));
7582 #endif
7583 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7584     /* Alpha specific */
7585     case TARGET_NR_getxpid:
7586         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7587         return get_errno(getpid());
7588 #endif
7589 #ifdef TARGET_NR_getpid
7590     case TARGET_NR_getpid:
7591         return get_errno(getpid());
7592 #endif
7593     case TARGET_NR_mount:
7594         {
7595             /* need to look at the data field */
7596             void *p2, *p3;
7597 
7598             if (arg1) {
7599                 p = lock_user_string(arg1);
7600                 if (!p) {
7601                     return -TARGET_EFAULT;
7602                 }
7603             } else {
7604                 p = NULL;
7605             }
7606 
7607             p2 = lock_user_string(arg2);
7608             if (!p2) {
7609                 if (arg1) {
7610                     unlock_user(p, arg1, 0);
7611                 }
7612                 return -TARGET_EFAULT;
7613             }
7614 
7615             if (arg3) {
7616                 p3 = lock_user_string(arg3);
7617                 if (!p3) {
7618                     if (arg1) {
7619                         unlock_user(p, arg1, 0);
7620                     }
7621                     unlock_user(p2, arg2, 0);
7622                     return -TARGET_EFAULT;
7623                 }
7624             } else {
7625                 p3 = NULL;
7626             }
7627 
7628             /* FIXME - arg5 should be locked, but it isn't clear how to
7629              * do that since it's not guaranteed to be a NULL-terminated
7630              * string.
7631              */
7632             if (!arg5) {
7633                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7634             } else {
7635                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7636             }
7637             ret = get_errno(ret);
7638 
7639             if (arg1) {
7640                 unlock_user(p, arg1, 0);
7641             }
7642             unlock_user(p2, arg2, 0);
7643             if (arg3) {
7644                 unlock_user(p3, arg3, 0);
7645             }
7646         }
7647         return ret;
7648 #ifdef TARGET_NR_umount
7649     case TARGET_NR_umount:
7650         if (!(p = lock_user_string(arg1)))
7651             return -TARGET_EFAULT;
7652         ret = get_errno(umount(p));
7653         unlock_user(p, arg1, 0);
7654         return ret;
7655 #endif
7656 #ifdef TARGET_NR_stime /* not on alpha */
7657     case TARGET_NR_stime:
7658         {
7659             time_t host_time;
7660             if (get_user_sal(host_time, arg1))
7661                 return -TARGET_EFAULT;
7662             return get_errno(stime(&host_time));
7663         }
7664 #endif
7665 #ifdef TARGET_NR_alarm /* not on alpha */
7666     case TARGET_NR_alarm:
7667         return alarm(arg1);
7668 #endif
7669 #ifdef TARGET_NR_pause /* not on alpha */
7670     case TARGET_NR_pause:
7671         if (!block_signals()) {
7672             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7673         }
7674         return -TARGET_EINTR;
7675 #endif
7676 #ifdef TARGET_NR_utime
7677     case TARGET_NR_utime:
7678         {
7679             struct utimbuf tbuf, *host_tbuf;
7680             struct target_utimbuf *target_tbuf;
7681             if (arg2) {
7682                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7683                     return -TARGET_EFAULT;
7684                 tbuf.actime = tswapal(target_tbuf->actime);
7685                 tbuf.modtime = tswapal(target_tbuf->modtime);
7686                 unlock_user_struct(target_tbuf, arg2, 0);
7687                 host_tbuf = &tbuf;
7688             } else {
7689                 host_tbuf = NULL;
7690             }
7691             if (!(p = lock_user_string(arg1)))
7692                 return -TARGET_EFAULT;
7693             ret = get_errno(utime(p, host_tbuf));
7694             unlock_user(p, arg1, 0);
7695         }
7696         return ret;
7697 #endif
7698 #ifdef TARGET_NR_utimes
7699     case TARGET_NR_utimes:
7700         {
7701             struct timeval *tvp, tv[2];
7702             if (arg2) {
7703                 if (copy_from_user_timeval(&tv[0], arg2)
7704                     || copy_from_user_timeval(&tv[1],
7705                                               arg2 + sizeof(struct target_timeval)))
7706                     return -TARGET_EFAULT;
7707                 tvp = tv;
7708             } else {
7709                 tvp = NULL;
7710             }
7711             if (!(p = lock_user_string(arg1)))
7712                 return -TARGET_EFAULT;
7713             ret = get_errno(utimes(p, tvp));
7714             unlock_user(p, arg1, 0);
7715         }
7716         return ret;
7717 #endif
7718 #if defined(TARGET_NR_futimesat)
7719     case TARGET_NR_futimesat:
7720         {
7721             struct timeval *tvp, tv[2];
7722             if (arg3) {
7723                 if (copy_from_user_timeval(&tv[0], arg3)
7724                     || copy_from_user_timeval(&tv[1],
7725                                               arg3 + sizeof(struct target_timeval)))
7726                     return -TARGET_EFAULT;
7727                 tvp = tv;
7728             } else {
7729                 tvp = NULL;
7730             }
7731             if (!(p = lock_user_string(arg2))) {
7732                 return -TARGET_EFAULT;
7733             }
7734             ret = get_errno(futimesat(arg1, path(p), tvp));
7735             unlock_user(p, arg2, 0);
7736         }
7737         return ret;
7738 #endif
7739 #ifdef TARGET_NR_access
7740     case TARGET_NR_access:
7741         if (!(p = lock_user_string(arg1))) {
7742             return -TARGET_EFAULT;
7743         }
7744         ret = get_errno(access(path(p), arg2));
7745         unlock_user(p, arg1, 0);
7746         return ret;
7747 #endif
7748 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7749     case TARGET_NR_faccessat:
7750         if (!(p = lock_user_string(arg2))) {
7751             return -TARGET_EFAULT;
7752         }
7753         ret = get_errno(faccessat(arg1, p, arg3, 0));
7754         unlock_user(p, arg2, 0);
7755         return ret;
7756 #endif
7757 #ifdef TARGET_NR_nice /* not on alpha */
7758     case TARGET_NR_nice:
7759         return get_errno(nice(arg1));
7760 #endif
7761     case TARGET_NR_sync:
7762         sync();
7763         return 0;
7764 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7765     case TARGET_NR_syncfs:
7766         return get_errno(syncfs(arg1));
7767 #endif
7768     case TARGET_NR_kill:
7769         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7770 #ifdef TARGET_NR_rename
7771     case TARGET_NR_rename:
7772         {
7773             void *p2;
7774             p = lock_user_string(arg1);
7775             p2 = lock_user_string(arg2);
7776             if (!p || !p2)
7777                 ret = -TARGET_EFAULT;
7778             else
7779                 ret = get_errno(rename(p, p2));
7780             unlock_user(p2, arg2, 0);
7781             unlock_user(p, arg1, 0);
7782         }
7783         return ret;
7784 #endif
7785 #if defined(TARGET_NR_renameat)
7786     case TARGET_NR_renameat:
7787         {
7788             void *p2;
7789             p  = lock_user_string(arg2);
7790             p2 = lock_user_string(arg4);
7791             if (!p || !p2)
7792                 ret = -TARGET_EFAULT;
7793             else
7794                 ret = get_errno(renameat(arg1, p, arg3, p2));
7795             unlock_user(p2, arg4, 0);
7796             unlock_user(p, arg2, 0);
7797         }
7798         return ret;
7799 #endif
7800 #if defined(TARGET_NR_renameat2)
7801     case TARGET_NR_renameat2:
7802         {
7803             void *p2;
7804             p  = lock_user_string(arg2);
7805             p2 = lock_user_string(arg4);
7806             if (!p || !p2) {
7807                 ret = -TARGET_EFAULT;
7808             } else {
7809                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7810             }
7811             unlock_user(p2, arg4, 0);
7812             unlock_user(p, arg2, 0);
7813         }
7814         return ret;
7815 #endif
7816 #ifdef TARGET_NR_mkdir
7817     case TARGET_NR_mkdir:
7818         if (!(p = lock_user_string(arg1)))
7819             return -TARGET_EFAULT;
7820         ret = get_errno(mkdir(p, arg2));
7821         unlock_user(p, arg1, 0);
7822         return ret;
7823 #endif
7824 #if defined(TARGET_NR_mkdirat)
7825     case TARGET_NR_mkdirat:
7826         if (!(p = lock_user_string(arg2)))
7827             return -TARGET_EFAULT;
7828         ret = get_errno(mkdirat(arg1, p, arg3));
7829         unlock_user(p, arg2, 0);
7830         return ret;
7831 #endif
7832 #ifdef TARGET_NR_rmdir
7833     case TARGET_NR_rmdir:
7834         if (!(p = lock_user_string(arg1)))
7835             return -TARGET_EFAULT;
7836         ret = get_errno(rmdir(p));
7837         unlock_user(p, arg1, 0);
7838         return ret;
7839 #endif
7840     case TARGET_NR_dup:
7841         ret = get_errno(dup(arg1));
7842         if (ret >= 0) {
7843             fd_trans_dup(arg1, ret);
7844         }
7845         return ret;
7846 #ifdef TARGET_NR_pipe
7847     case TARGET_NR_pipe:
7848         return do_pipe(cpu_env, arg1, 0, 0);
7849 #endif
7850 #ifdef TARGET_NR_pipe2
7851     case TARGET_NR_pipe2:
7852         return do_pipe(cpu_env, arg1,
7853                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7854 #endif
7855     case TARGET_NR_times:
7856         {
7857             struct target_tms *tmsp;
7858             struct tms tms;
7859             ret = get_errno(times(&tms));
7860             if (arg1) {
7861                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7862                 if (!tmsp)
7863                     return -TARGET_EFAULT;
7864                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7865                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7866                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7867                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7868             }
7869             if (!is_error(ret))
7870                 ret = host_to_target_clock_t(ret);
7871         }
7872         return ret;
7873     case TARGET_NR_acct:
7874         if (arg1 == 0) {
7875             ret = get_errno(acct(NULL));
7876         } else {
7877             if (!(p = lock_user_string(arg1))) {
7878                 return -TARGET_EFAULT;
7879             }
7880             ret = get_errno(acct(path(p)));
7881             unlock_user(p, arg1, 0);
7882         }
7883         return ret;
7884 #ifdef TARGET_NR_umount2
7885     case TARGET_NR_umount2:
7886         if (!(p = lock_user_string(arg1)))
7887             return -TARGET_EFAULT;
7888         ret = get_errno(umount2(p, arg2));
7889         unlock_user(p, arg1, 0);
7890         return ret;
7891 #endif
7892     case TARGET_NR_ioctl:
7893         return do_ioctl(arg1, arg2, arg3);
7894 #ifdef TARGET_NR_fcntl
7895     case TARGET_NR_fcntl:
7896         return do_fcntl(arg1, arg2, arg3);
7897 #endif
7898     case TARGET_NR_setpgid:
7899         return get_errno(setpgid(arg1, arg2));
7900     case TARGET_NR_umask:
7901         return get_errno(umask(arg1));
7902     case TARGET_NR_chroot:
7903         if (!(p = lock_user_string(arg1)))
7904             return -TARGET_EFAULT;
7905         ret = get_errno(chroot(p));
7906         unlock_user(p, arg1, 0);
7907         return ret;
7908 #ifdef TARGET_NR_dup2
7909     case TARGET_NR_dup2:
7910         ret = get_errno(dup2(arg1, arg2));
7911         if (ret >= 0) {
7912             fd_trans_dup(arg1, arg2);
7913         }
7914         return ret;
7915 #endif
7916 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7917     case TARGET_NR_dup3:
7918     {
7919         int host_flags;
7920 
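        /* dup3(2) only defines O_CLOEXEC as a valid flag, so reject
         * anything else up front. */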
7921         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7922             return -TARGET_EINVAL;
7923         }
7924         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7925         ret = get_errno(dup3(arg1, arg2, host_flags));
7926         if (ret >= 0) {
7927             fd_trans_dup(arg1, arg2);
7928         }
7929         return ret;
7930     }
7931 #endif
7932 #ifdef TARGET_NR_getppid /* not on alpha */
7933     case TARGET_NR_getppid:
7934         return get_errno(getppid());
7935 #endif
7936 #ifdef TARGET_NR_getpgrp
7937     case TARGET_NR_getpgrp:
7938         return get_errno(getpgrp());
7939 #endif
7940     case TARGET_NR_setsid:
7941         return get_errno(setsid());
7942 #ifdef TARGET_NR_sigaction
7943     case TARGET_NR_sigaction:
7944         {
7945 #if defined(TARGET_ALPHA)
7946             struct target_sigaction act, oact, *pact = 0;
7947             struct target_old_sigaction *old_act;
7948             if (arg2) {
7949                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7950                     return -TARGET_EFAULT;
7951                 act._sa_handler = old_act->_sa_handler;
7952                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7953                 act.sa_flags = old_act->sa_flags;
7954                 act.sa_restorer = 0;
7955                 unlock_user_struct(old_act, arg2, 0);
7956                 pact = &act;
7957             }
7958             ret = get_errno(do_sigaction(arg1, pact, &oact));
7959             if (!is_error(ret) && arg3) {
7960                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7961                     return -TARGET_EFAULT;
7962                 old_act->_sa_handler = oact._sa_handler;
7963                 old_act->sa_mask = oact.sa_mask.sig[0];
7964                 old_act->sa_flags = oact.sa_flags;
7965                 unlock_user_struct(old_act, arg3, 1);
7966             }
7967 #elif defined(TARGET_MIPS)
7968             struct target_sigaction act, oact, *pact, *old_act;
7969 
7970             if (arg2) {
7971                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7972                     return -TARGET_EFAULT;
7973                 act._sa_handler = old_act->_sa_handler;
7974                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7975                 act.sa_flags = old_act->sa_flags;
7976                 unlock_user_struct(old_act, arg2, 0);
7977                 pact = &act;
7978             } else {
7979                 pact = NULL;
7980             }
7981 
7982             ret = get_errno(do_sigaction(arg1, pact, &oact));
7983 
7984             if (!is_error(ret) && arg3) {
7985                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7986                     return -TARGET_EFAULT;
7987                 old_act->_sa_handler = oact._sa_handler;
7988                 old_act->sa_flags = oact.sa_flags;
7989                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7990                 old_act->sa_mask.sig[1] = 0;
7991                 old_act->sa_mask.sig[2] = 0;
7992                 old_act->sa_mask.sig[3] = 0;
7993                 unlock_user_struct(old_act, arg3, 1);
7994             }
7995 #else
7996             struct target_old_sigaction *old_act;
7997             struct target_sigaction act, oact, *pact;
7998             if (arg2) {
7999                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8000                     return -TARGET_EFAULT;
8001                 act._sa_handler = old_act->_sa_handler;
8002                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8003                 act.sa_flags = old_act->sa_flags;
8004                 act.sa_restorer = old_act->sa_restorer;
8005 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8006                 act.ka_restorer = 0;
8007 #endif
8008                 unlock_user_struct(old_act, arg2, 0);
8009                 pact = &act;
8010             } else {
8011                 pact = NULL;
8012             }
8013             ret = get_errno(do_sigaction(arg1, pact, &oact));
8014             if (!is_error(ret) && arg3) {
8015                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8016                     return -TARGET_EFAULT;
8017                 old_act->_sa_handler = oact._sa_handler;
8018                 old_act->sa_mask = oact.sa_mask.sig[0];
8019                 old_act->sa_flags = oact.sa_flags;
8020                 old_act->sa_restorer = oact.sa_restorer;
8021                 unlock_user_struct(old_act, arg3, 1);
8022             }
8023 #endif
8024         }
8025         return ret;
8026 #endif
8027     case TARGET_NR_rt_sigaction:
8028         {
8029 #if defined(TARGET_ALPHA)
8030             /* For Alpha and SPARC this is a 5 argument syscall, with
8031              * a 'restorer' parameter which must be copied into the
8032              * sa_restorer field of the sigaction struct.
8033              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8034              * and arg5 is the sigsetsize.
8035              * Alpha also has a separate rt_sigaction struct that it uses
8036              * here; SPARC uses the usual sigaction struct.
8037              */
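            /*
             * For reference, the guest argument layouts handled here are:
             *   Alpha:  rt_sigaction(sig, act, oact, sigsetsize, restorer)
             *   SPARC:  rt_sigaction(sig, act, oact, restorer, sigsetsize)
             *   others: rt_sigaction(sig, act, oact, sigsetsize)
             */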
8038             struct target_rt_sigaction *rt_act;
8039             struct target_sigaction act, oact, *pact = 0;
8040 
8041             if (arg4 != sizeof(target_sigset_t)) {
8042                 return -TARGET_EINVAL;
8043             }
8044             if (arg2) {
8045                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8046                     return -TARGET_EFAULT;
8047                 act._sa_handler = rt_act->_sa_handler;
8048                 act.sa_mask = rt_act->sa_mask;
8049                 act.sa_flags = rt_act->sa_flags;
8050                 act.sa_restorer = arg5;
8051                 unlock_user_struct(rt_act, arg2, 0);
8052                 pact = &act;
8053             }
8054             ret = get_errno(do_sigaction(arg1, pact, &oact));
8055             if (!is_error(ret) && arg3) {
8056                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8057                     return -TARGET_EFAULT;
8058                 rt_act->_sa_handler = oact._sa_handler;
8059                 rt_act->sa_mask = oact.sa_mask;
8060                 rt_act->sa_flags = oact.sa_flags;
8061                 unlock_user_struct(rt_act, arg3, 1);
8062             }
8063 #else
8064 #ifdef TARGET_SPARC
8065             target_ulong restorer = arg4;
8066             target_ulong sigsetsize = arg5;
8067 #else
8068             target_ulong sigsetsize = arg4;
8069 #endif
8070             struct target_sigaction *act;
8071             struct target_sigaction *oact;
8072 
8073             if (sigsetsize != sizeof(target_sigset_t)) {
8074                 return -TARGET_EINVAL;
8075             }
8076             if (arg2) {
8077                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8078                     return -TARGET_EFAULT;
8079                 }
8080 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8081                 act->ka_restorer = restorer;
8082 #endif
8083             } else {
8084                 act = NULL;
8085             }
8086             if (arg3) {
8087                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8088                     ret = -TARGET_EFAULT;
8089                     goto rt_sigaction_fail;
8090                 }
8091             } else
8092                 oact = NULL;
8093             ret = get_errno(do_sigaction(arg1, act, oact));
8094         rt_sigaction_fail:
8095             if (act)
8096                 unlock_user_struct(act, arg2, 0);
8097             if (oact)
8098                 unlock_user_struct(oact, arg3, 1);
8099 #endif
8100         }
8101         return ret;
8102 #ifdef TARGET_NR_sgetmask /* not on alpha */
8103     case TARGET_NR_sgetmask:
8104         {
8105             sigset_t cur_set;
8106             abi_ulong target_set;
8107             ret = do_sigprocmask(0, NULL, &cur_set);
8108             if (!ret) {
8109                 host_to_target_old_sigset(&target_set, &cur_set);
8110                 ret = target_set;
8111             }
8112         }
8113         return ret;
8114 #endif
8115 #ifdef TARGET_NR_ssetmask /* not on alpha */
8116     case TARGET_NR_ssetmask:
8117         {
8118             sigset_t set, oset;
8119             abi_ulong target_set = arg1;
8120             target_to_host_old_sigset(&set, &target_set);
8121             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8122             if (!ret) {
8123                 host_to_target_old_sigset(&target_set, &oset);
8124                 ret = target_set;
8125             }
8126         }
8127         return ret;
8128 #endif
8129 #ifdef TARGET_NR_sigprocmask
8130     case TARGET_NR_sigprocmask:
8131         {
8132 #if defined(TARGET_ALPHA)
8133             sigset_t set, oldset;
8134             abi_ulong mask;
8135             int how;
8136 
8137             switch (arg1) {
8138             case TARGET_SIG_BLOCK:
8139                 how = SIG_BLOCK;
8140                 break;
8141             case TARGET_SIG_UNBLOCK:
8142                 how = SIG_UNBLOCK;
8143                 break;
8144             case TARGET_SIG_SETMASK:
8145                 how = SIG_SETMASK;
8146                 break;
8147             default:
8148                 return -TARGET_EINVAL;
8149             }
8150             mask = arg2;
8151             target_to_host_old_sigset(&set, &mask);
8152 
8153             ret = do_sigprocmask(how, &set, &oldset);
8154             if (!is_error(ret)) {
8155                 host_to_target_old_sigset(&mask, &oldset);
8156                 ret = mask;
8157                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8158             }
8159 #else
8160             sigset_t set, oldset, *set_ptr;
8161             int how;
8162 
8163             if (arg2) {
8164                 switch (arg1) {
8165                 case TARGET_SIG_BLOCK:
8166                     how = SIG_BLOCK;
8167                     break;
8168                 case TARGET_SIG_UNBLOCK:
8169                     how = SIG_UNBLOCK;
8170                     break;
8171                 case TARGET_SIG_SETMASK:
8172                     how = SIG_SETMASK;
8173                     break;
8174                 default:
8175                     return -TARGET_EINVAL;
8176                 }
8177                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8178                     return -TARGET_EFAULT;
8179                 target_to_host_old_sigset(&set, p);
8180                 unlock_user(p, arg2, 0);
8181                 set_ptr = &set;
8182             } else {
8183                 how = 0;
8184                 set_ptr = NULL;
8185             }
8186             ret = do_sigprocmask(how, set_ptr, &oldset);
8187             if (!is_error(ret) && arg3) {
8188                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8189                     return -TARGET_EFAULT;
8190                 host_to_target_old_sigset(p, &oldset);
8191                 unlock_user(p, arg3, sizeof(target_sigset_t));
8192             }
8193 #endif
8194         }
8195         return ret;
8196 #endif
8197     case TARGET_NR_rt_sigprocmask:
8198         {
8199             int how = arg1;
8200             sigset_t set, oldset, *set_ptr;
8201 
8202             if (arg4 != sizeof(target_sigset_t)) {
8203                 return -TARGET_EINVAL;
8204             }
8205 
8206             if (arg2) {
8207                 switch(how) {
8208                 case TARGET_SIG_BLOCK:
8209                     how = SIG_BLOCK;
8210                     break;
8211                 case TARGET_SIG_UNBLOCK:
8212                     how = SIG_UNBLOCK;
8213                     break;
8214                 case TARGET_SIG_SETMASK:
8215                     how = SIG_SETMASK;
8216                     break;
8217                 default:
8218                     return -TARGET_EINVAL;
8219                 }
8220                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8221                     return -TARGET_EFAULT;
8222                 target_to_host_sigset(&set, p);
8223                 unlock_user(p, arg2, 0);
8224                 set_ptr = &set;
8225             } else {
8226                 how = 0;
8227                 set_ptr = NULL;
8228             }
8229             ret = do_sigprocmask(how, set_ptr, &oldset);
8230             if (!is_error(ret) && arg3) {
8231                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8232                     return -TARGET_EFAULT;
8233                 host_to_target_sigset(p, &oldset);
8234                 unlock_user(p, arg3, sizeof(target_sigset_t));
8235             }
8236         }
8237         return ret;
8238 #ifdef TARGET_NR_sigpending
8239     case TARGET_NR_sigpending:
8240         {
8241             sigset_t set;
8242             ret = get_errno(sigpending(&set));
8243             if (!is_error(ret)) {
8244                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8245                     return -TARGET_EFAULT;
8246                 host_to_target_old_sigset(p, &set);
8247                 unlock_user(p, arg1, sizeof(target_sigset_t));
8248             }
8249         }
8250         return ret;
8251 #endif
8252     case TARGET_NR_rt_sigpending:
8253         {
8254             sigset_t set;
8255 
8256             /* Yes, this check is >, not != like most. We follow the kernel's
8257              * logic and it does it like this because it implements
8258              * NR_sigpending through the same code path, and in that case
8259              * the old_sigset_t is smaller in size.
8260              */
8261             if (arg2 > sizeof(target_sigset_t)) {
8262                 return -TARGET_EINVAL;
8263             }
8264 
8265             ret = get_errno(sigpending(&set));
8266             if (!is_error(ret)) {
8267                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8268                     return -TARGET_EFAULT;
8269                 host_to_target_sigset(p, &set);
8270                 unlock_user(p, arg1, sizeof(target_sigset_t));
8271             }
8272         }
8273         return ret;
8274 #ifdef TARGET_NR_sigsuspend
8275     case TARGET_NR_sigsuspend:
8276         {
8277             TaskState *ts = cpu->opaque;
8278 #if defined(TARGET_ALPHA)
8279             abi_ulong mask = arg1;
8280             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8281 #else
8282             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8283                 return -TARGET_EFAULT;
8284             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8285             unlock_user(p, arg1, 0);
8286 #endif
8287             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8288                                                SIGSET_T_SIZE));
8289             if (ret != -TARGET_ERESTARTSYS) {
8290                 ts->in_sigsuspend = 1;
8291             }
8292         }
8293         return ret;
8294 #endif
8295     case TARGET_NR_rt_sigsuspend:
8296         {
8297             TaskState *ts = cpu->opaque;
8298 
8299             if (arg2 != sizeof(target_sigset_t)) {
8300                 return -TARGET_EINVAL;
8301             }
8302             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8303                 return -TARGET_EFAULT;
8304             target_to_host_sigset(&ts->sigsuspend_mask, p);
8305             unlock_user(p, arg1, 0);
8306             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8307                                                SIGSET_T_SIZE));
8308             if (ret != -TARGET_ERESTARTSYS) {
8309                 ts->in_sigsuspend = 1;
8310             }
8311         }
8312         return ret;
8313     case TARGET_NR_rt_sigtimedwait:
8314         {
8315             sigset_t set;
8316             struct timespec uts, *puts;
8317             siginfo_t uinfo;
8318 
8319             if (arg4 != sizeof(target_sigset_t)) {
8320                 return -TARGET_EINVAL;
8321             }
8322 
8323             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8324                 return -TARGET_EFAULT;
8325             target_to_host_sigset(&set, p);
8326             unlock_user(p, arg1, 0);
8327             if (arg3) {
8328                 puts = &uts;
8329                 target_to_host_timespec(puts, arg3);
8330             } else {
8331                 puts = NULL;
8332             }
8333             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8334                                                  SIGSET_T_SIZE));
8335             if (!is_error(ret)) {
8336                 if (arg2) {
8337                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8338                                   0);
8339                     if (!p) {
8340                         return -TARGET_EFAULT;
8341                     }
8342                     host_to_target_siginfo(p, &uinfo);
8343                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8344                 }
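                /* On success the host returns the number of the signal that
                 * fired; convert it back to the guest's signal numbering. */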
8345                 ret = host_to_target_signal(ret);
8346             }
8347         }
8348         return ret;
8349     case TARGET_NR_rt_sigqueueinfo:
8350         {
8351             siginfo_t uinfo;
8352 
8353             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8354             if (!p) {
8355                 return -TARGET_EFAULT;
8356             }
8357             target_to_host_siginfo(&uinfo, p);
8358             unlock_user(p, arg3, 0);
8359             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8360         }
8361         return ret;
8362     case TARGET_NR_rt_tgsigqueueinfo:
8363         {
8364             siginfo_t uinfo;
8365 
8366             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8367             if (!p) {
8368                 return -TARGET_EFAULT;
8369             }
8370             target_to_host_siginfo(&uinfo, p);
8371             unlock_user(p, arg4, 0);
8372             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8373         }
8374         return ret;
8375 #ifdef TARGET_NR_sigreturn
8376     case TARGET_NR_sigreturn:
8377         if (block_signals()) {
8378             return -TARGET_ERESTARTSYS;
8379         }
8380         return do_sigreturn(cpu_env);
8381 #endif
8382     case TARGET_NR_rt_sigreturn:
8383         if (block_signals()) {
8384             return -TARGET_ERESTARTSYS;
8385         }
8386         return do_rt_sigreturn(cpu_env);
8387     case TARGET_NR_sethostname:
8388         if (!(p = lock_user_string(arg1)))
8389             return -TARGET_EFAULT;
8390         ret = get_errno(sethostname(p, arg2));
8391         unlock_user(p, arg1, 0);
8392         return ret;
8393 #ifdef TARGET_NR_setrlimit
8394     case TARGET_NR_setrlimit:
8395         {
8396             int resource = target_to_host_resource(arg1);
8397             struct target_rlimit *target_rlim;
8398             struct rlimit rlim;
8399             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8400                 return -TARGET_EFAULT;
8401             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8402             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8403             unlock_user_struct(target_rlim, arg2, 0);
8404             /*
8405              * If we just passed through resource limit settings for memory then
8406              * they would also apply to QEMU's own allocations, and QEMU will
8407              * crash or hang or die if its allocations fail. Ideally we would
8408              * track the guest allocations in QEMU and apply the limits ourselves.
8409              * For now, just tell the guest the call succeeded but don't actually
8410              * limit anything.
8411              */
8412             if (resource != RLIMIT_AS &&
8413                 resource != RLIMIT_DATA &&
8414                 resource != RLIMIT_STACK) {
8415                 return get_errno(setrlimit(resource, &rlim));
8416             } else {
8417                 return 0;
8418             }
8419         }
8420 #endif
8421 #ifdef TARGET_NR_getrlimit
8422     case TARGET_NR_getrlimit:
8423         {
8424             int resource = target_to_host_resource(arg1);
8425             struct target_rlimit *target_rlim;
8426             struct rlimit rlim;
8427 
8428             ret = get_errno(getrlimit(resource, &rlim));
8429             if (!is_error(ret)) {
8430                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8431                     return -TARGET_EFAULT;
8432                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8433                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8434                 unlock_user_struct(target_rlim, arg2, 1);
8435             }
8436         }
8437         return ret;
8438 #endif
8439     case TARGET_NR_getrusage:
8440         {
8441             struct rusage rusage;
8442             ret = get_errno(getrusage(arg1, &rusage));
8443             if (!is_error(ret)) {
8444                 ret = host_to_target_rusage(arg2, &rusage);
8445             }
8446         }
8447         return ret;
8448     case TARGET_NR_gettimeofday:
8449         {
8450             struct timeval tv;
8451             ret = get_errno(gettimeofday(&tv, NULL));
8452             if (!is_error(ret)) {
8453                 if (copy_to_user_timeval(arg1, &tv))
8454                     return -TARGET_EFAULT;
8455             }
8456         }
8457         return ret;
8458     case TARGET_NR_settimeofday:
8459         {
8460             struct timeval tv, *ptv = NULL;
8461             struct timezone tz, *ptz = NULL;
8462 
8463             if (arg1) {
8464                 if (copy_from_user_timeval(&tv, arg1)) {
8465                     return -TARGET_EFAULT;
8466                 }
8467                 ptv = &tv;
8468             }
8469 
8470             if (arg2) {
8471                 if (copy_from_user_timezone(&tz, arg2)) {
8472                     return -TARGET_EFAULT;
8473                 }
8474                 ptz = &tz;
8475             }
8476 
8477             return get_errno(settimeofday(ptv, ptz));
8478         }
8479 #if defined(TARGET_NR_select)
8480     case TARGET_NR_select:
8481 #if defined(TARGET_WANT_NI_OLD_SELECT)
8482         /* Some architectures used to implement old_select here,
8483          * but now return ENOSYS for it.
8484          */
8485         ret = -TARGET_ENOSYS;
8486 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8487         ret = do_old_select(arg1);
8488 #else
8489         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8490 #endif
8491         return ret;
8492 #endif
8493 #ifdef TARGET_NR_pselect6
8494     case TARGET_NR_pselect6:
8495         {
8496             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8497             fd_set rfds, wfds, efds;
8498             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8499             struct timespec ts, *ts_ptr;
8500 
8501             /*
8502              * The 6th arg is actually two args smashed together,
8503              * so we cannot use the C library.
8504              */
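            /*
             * Concretely, arg6 points at a pair of abi_ulongs in guest
             * memory:
             *     { abi_ulong sigset_addr; abi_ulong sigset_size; }
             * which is unpacked into arg_sigset/arg_sigsize below.
             */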
8505             sigset_t set;
8506             struct {
8507                 sigset_t *set;
8508                 size_t size;
8509             } sig, *sig_ptr;
8510 
8511             abi_ulong arg_sigset, arg_sigsize, *arg7;
8512             target_sigset_t *target_sigset;
8513 
8514             n = arg1;
8515             rfd_addr = arg2;
8516             wfd_addr = arg3;
8517             efd_addr = arg4;
8518             ts_addr = arg5;
8519 
8520             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8521             if (ret) {
8522                 return ret;
8523             }
8524             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8525             if (ret) {
8526                 return ret;
8527             }
8528             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8529             if (ret) {
8530                 return ret;
8531             }
8532 
8533             /*
8534              * This takes a timespec, and not a timeval, so we cannot
8535              * use the do_select() helper ...
8536              */
8537             if (ts_addr) {
8538                 if (target_to_host_timespec(&ts, ts_addr)) {
8539                     return -TARGET_EFAULT;
8540                 }
8541                 ts_ptr = &ts;
8542             } else {
8543                 ts_ptr = NULL;
8544             }
8545 
8546             /* Extract the two packed args for the sigset */
8547             if (arg6) {
8548                 sig_ptr = &sig;
8549                 sig.size = SIGSET_T_SIZE;
8550 
8551                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8552                 if (!arg7) {
8553                     return -TARGET_EFAULT;
8554                 }
8555                 arg_sigset = tswapal(arg7[0]);
8556                 arg_sigsize = tswapal(arg7[1]);
8557                 unlock_user(arg7, arg6, 0);
8558 
8559                 if (arg_sigset) {
8560                     sig.set = &set;
8561                     if (arg_sigsize != sizeof(*target_sigset)) {
8562                         /* Like the kernel, we enforce correct size sigsets */
8563                         return -TARGET_EINVAL;
8564                     }
8565                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8566                                               sizeof(*target_sigset), 1);
8567                     if (!target_sigset) {
8568                         return -TARGET_EFAULT;
8569                     }
8570                     target_to_host_sigset(&set, target_sigset);
8571                     unlock_user(target_sigset, arg_sigset, 0);
8572                 } else {
8573                     sig.set = NULL;
8574                 }
8575             } else {
8576                 sig_ptr = NULL;
8577             }
8578 
8579             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8580                                           ts_ptr, sig_ptr));
8581 
8582             if (!is_error(ret)) {
8583                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8584                     return -TARGET_EFAULT;
8585                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8586                     return -TARGET_EFAULT;
8587                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8588                     return -TARGET_EFAULT;
8589 
8590                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8591                     return -TARGET_EFAULT;
8592             }
8593         }
8594         return ret;
8595 #endif
8596 #ifdef TARGET_NR_symlink
8597     case TARGET_NR_symlink:
8598         {
8599             void *p2;
8600             p = lock_user_string(arg1);
8601             p2 = lock_user_string(arg2);
8602             if (!p || !p2)
8603                 ret = -TARGET_EFAULT;
8604             else
8605                 ret = get_errno(symlink(p, p2));
8606             unlock_user(p2, arg2, 0);
8607             unlock_user(p, arg1, 0);
8608         }
8609         return ret;
8610 #endif
8611 #if defined(TARGET_NR_symlinkat)
8612     case TARGET_NR_symlinkat:
8613         {
8614             void *p2;
8615             p  = lock_user_string(arg1);
8616             p2 = lock_user_string(arg3);
8617             if (!p || !p2)
8618                 ret = -TARGET_EFAULT;
8619             else
8620                 ret = get_errno(symlinkat(p, arg2, p2));
8621             unlock_user(p2, arg3, 0);
8622             unlock_user(p, arg1, 0);
8623         }
8624         return ret;
8625 #endif
8626 #ifdef TARGET_NR_readlink
8627     case TARGET_NR_readlink:
8628         {
8629             void *p2;
8630             p = lock_user_string(arg1);
8631             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8632             if (!p || !p2) {
8633                 ret = -TARGET_EFAULT;
8634             } else if (!arg3) {
8635                 /* Short circuit this for the magic exe check. */
8636                 ret = -TARGET_EINVAL;
8637             } else if (is_proc_myself((const char *)p, "exe")) {
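                /* The guest is reading /proc/self/exe (or /proc/<pid>/exe
                 * for its own pid): report the emulated binary's path
                 * rather than the QEMU executable. */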
8638                 char real[PATH_MAX], *temp;
8639                 temp = realpath(exec_path, real);
8640                 /* Return value is # of bytes that we wrote to the buffer. */
8641                 if (temp == NULL) {
8642                     ret = get_errno(-1);
8643                 } else {
8644                     /* Don't worry about sign mismatch as earlier mapping
8645                      * logic would have thrown a bad address error. */
8646                     ret = MIN(strlen(real), arg3);
8647                     /* We cannot NUL terminate the string. */
8648                     memcpy(p2, real, ret);
8649                 }
8650             } else {
8651                 ret = get_errno(readlink(path(p), p2, arg3));
8652             }
8653             unlock_user(p2, arg2, ret);
8654             unlock_user(p, arg1, 0);
8655         }
8656         return ret;
8657 #endif
8658 #if defined(TARGET_NR_readlinkat)
8659     case TARGET_NR_readlinkat:
8660         {
8661             void *p2;
8662             p  = lock_user_string(arg2);
8663             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8664             if (!p || !p2) {
8665                 ret = -TARGET_EFAULT;
8666             } else if (is_proc_myself((const char *)p, "exe")) {
8667                 char real[PATH_MAX], *temp;
8668                 temp = realpath(exec_path, real);
8669                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8670                 snprintf((char *)p2, arg4, "%s", real);
8671             } else {
8672                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8673             }
8674             unlock_user(p2, arg3, ret);
8675             unlock_user(p, arg2, 0);
8676         }
8677         return ret;
8678 #endif
8679 #ifdef TARGET_NR_swapon
8680     case TARGET_NR_swapon:
8681         if (!(p = lock_user_string(arg1)))
8682             return -TARGET_EFAULT;
8683         ret = get_errno(swapon(p, arg2));
8684         unlock_user(p, arg1, 0);
8685         return ret;
8686 #endif
8687     case TARGET_NR_reboot:
8688         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8689            /* arg4 is only valid for RESTART2; it must be ignored otherwise */
8690            p = lock_user_string(arg4);
8691            if (!p) {
8692                return -TARGET_EFAULT;
8693            }
8694            ret = get_errno(reboot(arg1, arg2, arg3, p));
8695            unlock_user(p, arg4, 0);
8696         } else {
8697            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8698         }
8699         return ret;
8700 #ifdef TARGET_NR_mmap
8701     case TARGET_NR_mmap:
8702 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8703     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8704     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8705     || defined(TARGET_S390X)
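        /*
         * On these targets the old mmap() syscall takes a single pointer to
         * a block of six arguments in guest memory instead of passing them
         * in registers.
         */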
8706         {
8707             abi_ulong *v;
8708             abi_ulong v1, v2, v3, v4, v5, v6;
8709             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8710                 return -TARGET_EFAULT;
8711             v1 = tswapal(v[0]);
8712             v2 = tswapal(v[1]);
8713             v3 = tswapal(v[2]);
8714             v4 = tswapal(v[3]);
8715             v5 = tswapal(v[4]);
8716             v6 = tswapal(v[5]);
8717             unlock_user(v, arg1, 0);
8718             ret = get_errno(target_mmap(v1, v2, v3,
8719                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8720                                         v5, v6));
8721         }
8722 #else
8723         ret = get_errno(target_mmap(arg1, arg2, arg3,
8724                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8725                                     arg5,
8726                                     arg6));
8727 #endif
8728         return ret;
8729 #endif
8730 #ifdef TARGET_NR_mmap2
8731     case TARGET_NR_mmap2:
8732 #ifndef MMAP_SHIFT
8733 #define MMAP_SHIFT 12
8734 #endif
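        /* The final argument of mmap2 is a page offset in 4096-byte units
         * (unless a target overrides MMAP_SHIFT), so convert it to a byte
         * offset here. */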
8735         ret = target_mmap(arg1, arg2, arg3,
8736                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8737                           arg5, arg6 << MMAP_SHIFT);
8738         return get_errno(ret);
8739 #endif
8740     case TARGET_NR_munmap:
8741         return get_errno(target_munmap(arg1, arg2));
8742     case TARGET_NR_mprotect:
8743         {
8744             TaskState *ts = cpu->opaque;
8745             /* Special hack to detect libc making the stack executable.  */
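            /*
             * PROT_GROWSDOWN asks the kernel to apply the protection change
             * to the whole growsdown (stack) mapping below the given
             * address.  target_mprotect() cannot express that, so widen the
             * range down to the recorded stack limit and drop the flag.
             */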
8746             if ((arg3 & PROT_GROWSDOWN)
8747                 && arg1 >= ts->info->stack_limit
8748                 && arg1 <= ts->info->start_stack) {
8749                 arg3 &= ~PROT_GROWSDOWN;
8750                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8751                 arg1 = ts->info->stack_limit;
8752             }
8753         }
8754         return get_errno(target_mprotect(arg1, arg2, arg3));
8755 #ifdef TARGET_NR_mremap
8756     case TARGET_NR_mremap:
8757         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8758 #endif
8759         /* ??? msync/mlock/munlock are broken for softmmu.  */
8760 #ifdef TARGET_NR_msync
8761     case TARGET_NR_msync:
8762         return get_errno(msync(g2h(arg1), arg2, arg3));
8763 #endif
8764 #ifdef TARGET_NR_mlock
8765     case TARGET_NR_mlock:
8766         return get_errno(mlock(g2h(arg1), arg2));
8767 #endif
8768 #ifdef TARGET_NR_munlock
8769     case TARGET_NR_munlock:
8770         return get_errno(munlock(g2h(arg1), arg2));
8771 #endif
8772 #ifdef TARGET_NR_mlockall
8773     case TARGET_NR_mlockall:
8774         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8775 #endif
8776 #ifdef TARGET_NR_munlockall
8777     case TARGET_NR_munlockall:
8778         return get_errno(munlockall());
8779 #endif
8780 #ifdef TARGET_NR_truncate
8781     case TARGET_NR_truncate:
8782         if (!(p = lock_user_string(arg1)))
8783             return -TARGET_EFAULT;
8784         ret = get_errno(truncate(p, arg2));
8785         unlock_user(p, arg1, 0);
8786         return ret;
8787 #endif
8788 #ifdef TARGET_NR_ftruncate
8789     case TARGET_NR_ftruncate:
8790         return get_errno(ftruncate(arg1, arg2));
8791 #endif
8792     case TARGET_NR_fchmod:
8793         return get_errno(fchmod(arg1, arg2));
8794 #if defined(TARGET_NR_fchmodat)
8795     case TARGET_NR_fchmodat:
8796         if (!(p = lock_user_string(arg2)))
8797             return -TARGET_EFAULT;
8798         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8799         unlock_user(p, arg2, 0);
8800         return ret;
8801 #endif
8802     case TARGET_NR_getpriority:
8803         /* Note that negative values are valid for getpriority, so we must
8804            differentiate based on errno settings.  */
8805         errno = 0;
8806         ret = getpriority(arg1, arg2);
8807         if (ret == -1 && errno != 0) {
8808             return -host_to_target_errno(errno);
8809         }
8810 #ifdef TARGET_ALPHA
8811         /* Return value is the unbiased priority.  Signal no error.  */
8812         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8813 #else
8814         /* Return value is a biased priority to avoid negative numbers.  */
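        /* e.g. a host nice value of -20 comes back as 40 and +19 as 1,
         * matching what the kernel's sys_getpriority() returns. */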
8815         ret = 20 - ret;
8816 #endif
8817         return ret;
8818     case TARGET_NR_setpriority:
8819         return get_errno(setpriority(arg1, arg2, arg3));
8820 #ifdef TARGET_NR_statfs
8821     case TARGET_NR_statfs:
8822         if (!(p = lock_user_string(arg1))) {
8823             return -TARGET_EFAULT;
8824         }
8825         ret = get_errno(statfs(path(p), &stfs));
8826         unlock_user(p, arg1, 0);
8827     convert_statfs:
8828         if (!is_error(ret)) {
8829             struct target_statfs *target_stfs;
8830 
8831             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8832                 return -TARGET_EFAULT;
8833             __put_user(stfs.f_type, &target_stfs->f_type);
8834             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8835             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8836             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8837             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8838             __put_user(stfs.f_files, &target_stfs->f_files);
8839             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8840             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8841             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8842             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8843             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8844 #ifdef _STATFS_F_FLAGS
8845             __put_user(stfs.f_flags, &target_stfs->f_flags);
8846 #else
8847             __put_user(0, &target_stfs->f_flags);
8848 #endif
8849             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8850             unlock_user_struct(target_stfs, arg2, 1);
8851         }
8852         return ret;
8853 #endif
8854 #ifdef TARGET_NR_fstatfs
8855     case TARGET_NR_fstatfs:
8856         ret = get_errno(fstatfs(arg1, &stfs));
8857         goto convert_statfs;
8858 #endif
8859 #ifdef TARGET_NR_statfs64
8860     case TARGET_NR_statfs64:
8861         if (!(p = lock_user_string(arg1))) {
8862             return -TARGET_EFAULT;
8863         }
8864         ret = get_errno(statfs(path(p), &stfs));
8865         unlock_user(p, arg1, 0);
8866     convert_statfs64:
8867         if (!is_error(ret)) {
8868             struct target_statfs64 *target_stfs;
8869 
8870             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8871                 return -TARGET_EFAULT;
8872             __put_user(stfs.f_type, &target_stfs->f_type);
8873             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8874             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8875             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8876             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8877             __put_user(stfs.f_files, &target_stfs->f_files);
8878             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8879             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8880             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8881             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8882             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8883             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8884             unlock_user_struct(target_stfs, arg3, 1);
8885         }
8886         return ret;
8887     case TARGET_NR_fstatfs64:
8888         ret = get_errno(fstatfs(arg1, &stfs));
8889         goto convert_statfs64;
8890 #endif
8891 #ifdef TARGET_NR_socketcall
8892     case TARGET_NR_socketcall:
8893         return do_socketcall(arg1, arg2);
8894 #endif
8895 #ifdef TARGET_NR_accept
8896     case TARGET_NR_accept:
8897         return do_accept4(arg1, arg2, arg3, 0);
8898 #endif
8899 #ifdef TARGET_NR_accept4
8900     case TARGET_NR_accept4:
8901         return do_accept4(arg1, arg2, arg3, arg4);
8902 #endif
8903 #ifdef TARGET_NR_bind
8904     case TARGET_NR_bind:
8905         return do_bind(arg1, arg2, arg3);
8906 #endif
8907 #ifdef TARGET_NR_connect
8908     case TARGET_NR_connect:
8909         return do_connect(arg1, arg2, arg3);
8910 #endif
8911 #ifdef TARGET_NR_getpeername
8912     case TARGET_NR_getpeername:
8913         return do_getpeername(arg1, arg2, arg3);
8914 #endif
8915 #ifdef TARGET_NR_getsockname
8916     case TARGET_NR_getsockname:
8917         return do_getsockname(arg1, arg2, arg3);
8918 #endif
8919 #ifdef TARGET_NR_getsockopt
8920     case TARGET_NR_getsockopt:
8921         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8922 #endif
8923 #ifdef TARGET_NR_listen
8924     case TARGET_NR_listen:
8925         return get_errno(listen(arg1, arg2));
8926 #endif
8927 #ifdef TARGET_NR_recv
8928     case TARGET_NR_recv:
8929         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8930 #endif
8931 #ifdef TARGET_NR_recvfrom
8932     case TARGET_NR_recvfrom:
8933         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8934 #endif
8935 #ifdef TARGET_NR_recvmsg
8936     case TARGET_NR_recvmsg:
8937         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8938 #endif
8939 #ifdef TARGET_NR_send
8940     case TARGET_NR_send:
8941         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8942 #endif
8943 #ifdef TARGET_NR_sendmsg
8944     case TARGET_NR_sendmsg:
8945         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8946 #endif
8947 #ifdef TARGET_NR_sendmmsg
8948     case TARGET_NR_sendmmsg:
8949         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8950     case TARGET_NR_recvmmsg:
8951         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8952 #endif
8953 #ifdef TARGET_NR_sendto
8954     case TARGET_NR_sendto:
8955         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8956 #endif
8957 #ifdef TARGET_NR_shutdown
8958     case TARGET_NR_shutdown:
8959         return get_errno(shutdown(arg1, arg2));
8960 #endif
8961 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8962     case TARGET_NR_getrandom:
8963         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8964         if (!p) {
8965             return -TARGET_EFAULT;
8966         }
8967         ret = get_errno(getrandom(p, arg2, arg3));
8968         unlock_user(p, arg1, ret);
8969         return ret;
8970 #endif
8971 #ifdef TARGET_NR_socket
8972     case TARGET_NR_socket:
8973         return do_socket(arg1, arg2, arg3);
8974 #endif
8975 #ifdef TARGET_NR_socketpair
8976     case TARGET_NR_socketpair:
8977         return do_socketpair(arg1, arg2, arg3, arg4);
8978 #endif
8979 #ifdef TARGET_NR_setsockopt
8980     case TARGET_NR_setsockopt:
8981         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8982 #endif
8983 #if defined(TARGET_NR_syslog)
8984     case TARGET_NR_syslog:
8985         {
8986             int len = arg3;
8987 
8988             switch (arg1) {
8989             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8990             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8991             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8992             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8993             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8994             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8995             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8996             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8997                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8998             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8999             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9000             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9001                 {
9002                     if (len < 0) {
9003                         return -TARGET_EINVAL;
9004                     }
9005                     if (len == 0) {
9006                         return 0;
9007                     }
9008                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9009                     if (!p) {
9010                         return -TARGET_EFAULT;
9011                     }
9012                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9013                     unlock_user(p, arg2, arg3);
9014                 }
9015                 return ret;
9016             default:
9017                 return -TARGET_EINVAL;
9018             }
9019         }
9020         break;
9021 #endif
9022     case TARGET_NR_setitimer:
9023         {
9024             struct itimerval value, ovalue, *pvalue;
9025 
9026             if (arg2) {
9027                 pvalue = &value;
9028                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9029                     || copy_from_user_timeval(&pvalue->it_value,
9030                                               arg2 + sizeof(struct target_timeval)))
9031                     return -TARGET_EFAULT;
9032             } else {
9033                 pvalue = NULL;
9034             }
9035             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9036             if (!is_error(ret) && arg3) {
9037                 if (copy_to_user_timeval(arg3,
9038                                          &ovalue.it_interval)
9039                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9040                                             &ovalue.it_value))
9041                     return -TARGET_EFAULT;
9042             }
9043         }
9044         return ret;
9045     case TARGET_NR_getitimer:
9046         {
9047             struct itimerval value;
9048 
9049             ret = get_errno(getitimer(arg1, &value));
9050             if (!is_error(ret) && arg2) {
9051                 if (copy_to_user_timeval(arg2,
9052                                          &value.it_interval)
9053                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9054                                             &value.it_value))
9055                     return -TARGET_EFAULT;
9056             }
9057         }
9058         return ret;
9059 #ifdef TARGET_NR_stat
9060     case TARGET_NR_stat:
9061         if (!(p = lock_user_string(arg1))) {
9062             return -TARGET_EFAULT;
9063         }
9064         ret = get_errno(stat(path(p), &st));
9065         unlock_user(p, arg1, 0);
9066         goto do_stat;
9067 #endif
9068 #ifdef TARGET_NR_lstat
9069     case TARGET_NR_lstat:
9070         if (!(p = lock_user_string(arg1))) {
9071             return -TARGET_EFAULT;
9072         }
9073         ret = get_errno(lstat(path(p), &st));
9074         unlock_user(p, arg1, 0);
9075         goto do_stat;
9076 #endif
9077 #ifdef TARGET_NR_fstat
9078     case TARGET_NR_fstat:
9079         {
9080             ret = get_errno(fstat(arg1, &st));
9081 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9082         do_stat:
9083 #endif
9084             if (!is_error(ret)) {
9085                 struct target_stat *target_st;
9086 
9087                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9088                     return -TARGET_EFAULT;
9089                 memset(target_st, 0, sizeof(*target_st));
9090                 __put_user(st.st_dev, &target_st->st_dev);
9091                 __put_user(st.st_ino, &target_st->st_ino);
9092                 __put_user(st.st_mode, &target_st->st_mode);
9093                 __put_user(st.st_uid, &target_st->st_uid);
9094                 __put_user(st.st_gid, &target_st->st_gid);
9095                 __put_user(st.st_nlink, &target_st->st_nlink);
9096                 __put_user(st.st_rdev, &target_st->st_rdev);
9097                 __put_user(st.st_size, &target_st->st_size);
9098                 __put_user(st.st_blksize, &target_st->st_blksize);
9099                 __put_user(st.st_blocks, &target_st->st_blocks);
9100                 __put_user(st.st_atime, &target_st->target_st_atime);
9101                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9102                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9103 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9104     defined(TARGET_STAT_HAVE_NSEC)
9105                 __put_user(st.st_atim.tv_nsec,
9106                            &target_st->target_st_atime_nsec);
9107                 __put_user(st.st_mtim.tv_nsec,
9108                            &target_st->target_st_mtime_nsec);
9109                 __put_user(st.st_ctim.tv_nsec,
9110                            &target_st->target_st_ctime_nsec);
9111 #endif
9112                 unlock_user_struct(target_st, arg2, 1);
9113             }
9114         }
9115         return ret;
9116 #endif
9117     case TARGET_NR_vhangup:
9118         return get_errno(vhangup());
9119 #ifdef TARGET_NR_syscall
9120     case TARGET_NR_syscall:
9121         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9122                           arg6, arg7, arg8, 0);
9123 #endif
9124     case TARGET_NR_wait4:
9125         {
9126             int status;
9127             abi_long status_ptr = arg2;
9128             struct rusage rusage, *rusage_ptr;
9129             abi_ulong target_rusage = arg4;
9130             abi_long rusage_err;
9131             if (target_rusage)
9132                 rusage_ptr = &rusage;
9133             else
9134                 rusage_ptr = NULL;
9135             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9136             if (!is_error(ret)) {
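                /* With WNOHANG, wait4() can return 0 without reaping a
                 * child; the kernel leaves *status untouched in that case,
                 * so only copy it back when a pid was actually returned. */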
9137                 if (status_ptr && ret) {
9138                     status = host_to_target_waitstatus(status);
9139                     if (put_user_s32(status, status_ptr))
9140                         return -TARGET_EFAULT;
9141                 }
9142                 if (target_rusage) {
9143                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9144                     if (rusage_err) {
9145                         ret = rusage_err;
9146                     }
9147                 }
9148             }
9149         }
9150         return ret;
9151 #ifdef TARGET_NR_swapoff
9152     case TARGET_NR_swapoff:
9153         if (!(p = lock_user_string(arg1)))
9154             return -TARGET_EFAULT;
9155         ret = get_errno(swapoff(p));
9156         unlock_user(p, arg1, 0);
9157         return ret;
9158 #endif
9159     case TARGET_NR_sysinfo:
9160         {
9161             struct target_sysinfo *target_value;
9162             struct sysinfo value;
9163             ret = get_errno(sysinfo(&value));
9164             if (!is_error(ret) && arg1)
9165             {
9166                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9167                     return -TARGET_EFAULT;
9168                 __put_user(value.uptime, &target_value->uptime);
9169                 __put_user(value.loads[0], &target_value->loads[0]);
9170                 __put_user(value.loads[1], &target_value->loads[1]);
9171                 __put_user(value.loads[2], &target_value->loads[2]);
9172                 __put_user(value.totalram, &target_value->totalram);
9173                 __put_user(value.freeram, &target_value->freeram);
9174                 __put_user(value.sharedram, &target_value->sharedram);
9175                 __put_user(value.bufferram, &target_value->bufferram);
9176                 __put_user(value.totalswap, &target_value->totalswap);
9177                 __put_user(value.freeswap, &target_value->freeswap);
9178                 __put_user(value.procs, &target_value->procs);
9179                 __put_user(value.totalhigh, &target_value->totalhigh);
9180                 __put_user(value.freehigh, &target_value->freehigh);
9181                 __put_user(value.mem_unit, &target_value->mem_unit);
9182                 unlock_user_struct(target_value, arg1, 1);
9183             }
9184         }
9185         return ret;
9186 #ifdef TARGET_NR_ipc
9187     case TARGET_NR_ipc:
9188         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9189 #endif
9190 #ifdef TARGET_NR_semget
9191     case TARGET_NR_semget:
9192         return get_errno(semget(arg1, arg2, arg3));
9193 #endif
9194 #ifdef TARGET_NR_semop
9195     case TARGET_NR_semop:
9196         return do_semop(arg1, arg2, arg3);
9197 #endif
9198 #ifdef TARGET_NR_semctl
9199     case TARGET_NR_semctl:
9200         return do_semctl(arg1, arg2, arg3, arg4);
9201 #endif
9202 #ifdef TARGET_NR_msgctl
9203     case TARGET_NR_msgctl:
9204         return do_msgctl(arg1, arg2, arg3);
9205 #endif
9206 #ifdef TARGET_NR_msgget
9207     case TARGET_NR_msgget:
9208         return get_errno(msgget(arg1, arg2));
9209 #endif
9210 #ifdef TARGET_NR_msgrcv
9211     case TARGET_NR_msgrcv:
9212         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9213 #endif
9214 #ifdef TARGET_NR_msgsnd
9215     case TARGET_NR_msgsnd:
9216         return do_msgsnd(arg1, arg2, arg3, arg4);
9217 #endif
9218 #ifdef TARGET_NR_shmget
9219     case TARGET_NR_shmget:
9220         return get_errno(shmget(arg1, arg2, arg3));
9221 #endif
9222 #ifdef TARGET_NR_shmctl
9223     case TARGET_NR_shmctl:
9224         return do_shmctl(arg1, arg2, arg3);
9225 #endif
9226 #ifdef TARGET_NR_shmat
9227     case TARGET_NR_shmat:
9228         return do_shmat(cpu_env, arg1, arg2, arg3);
9229 #endif
9230 #ifdef TARGET_NR_shmdt
9231     case TARGET_NR_shmdt:
9232         return do_shmdt(arg1);
9233 #endif
9234     case TARGET_NR_fsync:
9235         return get_errno(fsync(arg1));
9236     case TARGET_NR_clone:
9237         /* Linux manages to have three different orderings for its
9238          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9239          * match the kernel's CONFIG_CLONE_* settings.
9240          * Microblaze is further special in that it uses a sixth
9241          * implicit argument to clone for the TLS pointer.
9242          */
9243 #if defined(TARGET_MICROBLAZE)
9244         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9245 #elif defined(TARGET_CLONE_BACKWARDS)
9246         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9247 #elif defined(TARGET_CLONE_BACKWARDS2)
9248         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9249 #else
9250         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9251 #endif
9252         return ret;
9253 #ifdef __NR_exit_group
9254         /* new thread calls */
9255     case TARGET_NR_exit_group:
9256         preexit_cleanup(cpu_env, arg1);
9257         return get_errno(exit_group(arg1));
9258 #endif
9259     case TARGET_NR_setdomainname:
9260         if (!(p = lock_user_string(arg1)))
9261             return -TARGET_EFAULT;
9262         ret = get_errno(setdomainname(p, arg2));
9263         unlock_user(p, arg1, 0);
9264         return ret;
9265     case TARGET_NR_uname:
9266         /* No need to transcode because we use the Linux syscall. */
9267         {
9268             struct new_utsname * buf;
9269 
9270             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9271                 return -TARGET_EFAULT;
9272             ret = get_errno(sys_uname(buf));
9273             if (!is_error(ret)) {
9274                 /* Overwrite the native machine name with whatever is being
9275                    emulated. */
9276                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9277                           sizeof(buf->machine));
9278                 /* Allow the user to override the reported release.  */
9279                 if (qemu_uname_release && *qemu_uname_release) {
9280                     g_strlcpy(buf->release, qemu_uname_release,
9281                               sizeof(buf->release));
9282                 }
9283             }
9284             unlock_user_struct(buf, arg1, 1);
9285         }
9286         return ret;
9287 #ifdef TARGET_I386
9288     case TARGET_NR_modify_ldt:
9289         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9290 #if !defined(TARGET_X86_64)
9291     case TARGET_NR_vm86:
9292         return do_vm86(cpu_env, arg1, arg2);
9293 #endif
9294 #endif
9295     case TARGET_NR_adjtimex:
9296         {
9297             struct timex host_buf;
9298 
9299             if (target_to_host_timex(&host_buf, arg1) != 0) {
9300                 return -TARGET_EFAULT;
9301             }
9302             ret = get_errno(adjtimex(&host_buf));
9303             if (!is_error(ret)) {
9304                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9305                     return -TARGET_EFAULT;
9306                 }
9307             }
9308         }
9309         return ret;
9310 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9311     case TARGET_NR_clock_adjtime:
9312         {
9313             struct timex htx, *phtx = &htx;
9314 
9315             if (target_to_host_timex(phtx, arg2) != 0) {
9316                 return -TARGET_EFAULT;
9317             }
9318             ret = get_errno(clock_adjtime(arg1, phtx));
9319             if (!is_error(ret) && phtx) {
9320                 if (host_to_target_timex(arg2, phtx) != 0) {
9321                     return -TARGET_EFAULT;
9322                 }
9323             }
9324         }
9325         return ret;
9326 #endif
9327     case TARGET_NR_getpgid:
9328         return get_errno(getpgid(arg1));
9329     case TARGET_NR_fchdir:
9330         return get_errno(fchdir(arg1));
9331     case TARGET_NR_personality:
9332         return get_errno(personality(arg1));
9333 #ifdef TARGET_NR__llseek /* Not on alpha */
9334     case TARGET_NR__llseek:
9335         {
9336             int64_t res;
9337 #if !defined(__NR_llseek)
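            /*
             * The host has no llseek syscall (e.g. 64-bit hosts): combine
             * the guest's high/low offset words into a single 64-bit offset
             * and use plain lseek() instead.
             */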
9338             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9339             if (res == -1) {
9340                 ret = get_errno(res);
9341             } else {
9342                 ret = 0;
9343             }
9344 #else
9345             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9346 #endif
9347             if ((ret == 0) && put_user_s64(res, arg4)) {
9348                 return -TARGET_EFAULT;
9349             }
9350         }
9351         return ret;
9352 #endif
9353 #ifdef TARGET_NR_getdents
9354     case TARGET_NR_getdents:
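    /*
     * Three strategies, selected at compile time:
     *  - EMULATE_GETDENTS_WITH_GETDENTS with a 32-bit guest on a 64-bit
     *    host: read host dirents into a bounce buffer and repack them as
     *    target_dirent records in the guest buffer;
     *  - EMULATE_GETDENTS_WITH_GETDENTS otherwise: the layouts match, so
     *    just byteswap the records in place;
     *  - no usable host getdents: call getdents64 and convert the dirent64
     *    records to target_dirent in place.
     */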
9355 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9356 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9357         {
9358             struct target_dirent *target_dirp;
9359             struct linux_dirent *dirp;
9360             abi_long count = arg3;
9361 
9362             dirp = g_try_malloc(count);
9363             if (!dirp) {
9364                 return -TARGET_ENOMEM;
9365             }
9366 
9367             ret = get_errno(sys_getdents(arg1, dirp, count));
9368             if (!is_error(ret)) {
9369                 struct linux_dirent *de;
9370                 struct target_dirent *tde;
9371                 int len = ret;
9372                 int reclen, treclen;
9373                 int count1, tnamelen;
9374 
9375                 count1 = 0;
9376                 de = dirp;
9377                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9378                     return -TARGET_EFAULT;
9379                 tde = target_dirp;
9380                 while (len > 0) {
9381                     reclen = de->d_reclen;
9382                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9383                     assert(tnamelen >= 0);
9384                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9385                     assert(count1 + treclen <= count);
9386                     tde->d_reclen = tswap16(treclen);
9387                     tde->d_ino = tswapal(de->d_ino);
9388                     tde->d_off = tswapal(de->d_off);
9389                     memcpy(tde->d_name, de->d_name, tnamelen);
9390                     de = (struct linux_dirent *)((char *)de + reclen);
9391                     len -= reclen;
9392                     tde = (struct target_dirent *)((char *)tde + treclen);
9393                     count1 += treclen;
9394                 }
9395                 ret = count1;
9396                 unlock_user(target_dirp, arg2, ret);
9397             }
9398             g_free(dirp);
9399         }
9400 #else
9401         {
9402             struct linux_dirent *dirp;
9403             abi_long count = arg3;
9404 
9405             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9406                 return -TARGET_EFAULT;
9407             ret = get_errno(sys_getdents(arg1, dirp, count));
9408             if (!is_error(ret)) {
9409                 struct linux_dirent *de;
9410                 int len = ret;
9411                 int reclen;
9412                 de = dirp;
9413                 while (len > 0) {
9414                     reclen = de->d_reclen;
9415                     if (reclen > len)
9416                         break;
9417                     de->d_reclen = tswap16(reclen);
9418                     tswapls(&de->d_ino);
9419                     tswapls(&de->d_off);
9420                     de = (struct linux_dirent *)((char *)de + reclen);
9421                     len -= reclen;
9422                 }
9423             }
9424             unlock_user(dirp, arg2, ret);
9425         }
9426 #endif
9427 #else
9428         /* Implement getdents in terms of getdents64 */
9429         {
9430             struct linux_dirent64 *dirp;
9431             abi_long count = arg3;
9432 
9433             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9434             if (!dirp) {
9435                 return -TARGET_EFAULT;
9436             }
9437             ret = get_errno(sys_getdents64(arg1, dirp, count));
9438             if (!is_error(ret)) {
9439                 /* Convert the dirent64 structs to target dirent.  We do this
9440                  * in-place, since we can guarantee that a target_dirent is no
9441                  * larger than a dirent64; however this means we have to be
9442                  * careful to read everything before writing in the new format.
9443                  */
9444                 struct linux_dirent64 *de;
9445                 struct target_dirent *tde;
9446                 int len = ret;
9447                 int tlen = 0;
9448 
9449                 de = dirp;
9450                 tde = (struct target_dirent *)dirp;
9451                 while (len > 0) {
9452                     int namelen, treclen;
9453                     int reclen = de->d_reclen;
9454                     uint64_t ino = de->d_ino;
9455                     int64_t off = de->d_off;
9456                     uint8_t type = de->d_type;
9457 
9458                     namelen = strlen(de->d_name);
9459                     treclen = offsetof(struct target_dirent, d_name)
9460                         + namelen + 2;
9461                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9462 
9463                     memmove(tde->d_name, de->d_name, namelen + 1);
9464                     tde->d_ino = tswapal(ino);
9465                     tde->d_off = tswapal(off);
9466                     tde->d_reclen = tswap16(treclen);
9467                     /* The target_dirent type is in what was formerly a padding
9468                      * byte at the end of the structure:
9469                      */
9470                     *(((char *)tde) + treclen - 1) = type;
9471 
9472                     de = (struct linux_dirent64 *)((char *)de + reclen);
9473                     tde = (struct target_dirent *)((char *)tde + treclen);
9474                     len -= reclen;
9475                     tlen += treclen;
9476                 }
9477                 ret = tlen;
9478             }
9479             unlock_user(dirp, arg2, ret);
9480         }
9481 #endif
9482         return ret;
9483 #endif /* TARGET_NR_getdents */
9484 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9485     case TARGET_NR_getdents64:
9486         {
9487             struct linux_dirent64 *dirp;
9488             abi_long count = arg3;
9489             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9490                 return -TARGET_EFAULT;
9491             ret = get_errno(sys_getdents64(arg1, dirp, count));
9492             if (!is_error(ret)) {
9493                 struct linux_dirent64 *de;
9494                 int len = ret;
9495                 int reclen;
9496                 de = dirp;
9497                 while (len > 0) {
9498                     reclen = de->d_reclen;
9499                     if (reclen > len)
9500                         break;
9501                     de->d_reclen = tswap16(reclen);
9502                     tswap64s((uint64_t *)&de->d_ino);
9503                     tswap64s((uint64_t *)&de->d_off);
9504                     de = (struct linux_dirent64 *)((char *)de + reclen);
9505                     len -= reclen;
9506                 }
9507             }
9508             unlock_user(dirp, arg2, ret);
9509         }
9510         return ret;
9511 #endif /* TARGET_NR_getdents64 */
9512 #if defined(TARGET_NR__newselect)
9513     case TARGET_NR__newselect:
9514         return do_select(arg1, arg2, arg3, arg4, arg5);
9515 #endif
9516 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9517 # ifdef TARGET_NR_poll
9518     case TARGET_NR_poll:
9519 # endif
9520 # ifdef TARGET_NR_ppoll
9521     case TARGET_NR_ppoll:
9522 # endif
9523         {
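            /*
             * poll and ppoll share the pollfd marshalling: the guest pollfd
             * array is byteswapped into a host array up front, and revents
             * are copied back the same way for both variants.
             */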
9524             struct target_pollfd *target_pfd;
9525             unsigned int nfds = arg2;
9526             struct pollfd *pfd;
9527             unsigned int i;
9528 
9529             pfd = NULL;
9530             target_pfd = NULL;
9531             if (nfds) {
9532                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9533                     return -TARGET_EINVAL;
9534                 }
9535 
9536                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9537                                        sizeof(struct target_pollfd) * nfds, 1);
9538                 if (!target_pfd) {
9539                     return -TARGET_EFAULT;
9540                 }
9541 
9542                 pfd = alloca(sizeof(struct pollfd) * nfds);
9543                 for (i = 0; i < nfds; i++) {
9544                     pfd[i].fd = tswap32(target_pfd[i].fd);
9545                     pfd[i].events = tswap16(target_pfd[i].events);
9546                 }
9547             }
9548 
9549             switch (num) {
9550 # ifdef TARGET_NR_ppoll
9551             case TARGET_NR_ppoll:
9552             {
9553                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9554                 target_sigset_t *target_set;
9555                 sigset_t _set, *set = &_set;
9556 
9557                 if (arg3) {
9558                     if (target_to_host_timespec(timeout_ts, arg3)) {
9559                         unlock_user(target_pfd, arg1, 0);
9560                         return -TARGET_EFAULT;
9561                     }
9562                 } else {
9563                     timeout_ts = NULL;
9564                 }
9565 
9566                 if (arg4) {
9567                     if (arg5 != sizeof(target_sigset_t)) {
9568                         unlock_user(target_pfd, arg1, 0);
9569                         return -TARGET_EINVAL;
9570                     }
9571 
9572                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9573                     if (!target_set) {
9574                         unlock_user(target_pfd, arg1, 0);
9575                         return -TARGET_EFAULT;
9576                     }
9577                     target_to_host_sigset(set, target_set);
9578                 } else {
9579                     set = NULL;
9580                 }
9581 
9582                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9583                                            set, SIGSET_T_SIZE));
9584 
9585                 if (!is_error(ret) && arg3) {
9586                     host_to_target_timespec(arg3, timeout_ts);
9587                 }
9588                 if (arg4) {
9589                     unlock_user(target_set, arg4, 0);
9590                 }
9591                 break;
9592             }
9593 # endif
9594 # ifdef TARGET_NR_poll
9595             case TARGET_NR_poll:
9596             {
9597                 struct timespec ts, *pts;
9598 
9599                 if (arg3 >= 0) {
9600                     /* Convert ms to secs, ns */
9601                     ts.tv_sec = arg3 / 1000;
9602                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9603                     pts = &ts;
9604                 } else {
9605                     /* A negative poll() timeout means "infinite" */
9606                     pts = NULL;
9607                 }
9608                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9609                 break;
9610             }
9611 # endif
9612             default:
9613                 g_assert_not_reached();
9614             }
9615 
9616             if (!is_error(ret)) {
9617                 for (i = 0; i < nfds; i++) {
9618                     target_pfd[i].revents = tswap16(pfd[i].revents);
9619                 }
9620             }
9621             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9622         }
9623         return ret;
9624 #endif
9625     case TARGET_NR_flock:
9626         /* NOTE: the flock constant seems to be the same for every
9627            Linux platform */
9628         return get_errno(safe_flock(arg1, arg2));
9629     case TARGET_NR_readv:
9630         {
9631             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9632             if (vec != NULL) {
9633                 ret = get_errno(safe_readv(arg1, vec, arg3));
9634                 unlock_iovec(vec, arg2, arg3, 1);
9635             } else {
9636                 ret = -host_to_target_errno(errno);
9637             }
9638         }
9639         return ret;
9640     case TARGET_NR_writev:
9641         {
9642             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9643             if (vec != NULL) {
9644                 ret = get_errno(safe_writev(arg1, vec, arg3));
9645                 unlock_iovec(vec, arg2, arg3, 0);
9646             } else {
9647                 ret = -host_to_target_errno(errno);
9648             }
9649         }
9650         return ret;
9651 #if defined(TARGET_NR_preadv)
9652     case TARGET_NR_preadv:
9653         {
9654             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9655             if (vec != NULL) {
9656                 unsigned long low, high;
9657 
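                /*
                 * The guest passes the file offset as a low/high word pair;
                 * reassemble it into the two host longs the preadv syscall
                 * expects.
                 */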
9658                 target_to_host_low_high(arg4, arg5, &low, &high);
9659                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9660                 unlock_iovec(vec, arg2, arg3, 1);
9661             } else {
9662                 ret = -host_to_target_errno(errno);
9663             }
9664         }
9665         return ret;
9666 #endif
9667 #if defined(TARGET_NR_pwritev)
9668     case TARGET_NR_pwritev:
9669         {
9670             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9671             if (vec != NULL) {
9672                 unsigned long low, high;
9673 
9674                 target_to_host_low_high(arg4, arg5, &low, &high);
9675                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9676                 unlock_iovec(vec, arg2, arg3, 0);
9677             } else {
9678                 ret = -host_to_target_errno(errno);
9679             }
9680         }
9681         return ret;
9682 #endif
9683     case TARGET_NR_getsid:
9684         return get_errno(getsid(arg1));
9685 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9686     case TARGET_NR_fdatasync:
9687         return get_errno(fdatasync(arg1));
9688 #endif
9689 #ifdef TARGET_NR__sysctl
9690     case TARGET_NR__sysctl:
9691         /* We don't implement this, but ENOTDIR is always a safe
9692            return value. */
9693         return -TARGET_ENOTDIR;
9694 #endif
9695     case TARGET_NR_sched_getaffinity:
9696         {
9697             unsigned int mask_size;
9698             unsigned long *mask;
9699 
9700             /*
9701              * sched_getaffinity needs multiples of ulong, so need to take
9702              * care of mismatches between target ulong and host ulong sizes.
9703              */
9704             if (arg2 & (sizeof(abi_ulong) - 1)) {
9705                 return -TARGET_EINVAL;
9706             }
9707             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9708 
9709             mask = alloca(mask_size);
9710             memset(mask, 0, mask_size);
9711             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9712 
9713             if (!is_error(ret)) {
9714                 if (ret > arg2) {
9715                     /* More data returned than the caller's buffer will fit.
9716                      * This only happens if sizeof(abi_long) < sizeof(long)
9717                      * and the caller passed us a buffer holding an odd number
9718                      * of abi_longs. If the host kernel is actually using the
9719                      * extra 4 bytes then fail EINVAL; otherwise we can just
9720                      * ignore them and only copy the interesting part.
9721                      */
9722                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9723                     if (numcpus > arg2 * 8) {
9724                         return -TARGET_EINVAL;
9725                     }
9726                     ret = arg2;
9727                 }
9728 
9729                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9730                     return -TARGET_EFAULT;
9731                 }
9732             }
9733         }
9734         return ret;
9735     case TARGET_NR_sched_setaffinity:
9736         {
9737             unsigned int mask_size;
9738             unsigned long *mask;
9739 
9740             /*
9741              * sched_setaffinity needs multiples of ulong, so need to take
9742              * care of mismatches between target ulong and host ulong sizes.
9743              */
9744             if (arg2 & (sizeof(abi_ulong) - 1)) {
9745                 return -TARGET_EINVAL;
9746             }
9747             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9748             mask = alloca(mask_size);
9749 
9750             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9751             if (ret) {
9752                 return ret;
9753             }
9754 
9755             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9756         }
9757     case TARGET_NR_getcpu:
9758         {
9759             unsigned cpu, node;
9760             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9761                                        arg2 ? &node : NULL,
9762                                        NULL));
9763             if (is_error(ret)) {
9764                 return ret;
9765             }
9766             if (arg1 && put_user_u32(cpu, arg1)) {
9767                 return -TARGET_EFAULT;
9768             }
9769             if (arg2 && put_user_u32(node, arg2)) {
9770                 return -TARGET_EFAULT;
9771             }
9772         }
9773         return ret;
9774     case TARGET_NR_sched_setparam:
9775         {
9776             struct sched_param *target_schp;
9777             struct sched_param schp;
9778 
9779             if (arg2 == 0) {
9780                 return -TARGET_EINVAL;
9781             }
9782             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9783                 return -TARGET_EFAULT;
9784             schp.sched_priority = tswap32(target_schp->sched_priority);
9785             unlock_user_struct(target_schp, arg2, 0);
9786             return get_errno(sched_setparam(arg1, &schp));
9787         }
9788     case TARGET_NR_sched_getparam:
9789         {
9790             struct sched_param *target_schp;
9791             struct sched_param schp;
9792 
9793             if (arg2 == 0) {
9794                 return -TARGET_EINVAL;
9795             }
9796             ret = get_errno(sched_getparam(arg1, &schp));
9797             if (!is_error(ret)) {
9798                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9799                     return -TARGET_EFAULT;
9800                 target_schp->sched_priority = tswap32(schp.sched_priority);
9801                 unlock_user_struct(target_schp, arg2, 1);
9802             }
9803         }
9804         return ret;
9805     case TARGET_NR_sched_setscheduler:
9806         {
9807             struct sched_param *target_schp;
9808             struct sched_param schp;
9809             if (arg3 == 0) {
9810                 return -TARGET_EINVAL;
9811             }
9812             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9813                 return -TARGET_EFAULT;
9814             schp.sched_priority = tswap32(target_schp->sched_priority);
9815             unlock_user_struct(target_schp, arg3, 0);
9816             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9817         }
9818     case TARGET_NR_sched_getscheduler:
9819         return get_errno(sched_getscheduler(arg1));
9820     case TARGET_NR_sched_yield:
9821         return get_errno(sched_yield());
9822     case TARGET_NR_sched_get_priority_max:
9823         return get_errno(sched_get_priority_max(arg1));
9824     case TARGET_NR_sched_get_priority_min:
9825         return get_errno(sched_get_priority_min(arg1));
9826     case TARGET_NR_sched_rr_get_interval:
9827         {
9828             struct timespec ts;
9829             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9830             if (!is_error(ret)) {
9831                 ret = host_to_target_timespec(arg2, &ts);
9832             }
9833         }
9834         return ret;
9835     case TARGET_NR_nanosleep:
9836         {
9837             struct timespec req, rem;
9838             target_to_host_timespec(&req, arg1);
9839             ret = get_errno(safe_nanosleep(&req, &rem));
9840             if (is_error(ret) && arg2) {
9841                 host_to_target_timespec(arg2, &rem);
9842             }
9843         }
9844         return ret;
9845     case TARGET_NR_prctl:
9846         switch (arg1) {
9847         case PR_GET_PDEATHSIG:
9848         {
9849             int deathsig;
9850             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9851             if (!is_error(ret) && arg2
9852                 && put_user_ual(deathsig, arg2)) {
9853                 return -TARGET_EFAULT;
9854             }
9855             return ret;
9856         }
9857 #ifdef PR_GET_NAME
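        /*
         * The task comm name is at most 16 bytes (the kernel's
         * TASK_COMM_LEN), hence the fixed 16-byte guest buffer locked for
         * PR_GET_NAME / PR_SET_NAME below.
         */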
9858         case PR_GET_NAME:
9859         {
9860             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9861             if (!name) {
9862                 return -TARGET_EFAULT;
9863             }
9864             ret = get_errno(prctl(arg1, (unsigned long)name,
9865                                   arg3, arg4, arg5));
9866             unlock_user(name, arg2, 16);
9867             return ret;
9868         }
9869         case PR_SET_NAME:
9870         {
9871             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9872             if (!name) {
9873                 return -TARGET_EFAULT;
9874             }
9875             ret = get_errno(prctl(arg1, (unsigned long)name,
9876                                   arg3, arg4, arg5));
9877             unlock_user(name, arg2, 0);
9878             return ret;
9879         }
9880 #endif
9881 #ifdef TARGET_MIPS
9882         case TARGET_PR_GET_FP_MODE:
9883         {
9884             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9885             ret = 0;
9886             if (env->CP0_Status & (1 << CP0St_FR)) {
9887                 ret |= TARGET_PR_FP_MODE_FR;
9888             }
9889             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9890                 ret |= TARGET_PR_FP_MODE_FRE;
9891             }
9892             return ret;
9893         }
9894         case TARGET_PR_SET_FP_MODE:
9895         {
9896             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9897             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9898             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9899             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9900             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9901 
9902             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9903                                             TARGET_PR_FP_MODE_FRE;
9904 
9905             /* If nothing to change, return right away, successfully.  */
9906             if (old_fr == new_fr && old_fre == new_fre) {
9907                 return 0;
9908             }
9909             /* Check the value is valid */
9910             if (arg2 & ~known_bits) {
9911                 return -TARGET_EOPNOTSUPP;
9912             }
9913             /* Setting FRE without FR is not supported.  */
9914             if (new_fre && !new_fr) {
9915                 return -TARGET_EOPNOTSUPP;
9916             }
9917             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9918                 /* FR1 is not supported */
9919                 return -TARGET_EOPNOTSUPP;
9920             }
9921             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9922                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9923                 /* cannot set FR=0 */
9924                 return -TARGET_EOPNOTSUPP;
9925             }
9926             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9927                 /* Cannot set FRE=1 */
9928                 return -TARGET_EOPNOTSUPP;
9929             }
9930 
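            /*
             * Changing FR changes how 64-bit FP values map onto the FPR
             * file: with FR=0 a value is split across an even/odd register
             * pair, with FR=1 it occupies a single 64-bit register.  Repack
             * the halves so existing register contents survive the switch.
             */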
9931             int i;
9932             fpr_t *fpr = env->active_fpu.fpr;
9933             for (i = 0; i < 32 ; i += 2) {
9934                 if (!old_fr && new_fr) {
9935                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9936                 } else if (old_fr && !new_fr) {
9937                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9938                 }
9939             }
9940 
9941             if (new_fr) {
9942                 env->CP0_Status |= (1 << CP0St_FR);
9943                 env->hflags |= MIPS_HFLAG_F64;
9944             } else {
9945                 env->CP0_Status &= ~(1 << CP0St_FR);
9946                 env->hflags &= ~MIPS_HFLAG_F64;
9947             }
9948             if (new_fre) {
9949                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9950                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9951                     env->hflags |= MIPS_HFLAG_FRE;
9952                 }
9953             } else {
9954                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9955                 env->hflags &= ~MIPS_HFLAG_FRE;
9956             }
9957 
9958             return 0;
9959         }
9960 #endif /* MIPS */
9961 #ifdef TARGET_AARCH64
9962         case TARGET_PR_SVE_SET_VL:
9963             /*
9964              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9965              * PR_SVE_VL_INHERIT.  Note the kernel definition
9966              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9967              * even though the current architectural maximum is VQ=16.
9968              */
9969             ret = -TARGET_EINVAL;
9970             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9971                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9972                 CPUARMState *env = cpu_env;
9973                 ARMCPU *cpu = env_archcpu(env);
9974                 uint32_t vq, old_vq;
9975 
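                /*
                 * ZCR_EL1.LEN holds (vector quanta - 1) and a quantum is
                 * 16 bytes, so the requested length in bytes is converted
                 * to quanta, clamped to the CPU's maximum, and the new
                 * vector length reported back in bytes.
                 */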
9976                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9977                 vq = MAX(arg2 / 16, 1);
9978                 vq = MIN(vq, cpu->sve_max_vq);
9979 
9980                 if (vq < old_vq) {
9981                     aarch64_sve_narrow_vq(env, vq);
9982                 }
9983                 env->vfp.zcr_el[1] = vq - 1;
9984                 ret = vq * 16;
9985             }
9986             return ret;
9987         case TARGET_PR_SVE_GET_VL:
9988             ret = -TARGET_EINVAL;
9989             {
9990                 ARMCPU *cpu = env_archcpu(cpu_env);
9991                 if (cpu_isar_feature(aa64_sve, cpu)) {
9992                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9993                 }
9994             }
9995             return ret;
9996         case TARGET_PR_PAC_RESET_KEYS:
9997             {
9998                 CPUARMState *env = cpu_env;
9999                 ARMCPU *cpu = env_archcpu(env);
10000 
10001                 if (arg3 || arg4 || arg5) {
10002                     return -TARGET_EINVAL;
10003                 }
10004                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10005                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10006                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10007                                TARGET_PR_PAC_APGAKEY);
10008                     int ret = 0;
10009                     Error *err = NULL;
10010 
10011                     if (arg2 == 0) {
10012                         arg2 = all;
10013                     } else if (arg2 & ~all) {
10014                         return -TARGET_EINVAL;
10015                     }
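                    /*
                     * Refill every selected key with fresh bytes from the
                     * guest random source; arg2 == 0 selects all keys.
                     */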
10016                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10017                         ret |= qemu_guest_getrandom(&env->keys.apia,
10018                                                     sizeof(ARMPACKey), &err);
10019                     }
10020                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10021                         ret |= qemu_guest_getrandom(&env->keys.apib,
10022                                                     sizeof(ARMPACKey), &err);
10023                     }
10024                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10025                         ret |= qemu_guest_getrandom(&env->keys.apda,
10026                                                     sizeof(ARMPACKey), &err);
10027                     }
10028                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10029                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10030                                                     sizeof(ARMPACKey), &err);
10031                     }
10032                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10033                         ret |= qemu_guest_getrandom(&env->keys.apga,
10034                                                     sizeof(ARMPACKey), &err);
10035                     }
10036                     if (ret != 0) {
10037                         /*
10038                          * Some unknown failure in the crypto.  The best
10039                          * we can do is log it and fail the syscall.
10040                          * The real syscall cannot fail this way.
10041                          */
10042                         qemu_log_mask(LOG_UNIMP,
10043                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10044                                       error_get_pretty(err));
10045                         error_free(err);
10046                         return -TARGET_EIO;
10047                     }
10048                     return 0;
10049                 }
10050             }
10051             return -TARGET_EINVAL;
10052 #endif /* AARCH64 */
10053         case PR_GET_SECCOMP:
10054         case PR_SET_SECCOMP:
10055             /* Disable seccomp to prevent the target disabling syscalls we
10056              * need. */
10057             return -TARGET_EINVAL;
10058         default:
10059             /* Most prctl options have no pointer arguments */
10060             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10061         }
10062         break;
10063 #ifdef TARGET_NR_arch_prctl
10064     case TARGET_NR_arch_prctl:
10065 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10066         return do_arch_prctl(cpu_env, arg1, arg2);
10067 #else
10068 #error unreachable
10069 #endif
10070 #endif
10071 #ifdef TARGET_NR_pread64
10072     case TARGET_NR_pread64:
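        /*
         * ABIs that pass 64-bit values in aligned register pairs deliver
         * the offset words one argument later, so shift them down before
         * reassembling the 64-bit offset.
         */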
10073         if (regpairs_aligned(cpu_env, num)) {
10074             arg4 = arg5;
10075             arg5 = arg6;
10076         }
10077         if (arg2 == 0 && arg3 == 0) {
10078             /* Special-case NULL buffer and zero length, which should succeed */
10079             p = 0;
10080         } else {
10081             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10082             if (!p) {
10083                 return -TARGET_EFAULT;
10084             }
10085         }
10086         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10087         unlock_user(p, arg2, ret);
10088         return ret;
10089     case TARGET_NR_pwrite64:
10090         if (regpairs_aligned(cpu_env, num)) {
10091             arg4 = arg5;
10092             arg5 = arg6;
10093         }
10094         if (arg2 == 0 && arg3 == 0) {
10095             /* Special-case NULL buffer and zero length, which should succeed */
10096             p = 0;
10097         } else {
10098             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10099             if (!p) {
10100                 return -TARGET_EFAULT;
10101             }
10102         }
10103         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10104         unlock_user(p, arg2, 0);
10105         return ret;
10106 #endif
10107     case TARGET_NR_getcwd:
10108         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10109             return -TARGET_EFAULT;
10110         ret = get_errno(sys_getcwd1(p, arg2));
10111         unlock_user(p, arg1, ret);
10112         return ret;
10113     case TARGET_NR_capget:
10114     case TARGET_NR_capset:
10115     {
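        /*
         * capget and capset share the marshalling of the header and of the
         * one or two (version-dependent) data structs; only the host
         * syscall invoked in the middle differs.
         */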
10116         struct target_user_cap_header *target_header;
10117         struct target_user_cap_data *target_data = NULL;
10118         struct __user_cap_header_struct header;
10119         struct __user_cap_data_struct data[2];
10120         struct __user_cap_data_struct *dataptr = NULL;
10121         int i, target_datalen;
10122         int data_items = 1;
10123 
10124         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10125             return -TARGET_EFAULT;
10126         }
10127         header.version = tswap32(target_header->version);
10128         header.pid = tswap32(target_header->pid);
10129 
10130         if (header.version != _LINUX_CAPABILITY_VERSION) {
10131             /* Version 2 and up takes pointer to two user_data structs */
10132             data_items = 2;
10133         }
10134 
10135         target_datalen = sizeof(*target_data) * data_items;
10136 
10137         if (arg2) {
10138             if (num == TARGET_NR_capget) {
10139                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10140             } else {
10141                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10142             }
10143             if (!target_data) {
10144                 unlock_user_struct(target_header, arg1, 0);
10145                 return -TARGET_EFAULT;
10146             }
10147 
10148             if (num == TARGET_NR_capset) {
10149                 for (i = 0; i < data_items; i++) {
10150                     data[i].effective = tswap32(target_data[i].effective);
10151                     data[i].permitted = tswap32(target_data[i].permitted);
10152                     data[i].inheritable = tswap32(target_data[i].inheritable);
10153                 }
10154             }
10155 
10156             dataptr = data;
10157         }
10158 
10159         if (num == TARGET_NR_capget) {
10160             ret = get_errno(capget(&header, dataptr));
10161         } else {
10162             ret = get_errno(capset(&header, dataptr));
10163         }
10164 
10165         /* The kernel always updates version for both capget and capset */
10166         target_header->version = tswap32(header.version);
10167         unlock_user_struct(target_header, arg1, 1);
10168 
10169         if (arg2) {
10170             if (num == TARGET_NR_capget) {
10171                 for (i = 0; i < data_items; i++) {
10172                     target_data[i].effective = tswap32(data[i].effective);
10173                     target_data[i].permitted = tswap32(data[i].permitted);
10174                     target_data[i].inheritable = tswap32(data[i].inheritable);
10175                 }
10176                 unlock_user(target_data, arg2, target_datalen);
10177             } else {
10178                 unlock_user(target_data, arg2, 0);
10179             }
10180         }
10181         return ret;
10182     }
10183     case TARGET_NR_sigaltstack:
10184         return do_sigaltstack(arg1, arg2,
10185                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10186 
10187 #ifdef CONFIG_SENDFILE
10188 #ifdef TARGET_NR_sendfile
10189     case TARGET_NR_sendfile:
10190     {
10191         off_t *offp = NULL;
10192         off_t off;
10193         if (arg3) {
10194             ret = get_user_sal(off, arg3);
10195             if (is_error(ret)) {
10196                 return ret;
10197             }
10198             offp = &off;
10199         }
10200         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10201         if (!is_error(ret) && arg3) {
10202             abi_long ret2 = put_user_sal(off, arg3);
10203             if (is_error(ret2)) {
10204                 ret = ret2;
10205             }
10206         }
10207         return ret;
10208     }
10209 #endif
10210 #ifdef TARGET_NR_sendfile64
10211     case TARGET_NR_sendfile64:
10212     {
10213         off_t *offp = NULL;
10214         off_t off;
10215         if (arg3) {
10216             ret = get_user_s64(off, arg3);
10217             if (is_error(ret)) {
10218                 return ret;
10219             }
10220             offp = &off;
10221         }
10222         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10223         if (!is_error(ret) && arg3) {
10224             abi_long ret2 = put_user_s64(off, arg3);
10225             if (is_error(ret2)) {
10226                 ret = ret2;
10227             }
10228         }
10229         return ret;
10230     }
10231 #endif
10232 #endif
10233 #ifdef TARGET_NR_vfork
10234     case TARGET_NR_vfork:
10235         return get_errno(do_fork(cpu_env,
10236                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10237                          0, 0, 0, 0));
10238 #endif
10239 #ifdef TARGET_NR_ugetrlimit
10240     case TARGET_NR_ugetrlimit:
10241     {
10242         struct rlimit rlim;
10243         int resource = target_to_host_resource(arg1);
10244         ret = get_errno(getrlimit(resource, &rlim));
10245         if (!is_error(ret)) {
10246             struct target_rlimit *target_rlim;
10247             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10248                 return -TARGET_EFAULT;
10249             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10250             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10251             unlock_user_struct(target_rlim, arg2, 1);
10252         }
10253         return ret;
10254     }
10255 #endif
10256 #ifdef TARGET_NR_truncate64
10257     case TARGET_NR_truncate64:
10258         if (!(p = lock_user_string(arg1)))
10259             return -TARGET_EFAULT;
10260         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10261         unlock_user(p, arg1, 0);
10262         return ret;
10263 #endif
10264 #ifdef TARGET_NR_ftruncate64
10265     case TARGET_NR_ftruncate64:
10266         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10267 #endif
10268 #ifdef TARGET_NR_stat64
10269     case TARGET_NR_stat64:
10270         if (!(p = lock_user_string(arg1))) {
10271             return -TARGET_EFAULT;
10272         }
10273         ret = get_errno(stat(path(p), &st));
10274         unlock_user(p, arg1, 0);
10275         if (!is_error(ret))
10276             ret = host_to_target_stat64(cpu_env, arg2, &st);
10277         return ret;
10278 #endif
10279 #ifdef TARGET_NR_lstat64
10280     case TARGET_NR_lstat64:
10281         if (!(p = lock_user_string(arg1))) {
10282             return -TARGET_EFAULT;
10283         }
10284         ret = get_errno(lstat(path(p), &st));
10285         unlock_user(p, arg1, 0);
10286         if (!is_error(ret))
10287             ret = host_to_target_stat64(cpu_env, arg2, &st);
10288         return ret;
10289 #endif
10290 #ifdef TARGET_NR_fstat64
10291     case TARGET_NR_fstat64:
10292         ret = get_errno(fstat(arg1, &st));
10293         if (!is_error(ret))
10294             ret = host_to_target_stat64(cpu_env, arg2, &st);
10295         return ret;
10296 #endif
10297 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10298 #ifdef TARGET_NR_fstatat64
10299     case TARGET_NR_fstatat64:
10300 #endif
10301 #ifdef TARGET_NR_newfstatat
10302     case TARGET_NR_newfstatat:
10303 #endif
10304         if (!(p = lock_user_string(arg2))) {
10305             return -TARGET_EFAULT;
10306         }
10307         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10308         unlock_user(p, arg2, 0);
10309         if (!is_error(ret))
10310             ret = host_to_target_stat64(cpu_env, arg3, &st);
10311         return ret;
10312 #endif
10313 #if defined(TARGET_NR_statx)
10314     case TARGET_NR_statx:
10315         {
10316             struct target_statx *target_stx;
10317             int dirfd = arg1;
10318             int flags = arg3;
10319 
10320             p = lock_user_string(arg2);
10321             if (p == NULL) {
10322                 return -TARGET_EFAULT;
10323             }
10324 #if defined(__NR_statx)
10325             {
10326                 /*
10327                  * It is assumed that struct statx is architecture independent.
10328                  */
10329                 struct target_statx host_stx;
10330                 int mask = arg4;
10331 
10332                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10333                 if (!is_error(ret)) {
10334                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10335                         unlock_user(p, arg2, 0);
10336                         return -TARGET_EFAULT;
10337                     }
10338                 }
10339 
10340                 if (ret != -TARGET_ENOSYS) {
10341                     unlock_user(p, arg2, 0);
10342                     return ret;
10343                 }
10344             }
10345 #endif
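            /*
             * No host statx (or it returned ENOSYS): fall back to fstatat()
             * and fill in the statx fields that a plain struct stat can
             * provide; everything else stays zero from the memset below.
             */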
10346             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10347             unlock_user(p, arg2, 0);
10348 
10349             if (!is_error(ret)) {
10350                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10351                     return -TARGET_EFAULT;
10352                 }
10353                 memset(target_stx, 0, sizeof(*target_stx));
10354                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10355                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10356                 __put_user(st.st_ino, &target_stx->stx_ino);
10357                 __put_user(st.st_mode, &target_stx->stx_mode);
10358                 __put_user(st.st_uid, &target_stx->stx_uid);
10359                 __put_user(st.st_gid, &target_stx->stx_gid);
10360                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10361                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10362                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10363                 __put_user(st.st_size, &target_stx->stx_size);
10364                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10365                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10366                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10367                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10368                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10369                 unlock_user_struct(target_stx, arg5, 1);
10370             }
10371         }
10372         return ret;
10373 #endif
10374 #ifdef TARGET_NR_lchown
10375     case TARGET_NR_lchown:
10376         if (!(p = lock_user_string(arg1)))
10377             return -TARGET_EFAULT;
10378         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10379         unlock_user(p, arg1, 0);
10380         return ret;
10381 #endif
10382 #ifdef TARGET_NR_getuid
10383     case TARGET_NR_getuid:
10384         return get_errno(high2lowuid(getuid()));
10385 #endif
10386 #ifdef TARGET_NR_getgid
10387     case TARGET_NR_getgid:
10388         return get_errno(high2lowgid(getgid()));
10389 #endif
10390 #ifdef TARGET_NR_geteuid
10391     case TARGET_NR_geteuid:
10392         return get_errno(high2lowuid(geteuid()));
10393 #endif
10394 #ifdef TARGET_NR_getegid
10395     case TARGET_NR_getegid:
10396         return get_errno(high2lowgid(getegid()));
10397 #endif
10398     case TARGET_NR_setreuid:
10399         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10400     case TARGET_NR_setregid:
10401         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10402     case TARGET_NR_getgroups:
10403         {
10404             int gidsetsize = arg1;
10405             target_id *target_grouplist;
10406             gid_t *grouplist;
10407             int i;
10408 
10409             grouplist = alloca(gidsetsize * sizeof(gid_t));
10410             ret = get_errno(getgroups(gidsetsize, grouplist));
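            /*
             * gidsetsize == 0 is the "how many groups?" query: return the
             * count from the syscall without copying anything back.
             */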
10411             if (gidsetsize == 0)
10412                 return ret;
10413             if (!is_error(ret)) {
10414                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10415                 if (!target_grouplist)
10416                     return -TARGET_EFAULT;
10417                 for (i = 0; i < ret; i++)
10418                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10419                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10420             }
10421         }
10422         return ret;
10423     case TARGET_NR_setgroups:
10424         {
10425             int gidsetsize = arg1;
10426             target_id *target_grouplist;
10427             gid_t *grouplist = NULL;
10428             int i;
10429             if (gidsetsize) {
10430                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10431                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10432                 if (!target_grouplist) {
10433                     return -TARGET_EFAULT;
10434                 }
10435                 for (i = 0; i < gidsetsize; i++) {
10436                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10437                 }
10438                 unlock_user(target_grouplist, arg2, 0);
10439             }
10440             return get_errno(setgroups(gidsetsize, grouplist));
10441         }
10442     case TARGET_NR_fchown:
10443         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10444 #if defined(TARGET_NR_fchownat)
10445     case TARGET_NR_fchownat:
10446         if (!(p = lock_user_string(arg2)))
10447             return -TARGET_EFAULT;
10448         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10449                                  low2highgid(arg4), arg5));
10450         unlock_user(p, arg2, 0);
10451         return ret;
10452 #endif
10453 #ifdef TARGET_NR_setresuid
10454     case TARGET_NR_setresuid:
10455         return get_errno(sys_setresuid(low2highuid(arg1),
10456                                        low2highuid(arg2),
10457                                        low2highuid(arg3)));
10458 #endif
10459 #ifdef TARGET_NR_getresuid
10460     case TARGET_NR_getresuid:
10461         {
10462             uid_t ruid, euid, suid;
10463             ret = get_errno(getresuid(&ruid, &euid, &suid));
10464             if (!is_error(ret)) {
10465                 if (put_user_id(high2lowuid(ruid), arg1)
10466                     || put_user_id(high2lowuid(euid), arg2)
10467                     || put_user_id(high2lowuid(suid), arg3))
10468                     return -TARGET_EFAULT;
10469             }
10470         }
10471         return ret;
10472 #endif
10473 #ifdef TARGET_NR_getresgid
10474     case TARGET_NR_setresgid:
10475         return get_errno(sys_setresgid(low2highgid(arg1),
10476                                        low2highgid(arg2),
10477                                        low2highgid(arg3)));
10478 #endif
10479 #ifdef TARGET_NR_getresgid
10480     case TARGET_NR_getresgid:
10481         {
10482             gid_t rgid, egid, sgid;
10483             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10484             if (!is_error(ret)) {
10485                 if (put_user_id(high2lowgid(rgid), arg1)
10486                     || put_user_id(high2lowgid(egid), arg2)
10487                     || put_user_id(high2lowgid(sgid), arg3))
10488                     return -TARGET_EFAULT;
10489             }
10490         }
10491         return ret;
10492 #endif
10493 #ifdef TARGET_NR_chown
10494     case TARGET_NR_chown:
10495         if (!(p = lock_user_string(arg1)))
10496             return -TARGET_EFAULT;
10497         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10498         unlock_user(p, arg1, 0);
10499         return ret;
10500 #endif
10501     case TARGET_NR_setuid:
10502         return get_errno(sys_setuid(low2highuid(arg1)));
10503     case TARGET_NR_setgid:
10504         return get_errno(sys_setgid(low2highgid(arg1)));
10505     case TARGET_NR_setfsuid:
10506         return get_errno(setfsuid(arg1));
10507     case TARGET_NR_setfsgid:
10508         return get_errno(setfsgid(arg1));
10509 
10510 #ifdef TARGET_NR_lchown32
10511     case TARGET_NR_lchown32:
10512         if (!(p = lock_user_string(arg1)))
10513             return -TARGET_EFAULT;
10514         ret = get_errno(lchown(p, arg2, arg3));
10515         unlock_user(p, arg1, 0);
10516         return ret;
10517 #endif
10518 #ifdef TARGET_NR_getuid32
10519     case TARGET_NR_getuid32:
10520         return get_errno(getuid());
10521 #endif
10522 
10523 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10524     /* Alpha specific */
10525     case TARGET_NR_getxuid:
10526         {
10527             uid_t euid;
10528             euid = geteuid();
10529             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10530         }
10531         return get_errno(getuid());
10532 #endif
10533 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10534     /* Alpha specific */
10535     case TARGET_NR_getxgid:
10536         {
10537             gid_t egid;
10538             egid = getegid();
10539             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10540         }
10541         return get_errno(getgid());
10542 #endif
10543 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10544     /* Alpha specific */
10545     case TARGET_NR_osf_getsysinfo:
10546         ret = -TARGET_EOPNOTSUPP;
10547         switch (arg1) {
10548           case TARGET_GSI_IEEE_FP_CONTROL:
10549             {
10550                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10551                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10552 
10553                 swcr &= ~SWCR_STATUS_MASK;
10554                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10555 
10556                 if (put_user_u64 (swcr, arg2))
10557                         return -TARGET_EFAULT;
10558                 ret = 0;
10559             }
10560             break;
10561 
10562           /* case GSI_IEEE_STATE_AT_SIGNAL:
10563              -- Not implemented in linux kernel.
10564              case GSI_UACPROC:
10565              -- Retrieves current unaligned access state; not much used.
10566              case GSI_PROC_TYPE:
10567              -- Retrieves implver information; surely not used.
10568              case GSI_GET_HWRPB:
10569              -- Grabs a copy of the HWRPB; surely not used.
10570           */
10571         }
10572         return ret;
10573 #endif
10574 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10575     /* Alpha specific */
10576     case TARGET_NR_osf_setsysinfo:
10577         ret = -TARGET_EOPNOTSUPP;
10578         switch (arg1) {
10579           case TARGET_SSI_IEEE_FP_CONTROL:
10580             {
10581                 uint64_t swcr, fpcr;
10582 
10583                 if (get_user_u64 (swcr, arg2)) {
10584                     return -TARGET_EFAULT;
10585                 }
10586 
10587                 /*
10588                  * The kernel calls swcr_update_status to update the
10589                  * status bits from the fpcr at every point that it
10590                  * could be queried.  Therefore, we store the status
10591                  * bits only in FPCR.
10592                  */
10593                 ((CPUAlphaState *)cpu_env)->swcr
10594                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10595 
10596                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10597                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10598                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10599                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10600                 ret = 0;
10601             }
10602             break;
10603 
10604           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10605             {
10606                 uint64_t exc, fpcr, fex;
10607 
10608                 if (get_user_u64(exc, arg2)) {
10609                     return -TARGET_EFAULT;
10610                 }
10611                 exc &= SWCR_STATUS_MASK;
10612                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10613 
10614                 /* Old exceptions are not signaled.  */
10615                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10616                 fex = exc & ~fex;
10617                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10618                 fex &= ((CPUArchState *)cpu_env)->swcr;
10619 
10620                 /* Update the hardware fpcr.  */
10621                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10622                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10623 
10624                 if (fex) {
10625                     int si_code = TARGET_FPE_FLTUNK;
10626                     target_siginfo_t info;
10627 
10628                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10629                         si_code = TARGET_FPE_FLTUND;
10630                     }
10631                     if (fex & SWCR_TRAP_ENABLE_INE) {
10632                         si_code = TARGET_FPE_FLTRES;
10633                     }
10634                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10635                         si_code = TARGET_FPE_FLTUND;
10636                     }
10637                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10638                         si_code = TARGET_FPE_FLTOVF;
10639                     }
10640                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10641                         si_code = TARGET_FPE_FLTDIV;
10642                     }
10643                     if (fex & SWCR_TRAP_ENABLE_INV) {
10644                         si_code = TARGET_FPE_FLTINV;
10645                     }
10646 
10647                     info.si_signo = SIGFPE;
10648                     info.si_errno = 0;
10649                     info.si_code = si_code;
10650                     info._sifields._sigfault._addr
10651                         = ((CPUArchState *)cpu_env)->pc;
10652                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10653                                  QEMU_SI_FAULT, &info);
10654                 }
10655                 ret = 0;
10656             }
10657             break;
10658 
10659           /* case SSI_NVPAIRS:
10660              -- Used with SSIN_UACPROC to enable unaligned accesses.
10661              case SSI_IEEE_STATE_AT_SIGNAL:
10662              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10663              -- Not implemented in linux kernel
10664           */
10665         }
10666         return ret;
10667 #endif
10668 #ifdef TARGET_NR_osf_sigprocmask
10669     /* Alpha specific.  */
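    /* Unlike POSIX sigprocmask, the OSF/1 variant takes the new mask by
     * value and returns the previous mask as the syscall result. */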
10670     case TARGET_NR_osf_sigprocmask:
10671         {
10672             abi_ulong mask;
10673             int how;
10674             sigset_t set, oldset;
10675 
10676             switch(arg1) {
10677             case TARGET_SIG_BLOCK:
10678                 how = SIG_BLOCK;
10679                 break;
10680             case TARGET_SIG_UNBLOCK:
10681                 how = SIG_UNBLOCK;
10682                 break;
10683             case TARGET_SIG_SETMASK:
10684                 how = SIG_SETMASK;
10685                 break;
10686             default:
10687                 return -TARGET_EINVAL;
10688             }
10689             mask = arg2;
10690             target_to_host_old_sigset(&set, &mask);
10691             ret = do_sigprocmask(how, &set, &oldset);
10692             if (!ret) {
10693                 host_to_target_old_sigset(&mask, &oldset);
10694                 ret = mask;
10695             }
10696         }
10697         return ret;
10698 #endif
10699 
10700 #ifdef TARGET_NR_getgid32
10701     case TARGET_NR_getgid32:
10702         return get_errno(getgid());
10703 #endif
10704 #ifdef TARGET_NR_geteuid32
10705     case TARGET_NR_geteuid32:
10706         return get_errno(geteuid());
10707 #endif
10708 #ifdef TARGET_NR_getegid32
10709     case TARGET_NR_getegid32:
10710         return get_errno(getegid());
10711 #endif
10712 #ifdef TARGET_NR_setreuid32
10713     case TARGET_NR_setreuid32:
10714         return get_errno(setreuid(arg1, arg2));
10715 #endif
10716 #ifdef TARGET_NR_setregid32
10717     case TARGET_NR_setregid32:
10718         return get_errno(setregid(arg1, arg2));
10719 #endif
10720 #ifdef TARGET_NR_getgroups32
10721     case TARGET_NR_getgroups32:
10722         {
10723             int gidsetsize = arg1;
10724             uint32_t *target_grouplist;
10725             gid_t *grouplist;
10726             int i;
10727 
10728             grouplist = alloca(gidsetsize * sizeof(gid_t));
10729             ret = get_errno(getgroups(gidsetsize, grouplist));
10730             if (gidsetsize == 0)
10731                 return ret;
10732             if (!is_error(ret)) {
10733                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10734                 if (!target_grouplist) {
10735                     return -TARGET_EFAULT;
10736                 }
10737                 for (i = 0; i < ret; i++)
10738                     target_grouplist[i] = tswap32(grouplist[i]);
10739                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10740             }
10741         }
10742         return ret;
10743 #endif
10744 #ifdef TARGET_NR_setgroups32
10745     case TARGET_NR_setgroups32:
10746         {
10747             int gidsetsize = arg1;
10748             uint32_t *target_grouplist;
10749             gid_t *grouplist;
10750             int i;
10751 
10752             grouplist = alloca(gidsetsize * sizeof(gid_t));
10753             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10754             if (!target_grouplist) {
10755                 return -TARGET_EFAULT;
10756             }
10757             for (i = 0; i < gidsetsize; i++)
10758                 grouplist[i] = tswap32(target_grouplist[i]);
10759             unlock_user(target_grouplist, arg2, 0);
10760             return get_errno(setgroups(gidsetsize, grouplist));
10761         }
10762 #endif
10763 #ifdef TARGET_NR_fchown32
10764     case TARGET_NR_fchown32:
10765         return get_errno(fchown(arg1, arg2, arg3));
10766 #endif
10767 #ifdef TARGET_NR_setresuid32
10768     case TARGET_NR_setresuid32:
10769         return get_errno(sys_setresuid(arg1, arg2, arg3));
10770 #endif
10771 #ifdef TARGET_NR_getresuid32
10772     case TARGET_NR_getresuid32:
10773         {
10774             uid_t ruid, euid, suid;
10775             ret = get_errno(getresuid(&ruid, &euid, &suid));
10776             if (!is_error(ret)) {
10777                 if (put_user_u32(ruid, arg1)
10778                     || put_user_u32(euid, arg2)
10779                     || put_user_u32(suid, arg3))
10780                     return -TARGET_EFAULT;
10781             }
10782         }
10783         return ret;
10784 #endif
10785 #ifdef TARGET_NR_setresgid32
10786     case TARGET_NR_setresgid32:
10787         return get_errno(sys_setresgid(arg1, arg2, arg3));
10788 #endif
10789 #ifdef TARGET_NR_getresgid32
10790     case TARGET_NR_getresgid32:
10791         {
10792             gid_t rgid, egid, sgid;
10793             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10794             if (!is_error(ret)) {
10795                 if (put_user_u32(rgid, arg1)
10796                     || put_user_u32(egid, arg2)
10797                     || put_user_u32(sgid, arg3))
10798                     return -TARGET_EFAULT;
10799             }
10800         }
10801         return ret;
10802 #endif
10803 #ifdef TARGET_NR_chown32
10804     case TARGET_NR_chown32:
10805         if (!(p = lock_user_string(arg1)))
10806             return -TARGET_EFAULT;
10807         ret = get_errno(chown(p, arg2, arg3));
10808         unlock_user(p, arg1, 0);
10809         return ret;
10810 #endif
10811 #ifdef TARGET_NR_setuid32
10812     case TARGET_NR_setuid32:
10813         return get_errno(sys_setuid(arg1));
10814 #endif
10815 #ifdef TARGET_NR_setgid32
10816     case TARGET_NR_setgid32:
10817         return get_errno(sys_setgid(arg1));
10818 #endif
10819 #ifdef TARGET_NR_setfsuid32
10820     case TARGET_NR_setfsuid32:
10821         return get_errno(setfsuid(arg1));
10822 #endif
10823 #ifdef TARGET_NR_setfsgid32
10824     case TARGET_NR_setfsgid32:
10825         return get_errno(setfsgid(arg1));
10826 #endif
10827 #ifdef TARGET_NR_mincore
10828     case TARGET_NR_mincore:
10829         {
10830             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10831             if (!a) {
10832                 return -TARGET_ENOMEM;
10833             }
10834             p = lock_user_string(arg3);
10835             if (!p) {
10836                 ret = -TARGET_EFAULT;
10837             } else {
10838                 ret = get_errno(mincore(a, arg2, p));
10839                 unlock_user(p, arg3, ret);
10840             }
10841             unlock_user(a, arg1, 0);
10842         }
10843         return ret;
10844 #endif
10845 #ifdef TARGET_NR_arm_fadvise64_64
10846     case TARGET_NR_arm_fadvise64_64:
10847         /* arm_fadvise64_64 looks like fadvise64_64 but
10848          * with different argument order: fd, advice, offset, len
10849          * rather than the usual fd, offset, len, advice.
10850          * Note that offset and len are both 64-bit so appear as
10851          * pairs of 32-bit registers.
10852          */
10853         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10854                             target_offset64(arg5, arg6), arg2);
10855         return -host_to_target_errno(ret);
10856 #endif
10857 
10858 #if TARGET_ABI_BITS == 32
10859 
10860 #ifdef TARGET_NR_fadvise64_64
10861     case TARGET_NR_fadvise64_64:
10862 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10863         /* 6 args: fd, advice, offset (high, low), len (high, low) */
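        /* Rotate the arguments into the usual fd, offset, len, advice
         * order used by the call below: advice moves from arg2 to arg6. */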
10864         ret = arg2;
10865         arg2 = arg3;
10866         arg3 = arg4;
10867         arg4 = arg5;
10868         arg5 = arg6;
10869         arg6 = ret;
10870 #else
10871         /* 6 args: fd, offset (high, low), len (high, low), advice */
10872         if (regpairs_aligned(cpu_env, num)) {
10873             /* offset is in (3,4), len in (5,6) and advice in 7 */
10874             arg2 = arg3;
10875             arg3 = arg4;
10876             arg4 = arg5;
10877             arg5 = arg6;
10878             arg6 = arg7;
10879         }
10880 #endif
10881         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10882                             target_offset64(arg4, arg5), arg6);
10883         return -host_to_target_errno(ret);
10884 #endif
10885 
10886 #ifdef TARGET_NR_fadvise64
10887     case TARGET_NR_fadvise64:
10888         /* 5 args: fd, offset (high, low), len, advice */
10889         if (regpairs_aligned(cpu_env, num)) {
10890             /* offset is in (3,4), len in 5 and advice in 6 */
10891             arg2 = arg3;
10892             arg3 = arg4;
10893             arg4 = arg5;
10894             arg5 = arg6;
10895         }
10896         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10897         return -host_to_target_errno(ret);
10898 #endif
10899 
10900 #else /* not a 32-bit ABI */
10901 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10902 #ifdef TARGET_NR_fadvise64_64
10903     case TARGET_NR_fadvise64_64:
10904 #endif
10905 #ifdef TARGET_NR_fadvise64
10906     case TARGET_NR_fadvise64:
10907 #endif
10908 #ifdef TARGET_S390X
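        /* s390 numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than the
         * generic 4/5, so remap them (and turn the generic values into
         * something the host will reject). */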
10909         switch (arg4) {
10910         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10911         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10912         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10913         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10914         default: break;
10915         }
10916 #endif
10917         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10918 #endif
10919 #endif /* end of 64-bit ABI fadvise handling */
10920 
10921 #ifdef TARGET_NR_madvise
10922     case TARGET_NR_madvise:
10923         /* A straight passthrough may not be safe because qemu sometimes
10924            turns private file-backed mappings into anonymous mappings.
10925            This will break MADV_DONTNEED.
10926            This is a hint, so ignoring and returning success is ok.  */
10927         return 0;
10928 #endif
10929 #if TARGET_ABI_BITS == 32
10930     case TARGET_NR_fcntl64:
10931     {
10932         int cmd;
10933         struct flock64 fl;
10934         from_flock64_fn *copyfrom = copy_from_user_flock64;
10935         to_flock64_fn *copyto = copy_to_user_flock64;
10936 
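        /* The old ARM OABI lays out struct flock64 without the alignment
         * padding that EABI inserts before l_start, so it needs its own
         * copy helpers. */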
10937 #ifdef TARGET_ARM
10938         if (!((CPUARMState *)cpu_env)->eabi) {
10939             copyfrom = copy_from_user_oabi_flock64;
10940             copyto = copy_to_user_oabi_flock64;
10941         }
10942 #endif
10943 
10944         cmd = target_to_host_fcntl_cmd(arg2);
10945         if (cmd == -TARGET_EINVAL) {
10946             return cmd;
10947         }
10948 
10949         switch(arg2) {
10950         case TARGET_F_GETLK64:
10951             ret = copyfrom(&fl, arg3);
10952             if (ret) {
10953                 break;
10954             }
10955             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10956             if (ret == 0) {
10957                 ret = copyto(arg3, &fl);
10958             }
10959             break;
10960 
10961         case TARGET_F_SETLK64:
10962         case TARGET_F_SETLKW64:
10963             ret = copyfrom(&fl, arg3);
10964             if (ret) {
10965                 break;
10966             }
10967             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10968             break;
10969         default:
10970             ret = do_fcntl(arg1, arg2, arg3);
10971             break;
10972         }
10973         return ret;
10974     }
10975 #endif
10976 #ifdef TARGET_NR_cacheflush
10977     case TARGET_NR_cacheflush:
10978         /* self-modifying code is handled automatically, so nothing needed */
10979         return 0;
10980 #endif
10981 #ifdef TARGET_NR_getpagesize
10982     case TARGET_NR_getpagesize:
10983         return TARGET_PAGE_SIZE;
10984 #endif
10985     case TARGET_NR_gettid:
10986         return get_errno(sys_gettid());
10987 #ifdef TARGET_NR_readahead
10988     case TARGET_NR_readahead:
10989 #if TARGET_ABI_BITS == 32
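        /* The 64-bit offset arrives as a register pair; ABIs that align
         * such pairs leave an unused slot first, hence the shuffle. */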
10990         if (regpairs_aligned(cpu_env, num)) {
10991             arg2 = arg3;
10992             arg3 = arg4;
10993             arg4 = arg5;
10994         }
10995         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10996 #else
10997         ret = get_errno(readahead(arg1, arg2, arg3));
10998 #endif
10999         return ret;
11000 #endif
11001 #ifdef CONFIG_ATTR
11002 #ifdef TARGET_NR_setxattr
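    /* The *xattr family needs no structure conversion: names and values are
     * plain byte buffers, so only the guest pointers are locked and bounced. */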
11003     case TARGET_NR_listxattr:
11004     case TARGET_NR_llistxattr:
11005     {
11006         void *p, *b = 0;
11007         if (arg2) {
11008             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11009             if (!b) {
11010                 return -TARGET_EFAULT;
11011             }
11012         }
11013         p = lock_user_string(arg1);
11014         if (p) {
11015             if (num == TARGET_NR_listxattr) {
11016                 ret = get_errno(listxattr(p, b, arg3));
11017             } else {
11018                 ret = get_errno(llistxattr(p, b, arg3));
11019             }
11020         } else {
11021             ret = -TARGET_EFAULT;
11022         }
11023         unlock_user(p, arg1, 0);
11024         unlock_user(b, arg2, arg3);
11025         return ret;
11026     }
11027     case TARGET_NR_flistxattr:
11028     {
11029         void *b = 0;
11030         if (arg2) {
11031             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11032             if (!b) {
11033                 return -TARGET_EFAULT;
11034             }
11035         }
11036         ret = get_errno(flistxattr(arg1, b, arg3));
11037         unlock_user(b, arg2, arg3);
11038         return ret;
11039     }
11040     case TARGET_NR_setxattr:
11041     case TARGET_NR_lsetxattr:
11042         {
11043             void *p, *n, *v = 0;
11044             if (arg3) {
11045                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11046                 if (!v) {
11047                     return -TARGET_EFAULT;
11048                 }
11049             }
11050             p = lock_user_string(arg1);
11051             n = lock_user_string(arg2);
11052             if (p && n) {
11053                 if (num == TARGET_NR_setxattr) {
11054                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11055                 } else {
11056                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11057                 }
11058             } else {
11059                 ret = -TARGET_EFAULT;
11060             }
11061             unlock_user(p, arg1, 0);
11062             unlock_user(n, arg2, 0);
11063             unlock_user(v, arg3, 0);
11064         }
11065         return ret;
11066     case TARGET_NR_fsetxattr:
11067         {
11068             void *n, *v = 0;
11069             if (arg3) {
11070                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11071                 if (!v) {
11072                     return -TARGET_EFAULT;
11073                 }
11074             }
11075             n = lock_user_string(arg2);
11076             if (n) {
11077                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11078             } else {
11079                 ret = -TARGET_EFAULT;
11080             }
11081             unlock_user(n, arg2, 0);
11082             unlock_user(v, arg3, 0);
11083         }
11084         return ret;
11085     case TARGET_NR_getxattr:
11086     case TARGET_NR_lgetxattr:
11087         {
11088             void *p, *n, *v = 0;
11089             if (arg3) {
11090                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11091                 if (!v) {
11092                     return -TARGET_EFAULT;
11093                 }
11094             }
11095             p = lock_user_string(arg1);
11096             n = lock_user_string(arg2);
11097             if (p && n) {
11098                 if (num == TARGET_NR_getxattr) {
11099                     ret = get_errno(getxattr(p, n, v, arg4));
11100                 } else {
11101                     ret = get_errno(lgetxattr(p, n, v, arg4));
11102                 }
11103             } else {
11104                 ret = -TARGET_EFAULT;
11105             }
11106             unlock_user(p, arg1, 0);
11107             unlock_user(n, arg2, 0);
11108             unlock_user(v, arg3, arg4);
11109         }
11110         return ret;
11111     case TARGET_NR_fgetxattr:
11112         {
11113             void *n, *v = 0;
11114             if (arg3) {
11115                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11116                 if (!v) {
11117                     return -TARGET_EFAULT;
11118                 }
11119             }
11120             n = lock_user_string(arg2);
11121             if (n) {
11122                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11123             } else {
11124                 ret = -TARGET_EFAULT;
11125             }
11126             unlock_user(n, arg2, 0);
11127             unlock_user(v, arg3, arg4);
11128         }
11129         return ret;
11130     case TARGET_NR_removexattr:
11131     case TARGET_NR_lremovexattr:
11132         {
11133             void *p, *n;
11134             p = lock_user_string(arg1);
11135             n = lock_user_string(arg2);
11136             if (p && n) {
11137                 if (num == TARGET_NR_removexattr) {
11138                     ret = get_errno(removexattr(p, n));
11139                 } else {
11140                     ret = get_errno(lremovexattr(p, n));
11141                 }
11142             } else {
11143                 ret = -TARGET_EFAULT;
11144             }
11145             unlock_user(p, arg1, 0);
11146             unlock_user(n, arg2, 0);
11147         }
11148         return ret;
11149     case TARGET_NR_fremovexattr:
11150         {
11151             void *n;
11152             n = lock_user_string(arg2);
11153             if (n) {
11154                 ret = get_errno(fremovexattr(arg1, n));
11155             } else {
11156                 ret = -TARGET_EFAULT;
11157             }
11158             unlock_user(n, arg2, 0);
11159         }
11160         return ret;
11161 #endif
11162 #endif /* CONFIG_ATTR */
11163 #ifdef TARGET_NR_set_thread_area
11164     case TARGET_NR_set_thread_area:
11165 #if defined(TARGET_MIPS)
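      /* MIPS keeps the TLS pointer in the CP0 UserLocal register, which the
       * guest reads back with the rdhwr instruction. */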
11166       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11167       return 0;
11168 #elif defined(TARGET_CRIS)
11169       if (arg1 & 0xff)
11170           ret = -TARGET_EINVAL;
11171       else {
11172           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11173           ret = 0;
11174       }
11175       return ret;
11176 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11177       return do_set_thread_area(cpu_env, arg1);
11178 #elif defined(TARGET_M68K)
11179       {
11180           TaskState *ts = cpu->opaque;
11181           ts->tp_value = arg1;
11182           return 0;
11183       }
11184 #else
11185       return -TARGET_ENOSYS;
11186 #endif
11187 #endif
11188 #ifdef TARGET_NR_get_thread_area
11189     case TARGET_NR_get_thread_area:
11190 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11191         return do_get_thread_area(cpu_env, arg1);
11192 #elif defined(TARGET_M68K)
11193         {
11194             TaskState *ts = cpu->opaque;
11195             return ts->tp_value;
11196         }
11197 #else
11198         return -TARGET_ENOSYS;
11199 #endif
11200 #endif
11201 #ifdef TARGET_NR_getdomainname
11202     case TARGET_NR_getdomainname:
11203         return -TARGET_ENOSYS;
11204 #endif
11205 
11206 #ifdef TARGET_NR_clock_settime
11207     case TARGET_NR_clock_settime:
11208     {
11209         struct timespec ts;
11210 
11211         ret = target_to_host_timespec(&ts, arg2);
11212         if (!is_error(ret)) {
11213             ret = get_errno(clock_settime(arg1, &ts));
11214         }
11215         return ret;
11216     }
11217 #endif
11218 #ifdef TARGET_NR_clock_gettime
11219     case TARGET_NR_clock_gettime:
11220     {
11221         struct timespec ts;
11222         ret = get_errno(clock_gettime(arg1, &ts));
11223         if (!is_error(ret)) {
11224             ret = host_to_target_timespec(arg2, &ts);
11225         }
11226         return ret;
11227     }
11228 #endif
11229 #ifdef TARGET_NR_clock_getres
11230     case TARGET_NR_clock_getres:
11231     {
11232         struct timespec ts;
11233         ret = get_errno(clock_getres(arg1, &ts));
11234         if (!is_error(ret)) {
11235             host_to_target_timespec(arg2, &ts);
11236         }
11237         return ret;
11238     }
11239 #endif
11240 #ifdef TARGET_NR_clock_nanosleep
11241     case TARGET_NR_clock_nanosleep:
11242     {
11243         struct timespec ts;
11244         target_to_host_timespec(&ts, arg3);
11245         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11246                                              &ts, arg4 ? &ts : NULL));
11247         if (arg4)
11248             host_to_target_timespec(arg4, &ts);
11249 
11250 #if defined(TARGET_PPC)
11251         /* clock_nanosleep is odd in that it returns positive errno values.
11252          * On PPC, CR0 bit 3 should be set in such a situation. */
11253         if (ret && ret != -TARGET_ERESTARTSYS) {
11254             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11255         }
11256 #endif
11257         return ret;
11258     }
11259 #endif
11260 
11261 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
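    /* g2h() converts the guest address to a host pointer, so the kernel's
     * clear_child_tid write at thread exit lands in guest memory. */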
11262     case TARGET_NR_set_tid_address:
11263         return get_errno(set_tid_address((int *)g2h(arg1)));
11264 #endif
11265 
11266     case TARGET_NR_tkill:
11267         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11268 
11269     case TARGET_NR_tgkill:
11270         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11271                          target_to_host_signal(arg3)));
11272 
11273 #ifdef TARGET_NR_set_robust_list
11274     case TARGET_NR_set_robust_list:
11275     case TARGET_NR_get_robust_list:
11276         /* The ABI for supporting robust futexes has userspace pass
11277          * the kernel a pointer to a linked list which is updated by
11278          * userspace after the syscall; the list is walked by the kernel
11279          * when the thread exits. Since the linked list in QEMU guest
11280          * memory isn't a valid linked list for the host and we have
11281          * no way to reliably intercept the thread-death event, we can't
11282          * support these. Silently return ENOSYS so that guest userspace
11283          * falls back to a non-robust futex implementation (which should
11284          * be OK except in the corner case of the guest crashing while
11285          * holding a mutex that is shared with another process via
11286          * shared memory).
11287          */
11288         return -TARGET_ENOSYS;
11289 #endif
11290 
11291 #if defined(TARGET_NR_utimensat)
11292     case TARGET_NR_utimensat:
11293         {
11294             struct timespec *tsp, ts[2];
11295             if (!arg3) {
11296                 tsp = NULL;
11297             } else {
11298                 target_to_host_timespec(ts, arg3);
11299                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11300                 tsp = ts;
11301             }
11302             if (!arg2)
11303                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11304             else {
11305                 if (!(p = lock_user_string(arg2))) {
11306                     return -TARGET_EFAULT;
11307                 }
11308                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11309                 unlock_user(p, arg2, 0);
11310             }
11311         }
11312         return ret;
11313 #endif
11314     case TARGET_NR_futex:
11315         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11316 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
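    /* inotify events read from this fd are in host layout; the registered
     * fd translator converts them to the guest layout on read. */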
11317     case TARGET_NR_inotify_init:
11318         ret = get_errno(sys_inotify_init());
11319         if (ret >= 0) {
11320             fd_trans_register(ret, &target_inotify_trans);
11321         }
11322         return ret;
11323 #endif
11324 #ifdef CONFIG_INOTIFY1
11325 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11326     case TARGET_NR_inotify_init1:
11327         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11328                                           fcntl_flags_tbl)));
11329         if (ret >= 0) {
11330             fd_trans_register(ret, &target_inotify_trans);
11331         }
11332         return ret;
11333 #endif
11334 #endif
11335 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11336     case TARGET_NR_inotify_add_watch:
11337         p = lock_user_string(arg2);
11338         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11339         unlock_user(p, arg2, 0);
11340         return ret;
11341 #endif
11342 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11343     case TARGET_NR_inotify_rm_watch:
11344         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11345 #endif
11346 
11347 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11348     case TARGET_NR_mq_open:
11349         {
11350             struct mq_attr posix_mq_attr;
11351             struct mq_attr *pposix_mq_attr;
11352             int host_flags;
11353 
11354             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11355             pposix_mq_attr = NULL;
11356             if (arg4) {
11357                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11358                     return -TARGET_EFAULT;
11359                 }
11360                 pposix_mq_attr = &posix_mq_attr;
11361             }
11362             p = lock_user_string(arg1 - 1);
11363             if (!p) {
11364                 return -TARGET_EFAULT;
11365             }
11366             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11367             unlock_user (p, arg1, 0);
11368         }
11369         return ret;
11370 
11371     case TARGET_NR_mq_unlink:
11372         p = lock_user_string(arg1 - 1);
11373         if (!p) {
11374             return -TARGET_EFAULT;
11375         }
11376         ret = get_errno(mq_unlink(p));
11377         unlock_user (p, arg1, 0);
11378         return ret;
11379 
11380     case TARGET_NR_mq_timedsend:
11381         {
11382             struct timespec ts;
11383 
11384             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11385             if (arg5 != 0) {
11386                 target_to_host_timespec(&ts, arg5);
11387                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11388                 host_to_target_timespec(arg5, &ts);
11389             } else {
11390                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11391             }
11392             unlock_user (p, arg2, arg3);
11393         }
11394         return ret;
11395 
11396     case TARGET_NR_mq_timedreceive:
11397         {
11398             struct timespec ts;
11399             unsigned int prio;
11400 
11401             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11402             if (arg5 != 0) {
11403                 target_to_host_timespec(&ts, arg5);
11404                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11405                                                      &prio, &ts));
11406                 host_to_target_timespec(arg5, &ts);
11407             } else {
11408                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11409                                                      &prio, NULL));
11410             }
11411             unlock_user (p, arg2, arg3);
11412             if (arg4 != 0)
11413                 put_user_u32(prio, arg4);
11414         }
11415         return ret;
11416 
11417     /* Not implemented for now... */
11418 /*     case TARGET_NR_mq_notify: */
11419 /*         break; */
11420 
11421     case TARGET_NR_mq_getsetattr:
11422         {
11423             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11424             ret = 0;
11425             if (arg2 != 0) {
11426                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11427                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11428                                            &posix_mq_attr_out));
11429             } else if (arg3 != 0) {
11430                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11431             }
11432             if (ret == 0 && arg3 != 0) {
11433                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11434             }
11435         }
11436         return ret;
11437 #endif
11438 
11439 #ifdef CONFIG_SPLICE
11440 #ifdef TARGET_NR_tee
11441     case TARGET_NR_tee:
11442         {
11443             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11444         }
11445         return ret;
11446 #endif
11447 #ifdef TARGET_NR_splice
11448     case TARGET_NR_splice:
11449         {
11450             loff_t loff_in, loff_out;
11451             loff_t *ploff_in = NULL, *ploff_out = NULL;
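            /* Bounce the off_in/off_out values through local copies: read
             * them from guest memory before the call and write the updated
             * offsets back afterwards. */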
11452             if (arg2) {
11453                 if (get_user_u64(loff_in, arg2)) {
11454                     return -TARGET_EFAULT;
11455                 }
11456                 ploff_in = &loff_in;
11457             }
11458             if (arg4) {
11459                 if (get_user_u64(loff_out, arg4)) {
11460                     return -TARGET_EFAULT;
11461                 }
11462                 ploff_out = &loff_out;
11463             }
11464             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11465             if (arg2) {
11466                 if (put_user_u64(loff_in, arg2)) {
11467                     return -TARGET_EFAULT;
11468                 }
11469             }
11470             if (arg4) {
11471                 if (put_user_u64(loff_out, arg4)) {
11472                     return -TARGET_EFAULT;
11473                 }
11474             }
11475         }
11476         return ret;
11477 #endif
11478 #ifdef TARGET_NR_vmsplice
11479     case TARGET_NR_vmsplice:
11480         {
11481             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11482             if (vec != NULL) {
11483                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11484                 unlock_iovec(vec, arg2, arg3, 0);
11485             } else {
11486                 ret = -host_to_target_errno(errno);
11487             }
11488         }
11489         return ret;
11490 #endif
11491 #endif /* CONFIG_SPLICE */
11492 #ifdef CONFIG_EVENTFD
11493 #if defined(TARGET_NR_eventfd)
11494     case TARGET_NR_eventfd:
11495         ret = get_errno(eventfd(arg1, 0));
11496         if (ret >= 0) {
11497             fd_trans_register(ret, &target_eventfd_trans);
11498         }
11499         return ret;
11500 #endif
11501 #if defined(TARGET_NR_eventfd2)
11502     case TARGET_NR_eventfd2:
11503     {
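        /* EFD_NONBLOCK/EFD_CLOEXEC share their values with O_NONBLOCK and
         * O_CLOEXEC, which differ between guest and host, so translate
         * those two flags explicitly. */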
11504         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11505         if (arg2 & TARGET_O_NONBLOCK) {
11506             host_flags |= O_NONBLOCK;
11507         }
11508         if (arg2 & TARGET_O_CLOEXEC) {
11509             host_flags |= O_CLOEXEC;
11510         }
11511         ret = get_errno(eventfd(arg1, host_flags));
11512         if (ret >= 0) {
11513             fd_trans_register(ret, &target_eventfd_trans);
11514         }
11515         return ret;
11516     }
11517 #endif
11518 #endif /* CONFIG_EVENTFD  */
11519 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11520     case TARGET_NR_fallocate:
11521 #if TARGET_ABI_BITS == 32
11522         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11523                                   target_offset64(arg5, arg6)));
11524 #else
11525         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11526 #endif
11527         return ret;
11528 #endif
11529 #if defined(CONFIG_SYNC_FILE_RANGE)
11530 #if defined(TARGET_NR_sync_file_range)
11531     case TARGET_NR_sync_file_range:
11532 #if TARGET_ABI_BITS == 32
11533 #if defined(TARGET_MIPS)
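        /* The MIPS o32 ABI inserts a padding argument after the fd, so the
         * offset/len pairs start at arg3 and the flags arrive in arg7. */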
11534         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11535                                         target_offset64(arg5, arg6), arg7));
11536 #else
11537         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11538                                         target_offset64(arg4, arg5), arg6));
11539 #endif /* !TARGET_MIPS */
11540 #else
11541         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11542 #endif
11543         return ret;
11544 #endif
11545 #if defined(TARGET_NR_sync_file_range2)
11546     case TARGET_NR_sync_file_range2:
11547         /* This is like sync_file_range but the arguments are reordered */
11548 #if TARGET_ABI_BITS == 32
11549         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11550                                         target_offset64(arg5, arg6), arg2));
11551 #else
11552         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11553 #endif
11554         return ret;
11555 #endif
11556 #endif
11557 #if defined(TARGET_NR_signalfd4)
11558     case TARGET_NR_signalfd4:
11559         return do_signalfd4(arg1, arg2, arg4);
11560 #endif
11561 #if defined(TARGET_NR_signalfd)
11562     case TARGET_NR_signalfd:
11563         return do_signalfd4(arg1, arg2, 0);
11564 #endif
11565 #if defined(CONFIG_EPOLL)
11566 #if defined(TARGET_NR_epoll_create)
11567     case TARGET_NR_epoll_create:
11568         return get_errno(epoll_create(arg1));
11569 #endif
11570 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11571     case TARGET_NR_epoll_create1:
11572         return get_errno(epoll_create1(arg1));
11573 #endif
11574 #if defined(TARGET_NR_epoll_ctl)
11575     case TARGET_NR_epoll_ctl:
11576     {
11577         struct epoll_event ep;
11578         struct epoll_event *epp = 0;
11579         if (arg4) {
11580             struct target_epoll_event *target_ep;
11581             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11582                 return -TARGET_EFAULT;
11583             }
11584             ep.events = tswap32(target_ep->events);
11585             /* The epoll_data_t union is just opaque data to the kernel,
11586              * so we transfer all 64 bits across and need not worry what
11587              * actual data type it is.
11588              */
11589             ep.data.u64 = tswap64(target_ep->data.u64);
11590             unlock_user_struct(target_ep, arg4, 0);
11591             epp = &ep;
11592         }
11593         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11594     }
11595 #endif
11596 
11597 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11598 #if defined(TARGET_NR_epoll_wait)
11599     case TARGET_NR_epoll_wait:
11600 #endif
11601 #if defined(TARGET_NR_epoll_pwait)
11602     case TARGET_NR_epoll_pwait:
11603 #endif
11604     {
11605         struct target_epoll_event *target_ep;
11606         struct epoll_event *ep;
11607         int epfd = arg1;
11608         int maxevents = arg3;
11609         int timeout = arg4;
11610 
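        /* Bound maxevents before sizing the host buffer so the guest cannot
         * force an oversized allocation. */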
11611         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11612             return -TARGET_EINVAL;
11613         }
11614 
11615         target_ep = lock_user(VERIFY_WRITE, arg2,
11616                               maxevents * sizeof(struct target_epoll_event), 1);
11617         if (!target_ep) {
11618             return -TARGET_EFAULT;
11619         }
11620 
11621         ep = g_try_new(struct epoll_event, maxevents);
11622         if (!ep) {
11623             unlock_user(target_ep, arg2, 0);
11624             return -TARGET_ENOMEM;
11625         }
11626 
11627         switch (num) {
11628 #if defined(TARGET_NR_epoll_pwait)
11629         case TARGET_NR_epoll_pwait:
11630         {
11631             target_sigset_t *target_set;
11632             sigset_t _set, *set = &_set;
11633 
11634             if (arg5) {
11635                 if (arg6 != sizeof(target_sigset_t)) {
11636                     ret = -TARGET_EINVAL;
11637                     break;
11638                 }
11639 
11640                 target_set = lock_user(VERIFY_READ, arg5,
11641                                        sizeof(target_sigset_t), 1);
11642                 if (!target_set) {
11643                     ret = -TARGET_EFAULT;
11644                     break;
11645                 }
11646                 target_to_host_sigset(set, target_set);
11647                 unlock_user(target_set, arg5, 0);
11648             } else {
11649                 set = NULL;
11650             }
11651 
11652             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11653                                              set, SIGSET_T_SIZE));
11654             break;
11655         }
11656 #endif
11657 #if defined(TARGET_NR_epoll_wait)
11658         case TARGET_NR_epoll_wait:
11659             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11660                                              NULL, 0));
11661             break;
11662 #endif
11663         default:
11664             ret = -TARGET_ENOSYS;
11665         }
11666         if (!is_error(ret)) {
11667             int i;
11668             for (i = 0; i < ret; i++) {
11669                 target_ep[i].events = tswap32(ep[i].events);
11670                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11671             }
11672             unlock_user(target_ep, arg2,
11673                         ret * sizeof(struct target_epoll_event));
11674         } else {
11675             unlock_user(target_ep, arg2, 0);
11676         }
11677         g_free(ep);
11678         return ret;
11679     }
11680 #endif
11681 #endif
11682 #ifdef TARGET_NR_prlimit64
11683     case TARGET_NR_prlimit64:
11684     {
11685         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11686         struct target_rlimit64 *target_rnew, *target_rold;
11687         struct host_rlimit64 rnew, rold, *rnewp = 0;
11688         int resource = target_to_host_resource(arg2);
11689         if (arg3) {
11690             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11691                 return -TARGET_EFAULT;
11692             }
11693             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11694             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11695             unlock_user_struct(target_rnew, arg3, 0);
11696             rnewp = &rnew;
11697         }
11698 
11699         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11700         if (!is_error(ret) && arg4) {
11701             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11702                 return -TARGET_EFAULT;
11703             }
11704             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11705             target_rold->rlim_max = tswap64(rold.rlim_max);
11706             unlock_user_struct(target_rold, arg4, 1);
11707         }
11708         return ret;
11709     }
11710 #endif
11711 #ifdef TARGET_NR_gethostname
11712     case TARGET_NR_gethostname:
11713     {
11714         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11715         if (name) {
11716             ret = get_errno(gethostname(name, arg2));
11717             unlock_user(name, arg1, arg2);
11718         } else {
11719             ret = -TARGET_EFAULT;
11720         }
11721         return ret;
11722     }
11723 #endif
11724 #ifdef TARGET_NR_atomic_cmpxchg_32
11725     case TARGET_NR_atomic_cmpxchg_32:
11726     {
11727         /* should use start_exclusive from main.c */
11728         abi_ulong mem_value;
11729         if (get_user_u32(mem_value, arg6)) {
11730             target_siginfo_t info;
11731             info.si_signo = SIGSEGV;
11732             info.si_errno = 0;
11733             info.si_code = TARGET_SEGV_MAPERR;
11734             info._sifields._sigfault._addr = arg6;
11735             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11736                          QEMU_SI_FAULT, &info);
11737             ret = 0xdeadbeef;
11738 
11739         }
11740         if (mem_value == arg2)
11741             put_user_u32(arg1, arg6);
11742         return mem_value;
11743     }
11744 #endif
11745 #ifdef TARGET_NR_atomic_barrier
11746     case TARGET_NR_atomic_barrier:
11747         /* Like the kernel implementation and the
11748            qemu arm barrier, no-op this? */
11749         return 0;
11750 #endif
11751 
11752 #ifdef TARGET_NR_timer_create
11753     case TARGET_NR_timer_create:
11754     {
11755         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
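        /* A guest timer_t is the g_posix_timers index tagged with
         * TIMER_MAGIC, which get_timer_id() checks and strips when ids
         * come back from the guest. */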
11756 
11757         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11758 
11759         int clkid = arg1;
11760         int timer_index = next_free_host_timer();
11761 
11762         if (timer_index < 0) {
11763             ret = -TARGET_EAGAIN;
11764         } else {
11765             timer_t *phtimer = g_posix_timers + timer_index;
11766 
11767             if (arg2) {
11768                 phost_sevp = &host_sevp;
11769                 ret = target_to_host_sigevent(phost_sevp, arg2);
11770                 if (ret != 0) {
11771                     return ret;
11772                 }
11773             }
11774 
11775             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11776             if (ret) {
11777                 phtimer = NULL;
11778             } else {
11779                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11780                     return -TARGET_EFAULT;
11781                 }
11782             }
11783         }
11784         return ret;
11785     }
11786 #endif
11787 
11788 #ifdef TARGET_NR_timer_settime
11789     case TARGET_NR_timer_settime:
11790     {
11791         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11792          * struct itimerspec * old_value */
11793         target_timer_t timerid = get_timer_id(arg1);
11794 
11795         if (timerid < 0) {
11796             ret = timerid;
11797         } else if (arg3 == 0) {
11798             ret = -TARGET_EINVAL;
11799         } else {
11800             timer_t htimer = g_posix_timers[timerid];
11801             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11802 
11803             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11804                 return -TARGET_EFAULT;
11805             }
11806             ret = get_errno(
11807                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11808             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11809                 return -TARGET_EFAULT;
11810             }
11811         }
11812         return ret;
11813     }
11814 #endif
11815 
11816 #ifdef TARGET_NR_timer_gettime
11817     case TARGET_NR_timer_gettime:
11818     {
11819         /* args: timer_t timerid, struct itimerspec *curr_value */
11820         target_timer_t timerid = get_timer_id(arg1);
11821 
11822         if (timerid < 0) {
11823             ret = timerid;
11824         } else if (!arg2) {
11825             ret = -TARGET_EFAULT;
11826         } else {
11827             timer_t htimer = g_posix_timers[timerid];
11828             struct itimerspec hspec;
11829             ret = get_errno(timer_gettime(htimer, &hspec));
11830 
11831             if (host_to_target_itimerspec(arg2, &hspec)) {
11832                 ret = -TARGET_EFAULT;
11833             }
11834         }
11835         return ret;
11836     }
11837 #endif
11838 
11839 #ifdef TARGET_NR_timer_getoverrun
11840     case TARGET_NR_timer_getoverrun:
11841     {
11842         /* args: timer_t timerid */
11843         target_timer_t timerid = get_timer_id(arg1);
11844 
11845         if (timerid < 0) {
11846             ret = timerid;
11847         } else {
11848             timer_t htimer = g_posix_timers[timerid];
11849             ret = get_errno(timer_getoverrun(htimer));
11850         }
11851         return ret;
11852     }
11853 #endif
11854 
11855 #ifdef TARGET_NR_timer_delete
11856     case TARGET_NR_timer_delete:
11857     {
11858         /* args: timer_t timerid */
11859         target_timer_t timerid = get_timer_id(arg1);
11860 
11861         if (timerid < 0) {
11862             ret = timerid;
11863         } else {
11864             timer_t htimer = g_posix_timers[timerid];
11865             ret = get_errno(timer_delete(htimer));
11866             g_posix_timers[timerid] = 0;
11867         }
11868         return ret;
11869     }
11870 #endif
11871 
11872 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11873     case TARGET_NR_timerfd_create:
11874         return get_errno(timerfd_create(arg1,
11875                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11876 #endif
11877 
11878 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11879     case TARGET_NR_timerfd_gettime:
11880         {
11881             struct itimerspec its_curr;
11882 
11883             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11884 
11885             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11886                 return -TARGET_EFAULT;
11887             }
11888         }
11889         return ret;
11890 #endif
11891 
11892 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11893     case TARGET_NR_timerfd_settime:
11894         {
11895             struct itimerspec its_new, its_old, *p_new;
11896 
11897             if (arg3) {
11898                 if (target_to_host_itimerspec(&its_new, arg3)) {
11899                     return -TARGET_EFAULT;
11900                 }
11901                 p_new = &its_new;
11902             } else {
11903                 p_new = NULL;
11904             }
11905 
11906             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11907 
11908             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11909                 return -TARGET_EFAULT;
11910             }
11911         }
11912         return ret;
11913 #endif
11914 
11915 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11916     case TARGET_NR_ioprio_get:
11917         return get_errno(ioprio_get(arg1, arg2));
11918 #endif
11919 
11920 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11921     case TARGET_NR_ioprio_set:
11922         return get_errno(ioprio_set(arg1, arg2, arg3));
11923 #endif
11924 
11925 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11926     case TARGET_NR_setns:
11927         return get_errno(setns(arg1, arg2));
11928 #endif
11929 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11930     case TARGET_NR_unshare:
11931         return get_errno(unshare(arg1));
11932 #endif
11933 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11934     case TARGET_NR_kcmp:
11935         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11936 #endif
11937 #ifdef TARGET_NR_swapcontext
11938     case TARGET_NR_swapcontext:
11939         /* PowerPC specific.  */
11940         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11941 #endif
11942 #ifdef TARGET_NR_memfd_create
11943     case TARGET_NR_memfd_create:
11944         p = lock_user_string(arg1);
11945         if (!p) {
11946             return -TARGET_EFAULT;
11947         }
11948         ret = get_errno(memfd_create(p, arg2));
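        /* Make sure no stale fd translator from a previously closed fd with
         * the same number stays attached to the new memfd. */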
11949         fd_trans_unregister(ret);
11950         unlock_user(p, arg1, 0);
11951         return ret;
11952 #endif
11953 
11954     default:
11955         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11956         return -TARGET_ENOSYS;
11957     }
11958     return ret;
11959 }
11960 
11961 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11962                     abi_long arg2, abi_long arg3, abi_long arg4,
11963                     abi_long arg5, abi_long arg6, abi_long arg7,
11964                     abi_long arg8)
11965 {
11966     CPUState *cpu = env_cpu(cpu_env);
11967     abi_long ret;
11968 
11969 #ifdef DEBUG_ERESTARTSYS
11970     /* Debug-only code for exercising the syscall-restart code paths
11971      * in the per-architecture cpu main loops: restart every syscall
11972      * the guest makes once before letting it through.
11973      */
11974     {
11975         static bool flag;
11976         flag = !flag;
11977         if (flag) {
11978             return -TARGET_ERESTARTSYS;
11979         }
11980     }
11981 #endif
11982 
11983     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11984                              arg5, arg6, arg7, arg8);
11985 
11986     if (unlikely(do_strace)) {
11987         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11988         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11989                           arg5, arg6, arg7, arg8);
11990         print_syscall_ret(num, ret);
11991     } else {
11992         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11993                           arg5, arg6, arg7, arg8);
11994     }
11995 
11996     trace_guest_user_syscall_ret(cpu, num, ret);
11997     return ret;
11998 }
11999