xref: /openbmc/qemu/linux-user/syscall.c (revision 2ae16a6a)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <linux/sockios.h>
41 #include <sys/un.h>
42 #include <sys/uio.h>
43 #include <poll.h>
44 #include <sys/times.h>
45 #include <sys/shm.h>
46 #include <sys/sem.h>
47 #include <sys/statfs.h>
48 #include <utime.h>
49 #include <sys/sysinfo.h>
50 #include <sys/signalfd.h>
51 //#include <sys/user.h>
52 #include <netinet/ip.h>
53 #include <netinet/tcp.h>
54 #include <linux/wireless.h>
55 #include <linux/icmp.h>
56 #include <linux/icmpv6.h>
57 #include <linux/errqueue.h>
58 #include <linux/random.h>
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include <linux/if_alg.h>
107 #include "linux_loop.h"
108 #include "uname.h"
109 
110 #include "qemu.h"
111 #include "qemu/guest-random.h"
112 #include "qapi/error.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
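/*
 * Illustrative walk-through (editorial note, not part of the original code):
 * a typical glibc pthread_create() issues clone() with
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * All of CLONE_THREAD_FLAGS are present and no bit falls within
 * CLONE_INVALID_THREAD_FLAGS, so it is treated as thread creation.
 * A plain fork() passes only SIGCHLD, which lies entirely within CSIGNAL,
 * so it contains none of CLONE_THREAD_FLAGS and no invalid fork bits.
 */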
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
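/*
 * Illustrative expansion (editorial sketch, never compiled): the invocation
 *     _syscall0(int, sys_gettid)
 * further below, combined with "#define __NR_sys_gettid __NR_gettid",
 * produces a wrapper equivalent to:
 */
#if 0
static int sys_gettid(void)
{
    return syscall(__NR_gettid);
}
#endif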
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 #define __NR_sys_statx __NR_statx
243 
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247 
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252 
253 #define __NR_sys_gettid __NR_gettid
254 _syscall0(int, sys_gettid)
255 
256 /* For the 64-bit guest on 32-bit host case we must emulate
257  * getdents using getdents64, because otherwise the host
258  * might hand us back more dirent records than we can fit
259  * into the guest buffer after structure format conversion.
260  * Otherwise we emulate the guest getdents using the host getdents, if the host has one.
261  */
262 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
263 #define EMULATE_GETDENTS_WITH_GETDENTS
264 #endif
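/*
 * Worked example of the condition above (editorial note): for a 64-bit
 * guest (TARGET_ABI_BITS == 64) running on a 32-bit host
 * (HOST_LONG_BITS == 32), HOST_LONG_BITS >= TARGET_ABI_BITS is false, so
 * EMULATE_GETDENTS_WITH_GETDENTS stays undefined and the getdents64-based
 * wrapper below is used instead.
 */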
265 
266 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
267 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
268 #endif
269 #if (defined(TARGET_NR_getdents) && \
270       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
271     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
272 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
273 #endif
274 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
275 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
276           loff_t *, res, uint, wh);
277 #endif
278 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
279 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
280           siginfo_t *, uinfo)
281 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
282 #ifdef __NR_exit_group
283 _syscall1(int,exit_group,int,error_code)
284 #endif
285 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
286 _syscall1(int,set_tid_address,int *,tidptr)
287 #endif
288 #if defined(TARGET_NR_futex) && defined(__NR_futex)
289 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
290           const struct timespec *,timeout,int *,uaddr2,int,val3)
291 #endif
292 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
293 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
294           unsigned long *, user_mask_ptr);
295 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
296 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
297           unsigned long *, user_mask_ptr);
298 #define __NR_sys_getcpu __NR_getcpu
299 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
300 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
301           void *, arg);
302 _syscall2(int, capget, struct __user_cap_header_struct *, header,
303           struct __user_cap_data_struct *, data);
304 _syscall2(int, capset, struct __user_cap_header_struct *, header,
305           struct __user_cap_data_struct *, data);
306 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
307 _syscall2(int, ioprio_get, int, which, int, who)
308 #endif
309 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
310 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
311 #endif
312 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
313 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
314 #endif
315 
316 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
317 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
318           unsigned long, idx1, unsigned long, idx2)
319 #endif
320 
321 /*
322  * It is assumed that struct statx is architecture independent.
323  */
324 #if defined(TARGET_NR_statx) && defined(__NR_statx)
325 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
326           unsigned int, mask, struct target_statx *, statxbuf)
327 #endif
328 
329 static bitmask_transtbl fcntl_flags_tbl[] = {
330   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
331   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
332   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
333   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
334   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
335   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
336   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
337   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
338   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
339   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
340   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
341   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
342   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
343 #if defined(O_DIRECT)
344   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
345 #endif
346 #if defined(O_NOATIME)
347   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
348 #endif
349 #if defined(O_CLOEXEC)
350   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
351 #endif
352 #if defined(O_PATH)
353   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
354 #endif
355 #if defined(O_TMPFILE)
356   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
357 #endif
358   /* Don't terminate the list prematurely on 64-bit host+guest.  */
359 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
360   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
361 #endif
362   { 0, 0, 0, 0 }
363 };
364 
365 static int sys_getcwd1(char *buf, size_t size)
366 {
367   if (getcwd(buf, size) == NULL) {
368       /* getcwd() sets errno */
369       return (-1);
370   }
371   return strlen(buf)+1;
372 }
373 
374 #ifdef TARGET_NR_utimensat
375 #if defined(__NR_utimensat)
376 #define __NR_sys_utimensat __NR_utimensat
377 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
378           const struct timespec *,tsp,int,flags)
379 #else
380 static int sys_utimensat(int dirfd, const char *pathname,
381                          const struct timespec times[2], int flags)
382 {
383     errno = ENOSYS;
384     return -1;
385 }
386 #endif
387 #endif /* TARGET_NR_utimensat */
388 
389 #ifdef TARGET_NR_renameat2
390 #if defined(__NR_renameat2)
391 #define __NR_sys_renameat2 __NR_renameat2
392 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
393           const char *, new, unsigned int, flags)
394 #else
395 static int sys_renameat2(int oldfd, const char *old,
396                          int newfd, const char *new, int flags)
397 {
398     if (flags == 0) {
399         return renameat(oldfd, old, newfd, new);
400     }
401     errno = ENOSYS;
402     return -1;
403 }
404 #endif
405 #endif /* TARGET_NR_renameat2 */
406 
407 #ifdef CONFIG_INOTIFY
408 #include <sys/inotify.h>
409 
410 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
411 static int sys_inotify_init(void)
412 {
413   return (inotify_init());
414 }
415 #endif
416 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
417 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
418 {
419   return (inotify_add_watch(fd, pathname, mask));
420 }
421 #endif
422 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
423 static int sys_inotify_rm_watch(int fd, int32_t wd)
424 {
425   return (inotify_rm_watch(fd, wd));
426 }
427 #endif
428 #ifdef CONFIG_INOTIFY1
429 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
430 static int sys_inotify_init1(int flags)
431 {
432   return (inotify_init1(flags));
433 }
434 #endif
435 #endif
436 #else
437 /* Userspace can usually survive at runtime without inotify */
438 #undef TARGET_NR_inotify_init
439 #undef TARGET_NR_inotify_init1
440 #undef TARGET_NR_inotify_add_watch
441 #undef TARGET_NR_inotify_rm_watch
442 #endif /* CONFIG_INOTIFY  */
443 
444 #if defined(TARGET_NR_prlimit64)
445 #ifndef __NR_prlimit64
446 # define __NR_prlimit64 -1
447 #endif
448 #define __NR_sys_prlimit64 __NR_prlimit64
449 /* The glibc rlimit structure may not match the one used by the underlying syscall */
450 struct host_rlimit64 {
451     uint64_t rlim_cur;
452     uint64_t rlim_max;
453 };
454 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
455           const struct host_rlimit64 *, new_limit,
456           struct host_rlimit64 *, old_limit)
457 #endif
458 
459 
460 #if defined(TARGET_NR_timer_create)
461 /* Maximum of 32 active POSIX timers allowed at any one time. */
462 static timer_t g_posix_timers[32] = { 0, } ;
463 
464 static inline int next_free_host_timer(void)
465 {
466     int k ;
467     /* FIXME: Does finding the next free slot require a lock? */
468     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
469         if (g_posix_timers[k] == 0) {
470             g_posix_timers[k] = (timer_t) 1;
471             return k;
472         }
473     }
474     return -1;
475 }
476 #endif
477 
478 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
479 #ifdef TARGET_ARM
480 static inline int regpairs_aligned(void *cpu_env, int num)
481 {
482     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
483 }
484 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
485 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
486 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
487 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
488  * of registers, which translates to the same rule as ARM/MIPS, because we start with
489  * r3 as arg1 */
490 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
491 #elif defined(TARGET_SH4)
492 /* SH4 doesn't align register pairs, except for p{read,write}64 */
493 static inline int regpairs_aligned(void *cpu_env, int num)
494 {
495     switch (num) {
496     case TARGET_NR_pread64:
497     case TARGET_NR_pwrite64:
498         return 1;
499 
500     default:
501         return 0;
502     }
503 }
504 #elif defined(TARGET_XTENSA)
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
506 #else
507 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
508 #endif
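/*
 * Illustrative example (editorial note): for a 32-bit ARM EABI guest,
 *     pread64(fd, buf, count, offset)
 * passes the 64-bit offset in an aligned register pair, leaving a padding
 * slot, roughly:
 *     r0 = fd, r1 = buf, r2 = count, r3 = <pad>, r4/r5 = offset halves
 * When regpairs_aligned() returns 1, the syscall dispatch later in this
 * file shifts the arguments by one to skip that padding slot before
 * reassembling the 64-bit value.
 */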
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 /* target_to_host_errno_table[] is initialized from
513  * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 };
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERFKILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
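/*
 * Illustrative sketch (editorial): a failing host call such as
 *     ret = get_errno(open("/no/such/file", O_RDONLY));
 * sees open() return -1 with errno == ENOENT, so ret becomes
 * -host_to_target_errno(ENOENT), i.e. -TARGET_ENOENT, matching the
 * negative-target-errno convention used throughout this file.
 */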
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
727 
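/*
 * Illustrative expansion (editorial sketch, never compiled): the first
 * invocation below,
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 * defines:
 */
#if 0
static ssize_t safe_read(int fd, void *buff, size_t count)
{
    return safe_syscall(__NR_read, fd, buff, count);
}
#endif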
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
733               struct rusage *, rusage)
734 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
735               int, options, struct rusage *, rusage)
736 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
737 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
738               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
739 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
740               struct timespec *, tsp, const sigset_t *, sigmask,
741               size_t, sigsetsize)
742 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
743               int, maxevents, int, timeout, const sigset_t *, sigmask,
744               size_t, sigsetsize)
745 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
746               const struct timespec *,timeout,int *,uaddr2,int,val3)
747 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
748 safe_syscall2(int, kill, pid_t, pid, int, sig)
749 safe_syscall2(int, tkill, int, tid, int, sig)
750 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
751 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
752 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
754               unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
756               unsigned long, pos_l, unsigned long, pos_h)
757 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
758               socklen_t, addrlen)
759 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
760               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
761 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
762               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
763 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
764 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
765 safe_syscall2(int, flock, int, fd, int, operation)
766 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
767               const struct timespec *, uts, size_t, sigsetsize)
768 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
769               int, flags)
770 safe_syscall2(int, nanosleep, const struct timespec *, req,
771               struct timespec *, rem)
772 #ifdef TARGET_NR_clock_nanosleep
773 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
774               const struct timespec *, req, struct timespec *, rem)
775 #endif
776 #ifdef __NR_ipc
777 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
778               void *, ptr, long, fifth)
779 #endif
780 #ifdef __NR_msgsnd
781 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
782               int, flags)
783 #endif
784 #ifdef __NR_msgrcv
785 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
786               long, msgtype, int, flags)
787 #endif
788 #ifdef __NR_semtimedop
789 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
790               unsigned, nsops, const struct timespec *, timeout)
791 #endif
792 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
793 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
794               size_t, len, unsigned, prio, const struct timespec *, timeout)
795 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
796               size_t, len, unsigned *, prio, const struct timespec *, timeout)
797 #endif
798 /* We do ioctl like this rather than via safe_syscall3 to preserve the
799  * "third argument might be integer or pointer or not present" behaviour of
800  * the libc function.
801  */
802 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
803 /* Similarly for fcntl. Note that callers must always:
804  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
805  *  - use the flock64 struct rather than unsuffixed flock
806  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
807  */
808 #ifdef __NR_fcntl64
809 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
810 #else
811 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
812 #endif
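/*
 * Illustrative usage (editorial sketch) following the rule above:
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 * i.e. always the 64-bit constants and struct flock64, never the
 * unsuffixed F_GETLK/struct flock.
 */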
813 
814 static inline int host_to_target_sock_type(int host_type)
815 {
816     int target_type;
817 
818     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
819     case SOCK_DGRAM:
820         target_type = TARGET_SOCK_DGRAM;
821         break;
822     case SOCK_STREAM:
823         target_type = TARGET_SOCK_STREAM;
824         break;
825     default:
826         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
827         break;
828     }
829 
830 #if defined(SOCK_CLOEXEC)
831     if (host_type & SOCK_CLOEXEC) {
832         target_type |= TARGET_SOCK_CLOEXEC;
833     }
834 #endif
835 
836 #if defined(SOCK_NONBLOCK)
837     if (host_type & SOCK_NONBLOCK) {
838         target_type |= TARGET_SOCK_NONBLOCK;
839     }
840 #endif
841 
842     return target_type;
843 }
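/*
 * Worked example (editorial): a host value of SOCK_STREAM | SOCK_NONBLOCK
 * comes back as TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK, while an
 * unknown base type in the low nibble is passed through unchanged.
 */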
844 
845 static abi_ulong target_brk;
846 static abi_ulong target_original_brk;
847 static abi_ulong brk_page;
848 
849 void target_set_brk(abi_ulong new_brk)
850 {
851     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
852     brk_page = HOST_PAGE_ALIGN(target_brk);
853 }
854 
855 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
856 #define DEBUGF_BRK(message, args...)
857 
858 /* do_brk() must return target values and target errnos. */
859 abi_long do_brk(abi_ulong new_brk)
860 {
861     abi_long mapped_addr;
862     abi_ulong new_alloc_size;
863 
864     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
865 
866     if (!new_brk) {
867         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
868         return target_brk;
869     }
870     if (new_brk < target_original_brk) {
871         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
872                    target_brk);
873         return target_brk;
874     }
875 
876     /* If the new brk is less than the highest page reserved to the
877      * target heap allocation, set it and we're almost done...  */
878     if (new_brk <= brk_page) {
879         /* Heap contents are initialized to zero, as for anonymous
880          * mapped pages.  */
881         if (new_brk > target_brk) {
882             memset(g2h(target_brk), 0, new_brk - target_brk);
883         }
884 	target_brk = new_brk;
885         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
886 	return target_brk;
887     }
888 
889     /* We need to allocate more memory after the brk... Note that
890      * we don't use MAP_FIXED because that will map over the top of
891      * any existing mapping (like the one with the host libc or qemu
892      * itself); instead we treat "mapped but at wrong address" as
893      * a failure and unmap again.
894      */
895     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
896     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
897                                         PROT_READ|PROT_WRITE,
898                                         MAP_ANON|MAP_PRIVATE, 0, 0));
899 
900     if (mapped_addr == brk_page) {
901         /* Heap contents are initialized to zero, as for anonymous
902          * mapped pages.  Technically the new pages are already
903          * initialized to zero since they *are* anonymous mapped
904          * pages, however we have to take care with the contents that
905          * come from the remaining part of the previous page: it may
906          * contain garbage data due to a previous heap usage (grown
907          * then shrunken).  */
908         memset(g2h(target_brk), 0, brk_page - target_brk);
909 
910         target_brk = new_brk;
911         brk_page = HOST_PAGE_ALIGN(target_brk);
912         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
913             target_brk);
914         return target_brk;
915     } else if (mapped_addr != -1) {
916         /* Mapped but at wrong address, meaning there wasn't actually
917          * enough space for this brk.
918          */
919         target_munmap(mapped_addr, new_alloc_size);
920         mapped_addr = -1;
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
922     }
923     else {
924         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
925     }
926 
927 #if defined(TARGET_ALPHA)
928     /* We (partially) emulate OSF/1 on Alpha, which requires we
929        return a proper errno, not an unchanged brk value.  */
930     return -TARGET_ENOMEM;
931 #endif
932     /* For everything else, return the previous break. */
933     return target_brk;
934 }
935 
936 static inline abi_long copy_from_user_fdset(fd_set *fds,
937                                             abi_ulong target_fds_addr,
938                                             int n)
939 {
940     int i, nw, j, k;
941     abi_ulong b, *target_fds;
942 
943     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
944     if (!(target_fds = lock_user(VERIFY_READ,
945                                  target_fds_addr,
946                                  sizeof(abi_ulong) * nw,
947                                  1)))
948         return -TARGET_EFAULT;
949 
950     FD_ZERO(fds);
951     k = 0;
952     for (i = 0; i < nw; i++) {
953         /* grab the abi_ulong */
954         __get_user(b, &target_fds[i]);
955         for (j = 0; j < TARGET_ABI_BITS; j++) {
956             /* check the bit inside the abi_ulong */
957             if ((b >> j) & 1)
958                 FD_SET(k, fds);
959             k++;
960         }
961     }
962 
963     unlock_user(target_fds, target_fds_addr, 0);
964 
965     return 0;
966 }
967 
968 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
969                                                  abi_ulong target_fds_addr,
970                                                  int n)
971 {
972     if (target_fds_addr) {
973         if (copy_from_user_fdset(fds, target_fds_addr, n))
974             return -TARGET_EFAULT;
975         *fds_ptr = fds;
976     } else {
977         *fds_ptr = NULL;
978     }
979     return 0;
980 }
981 
982 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
983                                           const fd_set *fds,
984                                           int n)
985 {
986     int i, nw, j, k;
987     abi_long v;
988     abi_ulong *target_fds;
989 
990     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
991     if (!(target_fds = lock_user(VERIFY_WRITE,
992                                  target_fds_addr,
993                                  sizeof(abi_ulong) * nw,
994                                  0)))
995         return -TARGET_EFAULT;
996 
997     k = 0;
998     for (i = 0; i < nw; i++) {
999         v = 0;
1000         for (j = 0; j < TARGET_ABI_BITS; j++) {
1001             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1002             k++;
1003         }
1004         __put_user(v, &target_fds[i]);
1005     }
1006 
1007     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1008 
1009     return 0;
1010 }
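/*
 * Worked example (editorial): with TARGET_ABI_BITS == 32, a guest fd_set
 * with descriptors 1 and 33 set occupies two abi_ulong words:
 *     word 0 = 0x00000002 (bit 1), word 1 = 0x00000002 (bit 1)
 * copy_from_user_fdset() walks each word and calls FD_SET(k, fds) with
 * k = i * 32 + j, so host fds 1 and 33 end up set; copy_to_user_fdset()
 * performs the reverse packing.
 */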
1011 
1012 #if defined(__alpha__)
1013 #define HOST_HZ 1024
1014 #else
1015 #define HOST_HZ 100
1016 #endif
1017 
1018 static inline abi_long host_to_target_clock_t(long ticks)
1019 {
1020 #if HOST_HZ == TARGET_HZ
1021     return ticks;
1022 #else
1023     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1024 #endif
1025 }
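/*
 * Worked example (editorial): on a host with HOST_HZ == 100 emulating a
 * target with TARGET_HZ == 1024, 250 host ticks convert to
 * (250 * 1024) / 100 == 2560 target ticks.
 */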
1026 
1027 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1028                                              const struct rusage *rusage)
1029 {
1030     struct target_rusage *target_rusage;
1031 
1032     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1033         return -TARGET_EFAULT;
1034     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1035     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1036     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1037     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1038     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1039     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1040     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1041     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1042     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1043     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1044     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1045     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1046     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1047     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1048     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1049     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1050     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1051     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1052     unlock_user_struct(target_rusage, target_addr, 1);
1053 
1054     return 0;
1055 }
1056 
1057 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1058 {
1059     abi_ulong target_rlim_swap;
1060     rlim_t result;
1061 
1062     target_rlim_swap = tswapal(target_rlim);
1063     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1064         return RLIM_INFINITY;
1065 
1066     result = target_rlim_swap;
1067     if (target_rlim_swap != (rlim_t)result)
1068         return RLIM_INFINITY;
1069 
1070     return result;
1071 }
1072 
1073 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1074 {
1075     abi_ulong target_rlim_swap;
1076     abi_ulong result;
1077 
1078     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1079         target_rlim_swap = TARGET_RLIM_INFINITY;
1080     else
1081         target_rlim_swap = rlim;
1082     result = tswapal(target_rlim_swap);
1083 
1084     return result;
1085 }
1086 
1087 static inline int target_to_host_resource(int code)
1088 {
1089     switch (code) {
1090     case TARGET_RLIMIT_AS:
1091         return RLIMIT_AS;
1092     case TARGET_RLIMIT_CORE:
1093         return RLIMIT_CORE;
1094     case TARGET_RLIMIT_CPU:
1095         return RLIMIT_CPU;
1096     case TARGET_RLIMIT_DATA:
1097         return RLIMIT_DATA;
1098     case TARGET_RLIMIT_FSIZE:
1099         return RLIMIT_FSIZE;
1100     case TARGET_RLIMIT_LOCKS:
1101         return RLIMIT_LOCKS;
1102     case TARGET_RLIMIT_MEMLOCK:
1103         return RLIMIT_MEMLOCK;
1104     case TARGET_RLIMIT_MSGQUEUE:
1105         return RLIMIT_MSGQUEUE;
1106     case TARGET_RLIMIT_NICE:
1107         return RLIMIT_NICE;
1108     case TARGET_RLIMIT_NOFILE:
1109         return RLIMIT_NOFILE;
1110     case TARGET_RLIMIT_NPROC:
1111         return RLIMIT_NPROC;
1112     case TARGET_RLIMIT_RSS:
1113         return RLIMIT_RSS;
1114     case TARGET_RLIMIT_RTPRIO:
1115         return RLIMIT_RTPRIO;
1116     case TARGET_RLIMIT_SIGPENDING:
1117         return RLIMIT_SIGPENDING;
1118     case TARGET_RLIMIT_STACK:
1119         return RLIMIT_STACK;
1120     default:
1121         return code;
1122     }
1123 }
1124 
1125 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1126                                               abi_ulong target_tv_addr)
1127 {
1128     struct target_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 
1142 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1143                                             const struct timeval *tv)
1144 {
1145     struct target_timeval *target_tv;
1146 
1147     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1148         return -TARGET_EFAULT;
1149     }
1150 
1151     __put_user(tv->tv_sec, &target_tv->tv_sec);
1152     __put_user(tv->tv_usec, &target_tv->tv_usec);
1153 
1154     unlock_user_struct(target_tv, target_tv_addr, 1);
1155 
1156     return 0;
1157 }
1158 
1159 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1160                                              const struct timeval *tv)
1161 {
1162     struct target__kernel_sock_timeval *target_tv;
1163 
1164     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1165         return -TARGET_EFAULT;
1166     }
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1177                                                abi_ulong target_addr)
1178 {
1179     struct target_timespec *target_ts;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1185     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1186     unlock_user_struct(target_ts, target_addr, 0);
1187     return 0;
1188 }
1189 
1190 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1191                                                struct timespec *host_ts)
1192 {
1193     struct target_timespec *target_ts;
1194 
1195     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1196         return -TARGET_EFAULT;
1197     }
1198     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1199     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1200     unlock_user_struct(target_ts, target_addr, 1);
1201     return 0;
1202 }
1203 
1204 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1205                                                  struct timespec *host_ts)
1206 {
1207     struct target__kernel_timespec *target_ts;
1208 
1209     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1210         return -TARGET_EFAULT;
1211     }
1212     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1213     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1214     unlock_user_struct(target_ts, target_addr, 1);
1215     return 0;
1216 }
1217 
1218 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1219                                                abi_ulong target_tz_addr)
1220 {
1221     struct target_timezone *target_tz;
1222 
1223     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1228     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1229 
1230     unlock_user_struct(target_tz, target_tz_addr, 0);
1231 
1232     return 0;
1233 }
1234 
1235 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1236 #include <mqueue.h>
1237 
1238 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1239                                               abi_ulong target_mq_attr_addr)
1240 {
1241     struct target_mq_attr *target_mq_attr;
1242 
1243     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1244                           target_mq_attr_addr, 1))
1245         return -TARGET_EFAULT;
1246 
1247     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1248     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1249     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1250     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1251 
1252     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1253 
1254     return 0;
1255 }
1256 
1257 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1258                                             const struct mq_attr *attr)
1259 {
1260     struct target_mq_attr *target_mq_attr;
1261 
1262     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1263                           target_mq_attr_addr, 0))
1264         return -TARGET_EFAULT;
1265 
1266     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1267     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1268     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1269     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1270 
1271     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1272 
1273     return 0;
1274 }
1275 #endif
1276 
1277 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1278 /* do_select() must return target values and target errnos. */
1279 static abi_long do_select(int n,
1280                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1281                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1282 {
1283     fd_set rfds, wfds, efds;
1284     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1285     struct timeval tv;
1286     struct timespec ts, *ts_ptr;
1287     abi_long ret;
1288 
1289     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1290     if (ret) {
1291         return ret;
1292     }
1293     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301 
1302     if (target_tv_addr) {
1303         if (copy_from_user_timeval(&tv, target_tv_addr))
1304             return -TARGET_EFAULT;
1305         ts.tv_sec = tv.tv_sec;
1306         ts.tv_nsec = tv.tv_usec * 1000;
1307         ts_ptr = &ts;
1308     } else {
1309         ts_ptr = NULL;
1310     }
1311 
1312     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1313                                   ts_ptr, NULL));
1314 
1315     if (!is_error(ret)) {
1316         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1317             return -TARGET_EFAULT;
1318         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1319             return -TARGET_EFAULT;
1320         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1321             return -TARGET_EFAULT;
1322 
1323         if (target_tv_addr) {
1324             tv.tv_sec = ts.tv_sec;
1325             tv.tv_usec = ts.tv_nsec / 1000;
1326             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1327                 return -TARGET_EFAULT;
1328             }
1329         }
1330     }
1331 
1332     return ret;
1333 }
1334 
1335 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1336 static abi_long do_old_select(abi_ulong arg1)
1337 {
1338     struct target_sel_arg_struct *sel;
1339     abi_ulong inp, outp, exp, tvp;
1340     long nsel;
1341 
1342     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1343         return -TARGET_EFAULT;
1344     }
1345 
1346     nsel = tswapal(sel->n);
1347     inp = tswapal(sel->inp);
1348     outp = tswapal(sel->outp);
1349     exp = tswapal(sel->exp);
1350     tvp = tswapal(sel->tvp);
1351 
1352     unlock_user_struct(sel, arg1, 0);
1353 
1354     return do_select(nsel, inp, outp, exp, tvp);
1355 }
1356 #endif
1357 #endif
1358 
1359 static abi_long do_pipe2(int host_pipe[], int flags)
1360 {
1361 #ifdef CONFIG_PIPE2
1362     return pipe2(host_pipe, flags);
1363 #else
1364     return -ENOSYS;
1365 #endif
1366 }
1367 
1368 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1369                         int flags, int is_pipe2)
1370 {
1371     int host_pipe[2];
1372     abi_long ret;
1373     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1374 
1375     if (is_error(ret))
1376         return get_errno(ret);
1377 
1378     /* Several targets have special calling conventions for the original
1379        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1380     if (!is_pipe2) {
1381 #if defined(TARGET_ALPHA)
1382         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1383         return host_pipe[0];
1384 #elif defined(TARGET_MIPS)
1385         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1386         return host_pipe[0];
1387 #elif defined(TARGET_SH4)
1388         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1389         return host_pipe[0];
1390 #elif defined(TARGET_SPARC)
1391         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1392         return host_pipe[0];
1393 #endif
1394     }
1395 
1396     if (put_user_s32(host_pipe[0], pipedes)
1397         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1398         return -TARGET_EFAULT;
1399     return get_errno(ret);
1400 }
1401 
1402 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1403                                               abi_ulong target_addr,
1404                                               socklen_t len)
1405 {
1406     struct target_ip_mreqn *target_smreqn;
1407 
1408     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1409     if (!target_smreqn)
1410         return -TARGET_EFAULT;
1411     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1412     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1413     if (len == sizeof(struct target_ip_mreqn))
1414         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1415     unlock_user(target_smreqn, target_addr, 0);
1416 
1417     return 0;
1418 }
1419 
1420 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1421                                                abi_ulong target_addr,
1422                                                socklen_t len)
1423 {
1424     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1425     sa_family_t sa_family;
1426     struct target_sockaddr *target_saddr;
1427 
1428     if (fd_trans_target_to_host_addr(fd)) {
1429         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1430     }
1431 
1432     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1433     if (!target_saddr)
1434         return -TARGET_EFAULT;
1435 
1436     sa_family = tswap16(target_saddr->sa_family);
1437 
1438     /* Oops. The caller might send an incomplete sun_path; sun_path
1439      * must be terminated by \0 (see the manual page), but
1440      * unfortunately it is quite common to specify sockaddr_un
1441      * length as "strlen(x->sun_path)" while it should be
1442      * "strlen(...) + 1". We'll fix that here if needed.
1443      * The Linux kernel applies a similar fixup.
1444      */
1445 
1446     if (sa_family == AF_UNIX) {
1447         if (len < unix_maxlen && len > 0) {
1448             char *cp = (char*)target_saddr;
1449 
1450             if ( cp[len-1] && !cp[len] )
1451                 len++;
1452         }
1453         if (len > unix_maxlen)
1454             len = unix_maxlen;
1455     }
1456 
1457     memcpy(addr, target_saddr, len);
1458     addr->sa_family = sa_family;
1459     if (sa_family == AF_NETLINK) {
1460         struct sockaddr_nl *nladdr;
1461 
1462         nladdr = (struct sockaddr_nl *)addr;
1463         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1464         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1465     } else if (sa_family == AF_PACKET) {
1466 	struct target_sockaddr_ll *lladdr;
1467 
1468 	lladdr = (struct target_sockaddr_ll *)addr;
1469 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1470 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1471     }
1472     unlock_user(target_saddr, target_addr, 0);
1473 
1474     return 0;
1475 }
1476 
1477 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1478                                                struct sockaddr *addr,
1479                                                socklen_t len)
1480 {
1481     struct target_sockaddr *target_saddr;
1482 
1483     if (len == 0) {
1484         return 0;
1485     }
1486     assert(addr);
1487 
1488     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1489     if (!target_saddr)
1490         return -TARGET_EFAULT;
1491     memcpy(target_saddr, addr, len);
1492     if (len >= offsetof(struct target_sockaddr, sa_family) +
1493         sizeof(target_saddr->sa_family)) {
1494         target_saddr->sa_family = tswap16(addr->sa_family);
1495     }
1496     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1497         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1498         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1499         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1500     } else if (addr->sa_family == AF_PACKET) {
1501         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1502         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1503         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1504     } else if (addr->sa_family == AF_INET6 &&
1505                len >= sizeof(struct target_sockaddr_in6)) {
1506         struct target_sockaddr_in6 *target_in6 =
1507                (struct target_sockaddr_in6 *)target_saddr;
1508         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1509     }
1510     unlock_user(target_saddr, target_addr, len);
1511 
1512     return 0;
1513 }
1514 
1515 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1516                                            struct target_msghdr *target_msgh)
1517 {
1518     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1519     abi_long msg_controllen;
1520     abi_ulong target_cmsg_addr;
1521     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1522     socklen_t space = 0;
1523 
1524     msg_controllen = tswapal(target_msgh->msg_controllen);
1525     if (msg_controllen < sizeof (struct target_cmsghdr))
1526         goto the_end;
1527     target_cmsg_addr = tswapal(target_msgh->msg_control);
1528     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1529     target_cmsg_start = target_cmsg;
1530     if (!target_cmsg)
1531         return -TARGET_EFAULT;
1532 
1533     while (cmsg && target_cmsg) {
1534         void *data = CMSG_DATA(cmsg);
1535         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1536 
1537         int len = tswapal(target_cmsg->cmsg_len)
1538             - sizeof(struct target_cmsghdr);
1539 
1540         space += CMSG_SPACE(len);
1541         if (space > msgh->msg_controllen) {
1542             space -= CMSG_SPACE(len);
1543             /* This is a QEMU bug, since we allocated the payload
1544              * area ourselves (unlike overflow in host-to-target
1545              * conversion, which is just the guest giving us a buffer
1546              * that's too small). It can't happen for the payload types
1547              * we currently support; if it becomes an issue in future
1548              * we would need to improve our allocation strategy to
1549              * something more intelligent than "twice the size of the
1550              * target buffer we're reading from".
1551              */
1552             gemu_log("Host cmsg overflow\n");
1553             break;
1554         }
1555 
1556         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1557             cmsg->cmsg_level = SOL_SOCKET;
1558         } else {
1559             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1560         }
1561         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1562         cmsg->cmsg_len = CMSG_LEN(len);
1563 
1564         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1565             int *fd = (int *)data;
1566             int *target_fd = (int *)target_data;
1567             int i, numfds = len / sizeof(int);
1568 
1569             for (i = 0; i < numfds; i++) {
1570                 __get_user(fd[i], target_fd + i);
1571             }
1572         } else if (cmsg->cmsg_level == SOL_SOCKET
1573                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1574             struct ucred *cred = (struct ucred *)data;
1575             struct target_ucred *target_cred =
1576                 (struct target_ucred *)target_data;
1577 
1578             __get_user(cred->pid, &target_cred->pid);
1579             __get_user(cred->uid, &target_cred->uid);
1580             __get_user(cred->gid, &target_cred->gid);
1581         } else {
1582             gemu_log("Unsupported ancillary data: %d/%d\n",
1583                                         cmsg->cmsg_level, cmsg->cmsg_type);
1584             memcpy(data, target_data, len);
1585         }
1586 
1587         cmsg = CMSG_NXTHDR(msgh, cmsg);
1588         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1589                                          target_cmsg_start);
1590     }
1591     unlock_user(target_cmsg, target_cmsg_addr, 0);
1592  the_end:
1593     msgh->msg_controllen = space;
1594     return 0;
1595 }
1596 
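/* The reverse direction: repack control messages received from the host
 * into the guest's msg_control buffer.  Payloads whose size differs
 * between host and target (e.g. SO_TIMESTAMP's struct timeval) are
 * resized, and MSG_CTRUNC is reported to the guest when its buffer is
 * too small, matching the kernel's put_cmsg() behaviour.
 */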
1597 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1598                                            struct msghdr *msgh)
1599 {
1600     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1601     abi_long msg_controllen;
1602     abi_ulong target_cmsg_addr;
1603     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1604     socklen_t space = 0;
1605 
1606     msg_controllen = tswapal(target_msgh->msg_controllen);
1607     if (msg_controllen < sizeof (struct target_cmsghdr))
1608         goto the_end;
1609     target_cmsg_addr = tswapal(target_msgh->msg_control);
1610     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1611     target_cmsg_start = target_cmsg;
1612     if (!target_cmsg)
1613         return -TARGET_EFAULT;
1614 
1615     while (cmsg && target_cmsg) {
1616         void *data = CMSG_DATA(cmsg);
1617         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1618 
1619         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1620         int tgt_len, tgt_space;
1621 
1622         /* We never copy a half-header but may copy half-data;
1623          * this is Linux's behaviour in put_cmsg(). Note that
1624          * truncation here is a guest problem (which we report
1625          * to the guest via the CTRUNC bit), unlike truncation
1626          * in target_to_host_cmsg, which is a QEMU bug.
1627          */
1628         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1629             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1630             break;
1631         }
1632 
1633         if (cmsg->cmsg_level == SOL_SOCKET) {
1634             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1635         } else {
1636             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1637         }
1638         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1639 
1640         /* Payload types which need a different size of payload on
1641          * the target must adjust tgt_len here.
1642          */
1643         tgt_len = len;
1644         switch (cmsg->cmsg_level) {
1645         case SOL_SOCKET:
1646             switch (cmsg->cmsg_type) {
1647             case SO_TIMESTAMP:
1648                 tgt_len = sizeof(struct target_timeval);
1649                 break;
1650             default:
1651                 break;
1652             }
1653             break;
1654         default:
1655             break;
1656         }
1657 
1658         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1659             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1660             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1661         }
1662 
1663         /* We must now copy-and-convert len bytes of payload
1664          * into tgt_len bytes of destination space. Bear in mind
1665          * that in both source and destination we may be dealing
1666          * with a truncated value!
1667          */
1668         switch (cmsg->cmsg_level) {
1669         case SOL_SOCKET:
1670             switch (cmsg->cmsg_type) {
1671             case SCM_RIGHTS:
1672             {
1673                 int *fd = (int *)data;
1674                 int *target_fd = (int *)target_data;
1675                 int i, numfds = tgt_len / sizeof(int);
1676 
1677                 for (i = 0; i < numfds; i++) {
1678                     __put_user(fd[i], target_fd + i);
1679                 }
1680                 break;
1681             }
1682             case SO_TIMESTAMP:
1683             {
1684                 struct timeval *tv = (struct timeval *)data;
1685                 struct target_timeval *target_tv =
1686                     (struct target_timeval *)target_data;
1687 
1688                 if (len != sizeof(struct timeval) ||
1689                     tgt_len != sizeof(struct target_timeval)) {
1690                     goto unimplemented;
1691                 }
1692 
1693                 /* copy struct timeval to target */
1694                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1695                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1696                 break;
1697             }
1698             case SCM_CREDENTIALS:
1699             {
1700                 struct ucred *cred = (struct ucred *)data;
1701                 struct target_ucred *target_cred =
1702                     (struct target_ucred *)target_data;
1703 
1704                 __put_user(cred->pid, &target_cred->pid);
1705                 __put_user(cred->uid, &target_cred->uid);
1706                 __put_user(cred->gid, &target_cred->gid);
1707                 break;
1708             }
1709             default:
1710                 goto unimplemented;
1711             }
1712             break;
1713 
1714         case SOL_IP:
1715             switch (cmsg->cmsg_type) {
1716             case IP_TTL:
1717             {
1718                 uint32_t *v = (uint32_t *)data;
1719                 uint32_t *t_int = (uint32_t *)target_data;
1720 
1721                 if (len != sizeof(uint32_t) ||
1722                     tgt_len != sizeof(uint32_t)) {
1723                     goto unimplemented;
1724                 }
1725                 __put_user(*v, t_int);
1726                 break;
1727             }
1728             case IP_RECVERR:
1729             {
1730                 struct errhdr_t {
1731                    struct sock_extended_err ee;
1732                    struct sockaddr_in offender;
1733                 };
1734                 struct errhdr_t *errh = (struct errhdr_t *)data;
1735                 struct errhdr_t *target_errh =
1736                     (struct errhdr_t *)target_data;
1737 
1738                 if (len != sizeof(struct errhdr_t) ||
1739                     tgt_len != sizeof(struct errhdr_t)) {
1740                     goto unimplemented;
1741                 }
1742                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1743                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1744                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1745                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1746                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1747                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1748                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1749                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1750                     (void *) &errh->offender, sizeof(errh->offender));
1751                 break;
1752             }
1753             default:
1754                 goto unimplemented;
1755             }
1756             break;
1757 
1758         case SOL_IPV6:
1759             switch (cmsg->cmsg_type) {
1760             case IPV6_HOPLIMIT:
1761             {
1762                 uint32_t *v = (uint32_t *)data;
1763                 uint32_t *t_int = (uint32_t *)target_data;
1764 
1765                 if (len != sizeof(uint32_t) ||
1766                     tgt_len != sizeof(uint32_t)) {
1767                     goto unimplemented;
1768                 }
1769                 __put_user(*v, t_int);
1770                 break;
1771             }
1772             case IPV6_RECVERR:
1773             {
1774                 struct errhdr6_t {
1775                    struct sock_extended_err ee;
1776                    struct sockaddr_in6 offender;
1777                 };
1778                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1779                 struct errhdr6_t *target_errh =
1780                     (struct errhdr6_t *)target_data;
1781 
1782                 if (len != sizeof(struct errhdr6_t) ||
1783                     tgt_len != sizeof(struct errhdr6_t)) {
1784                     goto unimplemented;
1785                 }
1786                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1787                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1788                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1789                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1790                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1791                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1792                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1793                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1794                     (void *) &errh->offender, sizeof(errh->offender));
1795                 break;
1796             }
1797             default:
1798                 goto unimplemented;
1799             }
1800             break;
1801 
1802         default:
1803         unimplemented:
1804             gemu_log("Unsupported ancillary data: %d/%d\n",
1805                                         cmsg->cmsg_level, cmsg->cmsg_type);
1806             memcpy(target_data, data, MIN(len, tgt_len));
1807             if (tgt_len > len) {
1808                 memset(target_data + len, 0, tgt_len - len);
1809             }
1810         }
1811 
1812         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1813         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1814         if (msg_controllen < tgt_space) {
1815             tgt_space = msg_controllen;
1816         }
1817         msg_controllen -= tgt_space;
1818         space += tgt_space;
1819         cmsg = CMSG_NXTHDR(msgh, cmsg);
1820         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1821                                          target_cmsg_start);
1822     }
1823     unlock_user(target_cmsg, target_cmsg_addr, space);
1824  the_end:
1825     target_msgh->msg_controllen = tswapal(space);
1826     return 0;
1827 }
1828 
1829 /* do_setsockopt() must return target values and target errnos. */
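/* Option payloads are read from guest memory with get_user_*() or
 * copy_from_user(), byte-swapped where they contain multi-byte fields,
 * and then passed on to the host setsockopt().
 */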
1830 static abi_long do_setsockopt(int sockfd, int level, int optname,
1831                               abi_ulong optval_addr, socklen_t optlen)
1832 {
1833     abi_long ret;
1834     int val;
1835     struct ip_mreqn *ip_mreq;
1836     struct ip_mreq_source *ip_mreq_source;
1837 
1838     switch(level) {
1839     case SOL_TCP:
1840         /* TCP options all take an 'int' value.  */
1841         if (optlen < sizeof(uint32_t))
1842             return -TARGET_EINVAL;
1843 
1844         if (get_user_u32(val, optval_addr))
1845             return -TARGET_EFAULT;
1846         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1847         break;
1848     case SOL_IP:
1849         switch(optname) {
1850         case IP_TOS:
1851         case IP_TTL:
1852         case IP_HDRINCL:
1853         case IP_ROUTER_ALERT:
1854         case IP_RECVOPTS:
1855         case IP_RETOPTS:
1856         case IP_PKTINFO:
1857         case IP_MTU_DISCOVER:
1858         case IP_RECVERR:
1859         case IP_RECVTTL:
1860         case IP_RECVTOS:
1861 #ifdef IP_FREEBIND
1862         case IP_FREEBIND:
1863 #endif
1864         case IP_MULTICAST_TTL:
1865         case IP_MULTICAST_LOOP:
1866             val = 0;
1867             if (optlen >= sizeof(uint32_t)) {
1868                 if (get_user_u32(val, optval_addr))
1869                     return -TARGET_EFAULT;
1870             } else if (optlen >= 1) {
1871                 if (get_user_u8(val, optval_addr))
1872                     return -TARGET_EFAULT;
1873             }
1874             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1875             break;
1876         case IP_ADD_MEMBERSHIP:
1877         case IP_DROP_MEMBERSHIP:
1878             if (optlen < sizeof (struct target_ip_mreq) ||
1879                 optlen > sizeof (struct target_ip_mreqn))
1880                 return -TARGET_EINVAL;
1881 
1882             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1883             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1884             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1885             break;
1886 
1887         case IP_BLOCK_SOURCE:
1888         case IP_UNBLOCK_SOURCE:
1889         case IP_ADD_SOURCE_MEMBERSHIP:
1890         case IP_DROP_SOURCE_MEMBERSHIP:
1891             if (optlen != sizeof (struct target_ip_mreq_source))
1892                 return -TARGET_EINVAL;
1893 
1894             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
1895             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1896             unlock_user(ip_mreq_source, optval_addr, 0);
1897             break;
1898 
1899         default:
1900             goto unimplemented;
1901         }
1902         break;
1903     case SOL_IPV6:
1904         switch (optname) {
1905         case IPV6_MTU_DISCOVER:
1906         case IPV6_MTU:
1907         case IPV6_V6ONLY:
1908         case IPV6_RECVPKTINFO:
1909         case IPV6_UNICAST_HOPS:
1910         case IPV6_MULTICAST_HOPS:
1911         case IPV6_MULTICAST_LOOP:
1912         case IPV6_RECVERR:
1913         case IPV6_RECVHOPLIMIT:
1914         case IPV6_2292HOPLIMIT:
1915         case IPV6_CHECKSUM:
1916         case IPV6_ADDRFORM:
1917         case IPV6_2292PKTINFO:
1918         case IPV6_RECVTCLASS:
1919         case IPV6_RECVRTHDR:
1920         case IPV6_2292RTHDR:
1921         case IPV6_RECVHOPOPTS:
1922         case IPV6_2292HOPOPTS:
1923         case IPV6_RECVDSTOPTS:
1924         case IPV6_2292DSTOPTS:
1925         case IPV6_TCLASS:
1926 #ifdef IPV6_RECVPATHMTU
1927         case IPV6_RECVPATHMTU:
1928 #endif
1929 #ifdef IPV6_TRANSPARENT
1930         case IPV6_TRANSPARENT:
1931 #endif
1932 #ifdef IPV6_FREEBIND
1933         case IPV6_FREEBIND:
1934 #endif
1935 #ifdef IPV6_RECVORIGDSTADDR
1936         case IPV6_RECVORIGDSTADDR:
1937 #endif
1938             val = 0;
1939             if (optlen < sizeof(uint32_t)) {
1940                 return -TARGET_EINVAL;
1941             }
1942             if (get_user_u32(val, optval_addr)) {
1943                 return -TARGET_EFAULT;
1944             }
1945             ret = get_errno(setsockopt(sockfd, level, optname,
1946                                        &val, sizeof(val)));
1947             break;
1948         case IPV6_PKTINFO:
1949         {
1950             struct in6_pktinfo pki;
1951 
1952             if (optlen < sizeof(pki)) {
1953                 return -TARGET_EINVAL;
1954             }
1955 
1956             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1957                 return -TARGET_EFAULT;
1958             }
1959 
1960             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1961 
1962             ret = get_errno(setsockopt(sockfd, level, optname,
1963                                        &pki, sizeof(pki)));
1964             break;
1965         }
1966         case IPV6_ADD_MEMBERSHIP:
1967         case IPV6_DROP_MEMBERSHIP:
1968         {
1969             struct ipv6_mreq ipv6mreq;
1970 
1971             if (optlen < sizeof(ipv6mreq)) {
1972                 return -TARGET_EINVAL;
1973             }
1974 
1975             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1976                 return -TARGET_EFAULT;
1977             }
1978 
1979             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1980 
1981             ret = get_errno(setsockopt(sockfd, level, optname,
1982                                        &ipv6mreq, sizeof(ipv6mreq)));
1983             break;
1984         }
1985         default:
1986             goto unimplemented;
1987         }
1988         break;
1989     case SOL_ICMPV6:
1990         switch (optname) {
1991         case ICMPV6_FILTER:
1992         {
1993             struct icmp6_filter icmp6f;
1994 
1995             if (optlen > sizeof(icmp6f)) {
1996                 optlen = sizeof(icmp6f);
1997             }
1998 
1999             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2000                 return -TARGET_EFAULT;
2001             }
2002 
2003             for (val = 0; val < 8; val++) {
2004                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2005             }
2006 
2007             ret = get_errno(setsockopt(sockfd, level, optname,
2008                                        &icmp6f, optlen));
2009             break;
2010         }
2011         default:
2012             goto unimplemented;
2013         }
2014         break;
2015     case SOL_RAW:
2016         switch (optname) {
2017         case ICMP_FILTER:
2018         case IPV6_CHECKSUM:
2019             /* these options take a u32 value */
2020             if (optlen < sizeof(uint32_t)) {
2021                 return -TARGET_EINVAL;
2022             }
2023 
2024             if (get_user_u32(val, optval_addr)) {
2025                 return -TARGET_EFAULT;
2026             }
2027             ret = get_errno(setsockopt(sockfd, level, optname,
2028                                        &val, sizeof(val)));
2029             break;
2030 
2031         default:
2032             goto unimplemented;
2033         }
2034         break;
2035 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2036     case SOL_ALG:
2037         switch (optname) {
2038         case ALG_SET_KEY:
2039         {
2040             char *alg_key = g_try_malloc(optlen);
2041 
2042             if (!alg_key) {
2043                 return -TARGET_ENOMEM;
2044             }
2045             if (copy_from_user(alg_key, optval_addr, optlen)) {
2046                 g_free(alg_key);
2047                 return -TARGET_EFAULT;
2048             }
2049             ret = get_errno(setsockopt(sockfd, level, optname,
2050                                        alg_key, optlen));
2051             g_free(alg_key);
2052             break;
2053         }
2054         case ALG_SET_AEAD_AUTHSIZE:
2055         {
2056             ret = get_errno(setsockopt(sockfd, level, optname,
2057                                        NULL, optlen));
2058             break;
2059         }
2060         default:
2061             goto unimplemented;
2062         }
2063         break;
2064 #endif
2065     case TARGET_SOL_SOCKET:
2066         switch (optname) {
2067         case TARGET_SO_RCVTIMEO:
2068         {
2069                 struct timeval tv;
2070 
2071                 optname = SO_RCVTIMEO;
2072 
2073 set_timeout:
2074                 if (optlen != sizeof(struct target_timeval)) {
2075                     return -TARGET_EINVAL;
2076                 }
2077 
2078                 if (copy_from_user_timeval(&tv, optval_addr)) {
2079                     return -TARGET_EFAULT;
2080                 }
2081 
2082                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2083                                 &tv, sizeof(tv)));
2084                 return ret;
2085         }
2086         case TARGET_SO_SNDTIMEO:
2087                 optname = SO_SNDTIMEO;
2088                 goto set_timeout;
2089         case TARGET_SO_ATTACH_FILTER:
2090         {
2091                 struct target_sock_fprog *tfprog;
2092                 struct target_sock_filter *tfilter;
2093                 struct sock_fprog fprog;
2094                 struct sock_filter *filter;
2095                 int i;
2096 
2097                 if (optlen != sizeof(*tfprog)) {
2098                     return -TARGET_EINVAL;
2099                 }
2100                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2101                     return -TARGET_EFAULT;
2102                 }
2103                 if (!lock_user_struct(VERIFY_READ, tfilter,
2104                                       tswapal(tfprog->filter), 0)) {
2105                     unlock_user_struct(tfprog, optval_addr, 1);
2106                     return -TARGET_EFAULT;
2107                 }
2108 
2109                 fprog.len = tswap16(tfprog->len);
2110                 filter = g_try_new(struct sock_filter, fprog.len);
2111                 if (filter == NULL) {
2112                     unlock_user_struct(tfilter, tfprog->filter, 1);
2113                     unlock_user_struct(tfprog, optval_addr, 1);
2114                     return -TARGET_ENOMEM;
2115                 }
2116                 for (i = 0; i < fprog.len; i++) {
2117                     filter[i].code = tswap16(tfilter[i].code);
2118                     filter[i].jt = tfilter[i].jt;
2119                     filter[i].jf = tfilter[i].jf;
2120                     filter[i].k = tswap32(tfilter[i].k);
2121                 }
2122                 fprog.filter = filter;
2123 
2124                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2125                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2126                 g_free(filter);
2127 
2128                 unlock_user_struct(tfilter, tfprog->filter, 1);
2129                 unlock_user_struct(tfprog, optval_addr, 1);
2130                 return ret;
2131         }
2132         case TARGET_SO_BINDTODEVICE:
2133         {
2134                 char *dev_ifname, *addr_ifname;
2135 
2136                 if (optlen > IFNAMSIZ - 1) {
2137                     optlen = IFNAMSIZ - 1;
2138                 }
2139                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2140                 if (!dev_ifname) {
2141                     return -TARGET_EFAULT;
2142                 }
2143                 optname = SO_BINDTODEVICE;
2144                 addr_ifname = alloca(IFNAMSIZ);
2145                 memcpy(addr_ifname, dev_ifname, optlen);
2146                 addr_ifname[optlen] = 0;
2147                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2148                                            addr_ifname, optlen));
2149                 unlock_user(dev_ifname, optval_addr, 0);
2150                 return ret;
2151         }
2152         case TARGET_SO_LINGER:
2153         {
2154                 struct linger lg;
2155                 struct target_linger *tlg;
2156 
2157                 if (optlen != sizeof(struct target_linger)) {
2158                     return -TARGET_EINVAL;
2159                 }
2160                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2161                     return -TARGET_EFAULT;
2162                 }
2163                 __get_user(lg.l_onoff, &tlg->l_onoff);
2164                 __get_user(lg.l_linger, &tlg->l_linger);
2165                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2166                                 &lg, sizeof(lg)));
2167                 unlock_user_struct(tlg, optval_addr, 0);
2168                 return ret;
2169         }
2170             /* Options with 'int' argument.  */
2171         case TARGET_SO_DEBUG:
2172                 optname = SO_DEBUG;
2173                 break;
2174         case TARGET_SO_REUSEADDR:
2175                 optname = SO_REUSEADDR;
2176                 break;
2177 #ifdef SO_REUSEPORT
2178         case TARGET_SO_REUSEPORT:
2179                 optname = SO_REUSEPORT;
2180                 break;
2181 #endif
2182         case TARGET_SO_TYPE:
2183                 optname = SO_TYPE;
2184                 break;
2185         case TARGET_SO_ERROR:
2186                 optname = SO_ERROR;
2187                 break;
2188         case TARGET_SO_DONTROUTE:
2189                 optname = SO_DONTROUTE;
2190                 break;
2191         case TARGET_SO_BROADCAST:
2192                 optname = SO_BROADCAST;
2193                 break;
2194         case TARGET_SO_SNDBUF:
2195                 optname = SO_SNDBUF;
2196                 break;
2197         case TARGET_SO_SNDBUFFORCE:
2198                 optname = SO_SNDBUFFORCE;
2199                 break;
2200         case TARGET_SO_RCVBUF:
2201                 optname = SO_RCVBUF;
2202                 break;
2203         case TARGET_SO_RCVBUFFORCE:
2204                 optname = SO_RCVBUFFORCE;
2205                 break;
2206         case TARGET_SO_KEEPALIVE:
2207                 optname = SO_KEEPALIVE;
2208                 break;
2209         case TARGET_SO_OOBINLINE:
2210                 optname = SO_OOBINLINE;
2211                 break;
2212         case TARGET_SO_NO_CHECK:
2213                 optname = SO_NO_CHECK;
2214                 break;
2215         case TARGET_SO_PRIORITY:
2216                 optname = SO_PRIORITY;
2217                 break;
2218 #ifdef SO_BSDCOMPAT
2219         case TARGET_SO_BSDCOMPAT:
2220                 optname = SO_BSDCOMPAT;
2221                 break;
2222 #endif
2223         case TARGET_SO_PASSCRED:
2224                 optname = SO_PASSCRED;
2225                 break;
2226         case TARGET_SO_PASSSEC:
2227                 optname = SO_PASSSEC;
2228                 break;
2229         case TARGET_SO_TIMESTAMP:
2230                 optname = SO_TIMESTAMP;
2231                 break;
2232         case TARGET_SO_RCVLOWAT:
2233                 optname = SO_RCVLOWAT;
2234                 break;
2235         default:
2236             goto unimplemented;
2237         }
2238         if (optlen < sizeof(uint32_t))
2239             return -TARGET_EINVAL;
2240 
2241         if (get_user_u32(val, optval_addr))
2242             return -TARGET_EFAULT;
2243         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2244         break;
2245     default:
2246     unimplemented:
2247         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2248         ret = -TARGET_ENOPROTOOPT;
2249     }
2250     return ret;
2251 }
2252 
2253 /* do_getsockopt() must return target values and target errnos. */
2254 static abi_long do_getsockopt(int sockfd, int level, int optname,
2255                               abi_ulong optval_addr, abi_ulong optlen)
2256 {
2257     abi_long ret;
2258     int len, val;
2259     socklen_t lv;
2260 
2261     switch(level) {
2262     case TARGET_SOL_SOCKET:
2263         level = SOL_SOCKET;
2264         switch (optname) {
2265         /* These don't just return a single integer */
2266         case TARGET_SO_RCVTIMEO:
2267         case TARGET_SO_SNDTIMEO:
2268         case TARGET_SO_PEERNAME:
2269             goto unimplemented;
2270         case TARGET_SO_PEERCRED: {
2271             struct ucred cr;
2272             socklen_t crlen;
2273             struct target_ucred *tcr;
2274 
2275             if (get_user_u32(len, optlen)) {
2276                 return -TARGET_EFAULT;
2277             }
2278             if (len < 0) {
2279                 return -TARGET_EINVAL;
2280             }
2281 
2282             crlen = sizeof(cr);
2283             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2284                                        &cr, &crlen));
2285             if (ret < 0) {
2286                 return ret;
2287             }
2288             if (len > crlen) {
2289                 len = crlen;
2290             }
2291             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2292                 return -TARGET_EFAULT;
2293             }
2294             __put_user(cr.pid, &tcr->pid);
2295             __put_user(cr.uid, &tcr->uid);
2296             __put_user(cr.gid, &tcr->gid);
2297             unlock_user_struct(tcr, optval_addr, 1);
2298             if (put_user_u32(len, optlen)) {
2299                 return -TARGET_EFAULT;
2300             }
2301             break;
2302         }
2303         case TARGET_SO_LINGER:
2304         {
2305             struct linger lg;
2306             socklen_t lglen;
2307             struct target_linger *tlg;
2308 
2309             if (get_user_u32(len, optlen)) {
2310                 return -TARGET_EFAULT;
2311             }
2312             if (len < 0) {
2313                 return -TARGET_EINVAL;
2314             }
2315 
2316             lglen = sizeof(lg);
2317             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2318                                        &lg, &lglen));
2319             if (ret < 0) {
2320                 return ret;
2321             }
2322             if (len > lglen) {
2323                 len = lglen;
2324             }
2325             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2326                 return -TARGET_EFAULT;
2327             }
2328             __put_user(lg.l_onoff, &tlg->l_onoff);
2329             __put_user(lg.l_linger, &tlg->l_linger);
2330             unlock_user_struct(tlg, optval_addr, 1);
2331             if (put_user_u32(len, optlen)) {
2332                 return -TARGET_EFAULT;
2333             }
2334             break;
2335         }
2336         /* Options with 'int' argument.  */
2337         case TARGET_SO_DEBUG:
2338             optname = SO_DEBUG;
2339             goto int_case;
2340         case TARGET_SO_REUSEADDR:
2341             optname = SO_REUSEADDR;
2342             goto int_case;
2343 #ifdef SO_REUSEPORT
2344         case TARGET_SO_REUSEPORT:
2345             optname = SO_REUSEPORT;
2346             goto int_case;
2347 #endif
2348         case TARGET_SO_TYPE:
2349             optname = SO_TYPE;
2350             goto int_case;
2351         case TARGET_SO_ERROR:
2352             optname = SO_ERROR;
2353             goto int_case;
2354         case TARGET_SO_DONTROUTE:
2355             optname = SO_DONTROUTE;
2356             goto int_case;
2357         case TARGET_SO_BROADCAST:
2358             optname = SO_BROADCAST;
2359             goto int_case;
2360         case TARGET_SO_SNDBUF:
2361             optname = SO_SNDBUF;
2362             goto int_case;
2363         case TARGET_SO_RCVBUF:
2364             optname = SO_RCVBUF;
2365             goto int_case;
2366         case TARGET_SO_KEEPALIVE:
2367             optname = SO_KEEPALIVE;
2368             goto int_case;
2369         case TARGET_SO_OOBINLINE:
2370             optname = SO_OOBINLINE;
2371             goto int_case;
2372         case TARGET_SO_NO_CHECK:
2373             optname = SO_NO_CHECK;
2374             goto int_case;
2375         case TARGET_SO_PRIORITY:
2376             optname = SO_PRIORITY;
2377             goto int_case;
2378 #ifdef SO_BSDCOMPAT
2379         case TARGET_SO_BSDCOMPAT:
2380             optname = SO_BSDCOMPAT;
2381             goto int_case;
2382 #endif
2383         case TARGET_SO_PASSCRED:
2384             optname = SO_PASSCRED;
2385             goto int_case;
2386         case TARGET_SO_TIMESTAMP:
2387             optname = SO_TIMESTAMP;
2388             goto int_case;
2389         case TARGET_SO_RCVLOWAT:
2390             optname = SO_RCVLOWAT;
2391             goto int_case;
2392         case TARGET_SO_ACCEPTCONN:
2393             optname = SO_ACCEPTCONN;
2394             goto int_case;
2395         default:
2396             goto int_case;
2397         }
2398         break;
2399     case SOL_TCP:
2400         /* TCP options all take an 'int' value.  */
2401     int_case:
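        /* Shared path for options that read a plain int from the host:
         * the value is fetched with getsockopt() and written back to the
         * guest as either a 32-bit value or a single byte, depending on
         * the optlen the guest supplied.
         */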
2402         if (get_user_u32(len, optlen))
2403             return -TARGET_EFAULT;
2404         if (len < 0)
2405             return -TARGET_EINVAL;
2406         lv = sizeof(lv);
2407         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2408         if (ret < 0)
2409             return ret;
2410         if (optname == SO_TYPE) {
2411             val = host_to_target_sock_type(val);
2412         }
2413         if (len > lv)
2414             len = lv;
2415         if (len == 4) {
2416             if (put_user_u32(val, optval_addr))
2417                 return -TARGET_EFAULT;
2418         } else {
2419             if (put_user_u8(val, optval_addr))
2420                 return -TARGET_EFAULT;
2421         }
2422         if (put_user_u32(len, optlen))
2423             return -TARGET_EFAULT;
2424         break;
2425     case SOL_IP:
2426         switch(optname) {
2427         case IP_TOS:
2428         case IP_TTL:
2429         case IP_HDRINCL:
2430         case IP_ROUTER_ALERT:
2431         case IP_RECVOPTS:
2432         case IP_RETOPTS:
2433         case IP_PKTINFO:
2434         case IP_MTU_DISCOVER:
2435         case IP_RECVERR:
2436         case IP_RECVTOS:
2437 #ifdef IP_FREEBIND
2438         case IP_FREEBIND:
2439 #endif
2440         case IP_MULTICAST_TTL:
2441         case IP_MULTICAST_LOOP:
2442             if (get_user_u32(len, optlen))
2443                 return -TARGET_EFAULT;
2444             if (len < 0)
2445                 return -TARGET_EINVAL;
2446             lv = sizeof(lv);
2447             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2448             if (ret < 0)
2449                 return ret;
2450             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2451                 len = 1;
2452                 if (put_user_u32(len, optlen)
2453                     || put_user_u8(val, optval_addr))
2454                     return -TARGET_EFAULT;
2455             } else {
2456                 if (len > sizeof(int))
2457                     len = sizeof(int);
2458                 if (put_user_u32(len, optlen)
2459                     || put_user_u32(val, optval_addr))
2460                     return -TARGET_EFAULT;
2461             }
2462             break;
2463         default:
2464             ret = -TARGET_ENOPROTOOPT;
2465             break;
2466         }
2467         break;
2468     case SOL_IPV6:
2469         switch (optname) {
2470         case IPV6_MTU_DISCOVER:
2471         case IPV6_MTU:
2472         case IPV6_V6ONLY:
2473         case IPV6_RECVPKTINFO:
2474         case IPV6_UNICAST_HOPS:
2475         case IPV6_MULTICAST_HOPS:
2476         case IPV6_MULTICAST_LOOP:
2477         case IPV6_RECVERR:
2478         case IPV6_RECVHOPLIMIT:
2479         case IPV6_2292HOPLIMIT:
2480         case IPV6_CHECKSUM:
2481         case IPV6_ADDRFORM:
2482         case IPV6_2292PKTINFO:
2483         case IPV6_RECVTCLASS:
2484         case IPV6_RECVRTHDR:
2485         case IPV6_2292RTHDR:
2486         case IPV6_RECVHOPOPTS:
2487         case IPV6_2292HOPOPTS:
2488         case IPV6_RECVDSTOPTS:
2489         case IPV6_2292DSTOPTS:
2490         case IPV6_TCLASS:
2491 #ifdef IPV6_RECVPATHMTU
2492         case IPV6_RECVPATHMTU:
2493 #endif
2494 #ifdef IPV6_TRANSPARENT
2495         case IPV6_TRANSPARENT:
2496 #endif
2497 #ifdef IPV6_FREEBIND
2498         case IPV6_FREEBIND:
2499 #endif
2500 #ifdef IPV6_RECVORIGDSTADDR
2501         case IPV6_RECVORIGDSTADDR:
2502 #endif
2503             if (get_user_u32(len, optlen))
2504                 return -TARGET_EFAULT;
2505             if (len < 0)
2506                 return -TARGET_EINVAL;
2507             lv = sizeof(lv);
2508             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2509             if (ret < 0)
2510                 return ret;
2511             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2512                 len = 1;
2513                 if (put_user_u32(len, optlen)
2514                     || put_user_u8(val, optval_addr))
2515                     return -TARGET_EFAULT;
2516             } else {
2517                 if (len > sizeof(int))
2518                     len = sizeof(int);
2519                 if (put_user_u32(len, optlen)
2520                     || put_user_u32(val, optval_addr))
2521                     return -TARGET_EFAULT;
2522             }
2523             break;
2524         default:
2525             ret = -TARGET_ENOPROTOOPT;
2526             break;
2527         }
2528         break;
2529     default:
2530     unimplemented:
2531         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2532                  level, optname);
2533         ret = -TARGET_EOPNOTSUPP;
2534         break;
2535     }
2536     return ret;
2537 }
2538 
2539 /* Convert target low/high pair representing file offset into the host
2540  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2541  * as the kernel doesn't handle them either.
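 * The shifts below are split in two so that we never shift by a full
 * 64 bits, which would be undefined behaviour when TARGET_LONG_BITS or
 * HOST_LONG_BITS is 64.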
2542  */
2543 static void target_to_host_low_high(abi_ulong tlow,
2544                                     abi_ulong thigh,
2545                                     unsigned long *hlow,
2546                                     unsigned long *hhigh)
2547 {
2548     uint64_t off = tlow |
2549         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2550         TARGET_LONG_BITS / 2;
2551 
2552     *hlow = off;
2553     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2554 }
2555 
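/* Lock a guest iovec array into host memory, producing a host struct
 * iovec array.  A bad first buffer is reported as EFAULT; later bad
 * buffers are replaced by zero-length entries so the syscall performs a
 * partial transfer instead of failing outright.  The result must be
 * released with unlock_iovec().
 */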
2556 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2557                                 abi_ulong count, int copy)
2558 {
2559     struct target_iovec *target_vec;
2560     struct iovec *vec;
2561     abi_ulong total_len, max_len;
2562     int i;
2563     int err = 0;
2564     bool bad_address = false;
2565 
2566     if (count == 0) {
2567         errno = 0;
2568         return NULL;
2569     }
2570     if (count > IOV_MAX) {
2571         errno = EINVAL;
2572         return NULL;
2573     }
2574 
2575     vec = g_try_new0(struct iovec, count);
2576     if (vec == NULL) {
2577         errno = ENOMEM;
2578         return NULL;
2579     }
2580 
2581     target_vec = lock_user(VERIFY_READ, target_addr,
2582                            count * sizeof(struct target_iovec), 1);
2583     if (target_vec == NULL) {
2584         err = EFAULT;
2585         goto fail2;
2586     }
2587 
2588     /* ??? If host page size > target page size, this will result in a
2589        value larger than what we can actually support.  */
2590     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2591     total_len = 0;
2592 
2593     for (i = 0; i < count; i++) {
2594         abi_ulong base = tswapal(target_vec[i].iov_base);
2595         abi_long len = tswapal(target_vec[i].iov_len);
2596 
2597         if (len < 0) {
2598             err = EINVAL;
2599             goto fail;
2600         } else if (len == 0) {
2601             /* Zero length pointer is ignored.  */
2602             vec[i].iov_base = 0;
2603         } else {
2604             vec[i].iov_base = lock_user(type, base, len, copy);
2605             /* If the first buffer pointer is bad, this is a fault.  But
2606              * subsequent bad buffers will result in a partial write; this
2607              * is realized by filling the vector with null pointers and
2608              * zero lengths. */
2609             if (!vec[i].iov_base) {
2610                 if (i == 0) {
2611                     err = EFAULT;
2612                     goto fail;
2613                 } else {
2614                     bad_address = true;
2615                 }
2616             }
2617             if (bad_address) {
2618                 len = 0;
2619             }
2620             if (len > max_len - total_len) {
2621                 len = max_len - total_len;
2622             }
2623         }
2624         vec[i].iov_len = len;
2625         total_len += len;
2626     }
2627 
2628     unlock_user(target_vec, target_addr, 0);
2629     return vec;
2630 
2631  fail:
2632     while (--i >= 0) {
2633         if (tswapal(target_vec[i].iov_len) > 0) {
2634             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2635         }
2636     }
2637     unlock_user(target_vec, target_addr, 0);
2638  fail2:
2639     g_free(vec);
2640     errno = err;
2641     return NULL;
2642 }
2643 
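/* Release an iovec set up by lock_iovec(), copying data back to guest
 * memory when 'copy' is set (i.e. after a read-style syscall).
 */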
2644 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2645                          abi_ulong count, int copy)
2646 {
2647     struct target_iovec *target_vec;
2648     int i;
2649 
2650     target_vec = lock_user(VERIFY_READ, target_addr,
2651                            count * sizeof(struct target_iovec), 1);
2652     if (target_vec) {
2653         for (i = 0; i < count; i++) {
2654             abi_ulong base = tswapal(target_vec[i].iov_base);
2655             abi_long len = tswapal(target_vec[i].iov_len);
2656             if (len < 0) {
2657                 break;
2658             }
2659             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2660         }
2661         unlock_user(target_vec, target_addr, 0);
2662     }
2663 
2664     g_free(vec);
2665 }
2666 
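/* Translate the guest's socket type and type flags (SOCK_CLOEXEC,
 * SOCK_NONBLOCK) into host values, failing with -TARGET_EINVAL when a
 * requested flag cannot be represented on this host.
 */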
2667 static inline int target_to_host_sock_type(int *type)
2668 {
2669     int host_type = 0;
2670     int target_type = *type;
2671 
2672     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2673     case TARGET_SOCK_DGRAM:
2674         host_type = SOCK_DGRAM;
2675         break;
2676     case TARGET_SOCK_STREAM:
2677         host_type = SOCK_STREAM;
2678         break;
2679     default:
2680         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2681         break;
2682     }
2683     if (target_type & TARGET_SOCK_CLOEXEC) {
2684 #if defined(SOCK_CLOEXEC)
2685         host_type |= SOCK_CLOEXEC;
2686 #else
2687         return -TARGET_EINVAL;
2688 #endif
2689     }
2690     if (target_type & TARGET_SOCK_NONBLOCK) {
2691 #if defined(SOCK_NONBLOCK)
2692         host_type |= SOCK_NONBLOCK;
2693 #elif !defined(O_NONBLOCK)
2694         return -TARGET_EINVAL;
2695 #endif
2696     }
2697     *type = host_type;
2698     return 0;
2699 }
2700 
2701 /* Try to emulate socket type flags after socket creation.  */
2702 static int sock_flags_fixup(int fd, int target_type)
2703 {
2704 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2705     if (target_type & TARGET_SOCK_NONBLOCK) {
2706         int flags = fcntl(fd, F_GETFL);
2707         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2708             close(fd);
2709             return -TARGET_EINVAL;
2710         }
2711     }
2712 #endif
2713     return fd;
2714 }
2715 
2716 /* do_socket() must return target values and target errnos. */
2717 static abi_long do_socket(int domain, int type, int protocol)
2718 {
2719     int target_type = type;
2720     int ret;
2721 
2722     ret = target_to_host_sock_type(&type);
2723     if (ret) {
2724         return ret;
2725     }
2726 
2727     if (domain == PF_NETLINK && !(
2728 #ifdef CONFIG_RTNETLINK
2729          protocol == NETLINK_ROUTE ||
2730 #endif
2731          protocol == NETLINK_KOBJECT_UEVENT ||
2732          protocol == NETLINK_AUDIT)) {
2733         return -EPFNOSUPPORT;
2734     }
2735 
2736     if (domain == AF_PACKET ||
2737         (domain == AF_INET && type == SOCK_PACKET)) {
2738         protocol = tswap16(protocol);
2739     }
2740 
2741     ret = get_errno(socket(domain, type, protocol));
2742     if (ret >= 0) {
2743         ret = sock_flags_fixup(ret, target_type);
2744         if (type == SOCK_PACKET) {
2745             /* Handle an obsolete case:
2746              * if the socket type is SOCK_PACKET, bind by name.
2747              */
2748             fd_trans_register(ret, &target_packet_trans);
2749         } else if (domain == PF_NETLINK) {
2750             switch (protocol) {
2751 #ifdef CONFIG_RTNETLINK
2752             case NETLINK_ROUTE:
2753                 fd_trans_register(ret, &target_netlink_route_trans);
2754                 break;
2755 #endif
2756             case NETLINK_KOBJECT_UEVENT:
2757                 /* nothing to do: messages are strings */
2758                 break;
2759             case NETLINK_AUDIT:
2760                 fd_trans_register(ret, &target_netlink_audit_trans);
2761                 break;
2762             default:
2763                 g_assert_not_reached();
2764             }
2765         }
2766     }
2767     return ret;
2768 }
2769 
2770 /* do_bind() must return target values and target errnos. */
2771 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2772                         socklen_t addrlen)
2773 {
2774     void *addr;
2775     abi_long ret;
2776 
2777     if ((int)addrlen < 0) {
2778         return -TARGET_EINVAL;
2779     }
2780 
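    /* One byte more than addrlen, so that target_to_host_sockaddr() has
     * room for the extra NUL byte it may copy for an AF_UNIX sun_path.
     */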
2781     addr = alloca(addrlen+1);
2782 
2783     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2784     if (ret)
2785         return ret;
2786 
2787     return get_errno(bind(sockfd, addr, addrlen));
2788 }
2789 
2790 /* do_connect() must return target values and target errnos. */
2791 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2792                            socklen_t addrlen)
2793 {
2794     void *addr;
2795     abi_long ret;
2796 
2797     if ((int)addrlen < 0) {
2798         return -TARGET_EINVAL;
2799     }
2800 
2801     addr = alloca(addrlen+1);
2802 
2803     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2804     if (ret)
2805         return ret;
2806 
2807     return get_errno(safe_connect(sockfd, addr, addrlen));
2808 }
2809 
2810 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2811 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2812                                       int flags, int send)
2813 {
2814     abi_long ret, len;
2815     struct msghdr msg;
2816     abi_ulong count;
2817     struct iovec *vec;
2818     abi_ulong target_vec;
2819 
2820     if (msgp->msg_name) {
2821         msg.msg_namelen = tswap32(msgp->msg_namelen);
2822         msg.msg_name = alloca(msg.msg_namelen+1);
2823         ret = target_to_host_sockaddr(fd, msg.msg_name,
2824                                       tswapal(msgp->msg_name),
2825                                       msg.msg_namelen);
2826         if (ret == -TARGET_EFAULT) {
2827             /* For connected sockets msg_name and msg_namelen must
2828              * be ignored, so returning EFAULT immediately is wrong.
2829              * Instead, pass a bad msg_name to the host kernel, and
2830              * let it decide whether to return EFAULT or not.
2831              */
2832             msg.msg_name = (void *)-1;
2833         } else if (ret) {
2834             goto out2;
2835         }
2836     } else {
2837         msg.msg_name = NULL;
2838         msg.msg_namelen = 0;
2839     }
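    /* Allocate twice the guest's control buffer: host cmsg headers and
     * alignment may be larger than the target's, see target_to_host_cmsg().
     */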
2840     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2841     msg.msg_control = alloca(msg.msg_controllen);
2842     memset(msg.msg_control, 0, msg.msg_controllen);
2843 
2844     msg.msg_flags = tswap32(msgp->msg_flags);
2845 
2846     count = tswapal(msgp->msg_iovlen);
2847     target_vec = tswapal(msgp->msg_iov);
2848 
2849     if (count > IOV_MAX) {
2850         /* sendmsg/recvmsg return a different errno for this condition than
2851          * readv/writev, so we must catch it here before lock_iovec() does.
2852          */
2853         ret = -TARGET_EMSGSIZE;
2854         goto out2;
2855     }
2856 
2857     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2858                      target_vec, count, send);
2859     if (vec == NULL) {
2860         ret = -host_to_target_errno(errno);
2861         goto out2;
2862     }
2863     msg.msg_iovlen = count;
2864     msg.msg_iov = vec;
2865 
2866     if (send) {
2867         if (fd_trans_target_to_host_data(fd)) {
2868             void *host_msg;
2869 
2870             host_msg = g_malloc(msg.msg_iov->iov_len);
2871             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2872             ret = fd_trans_target_to_host_data(fd)(host_msg,
2873                                                    msg.msg_iov->iov_len);
2874             if (ret >= 0) {
2875                 msg.msg_iov->iov_base = host_msg;
2876                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2877             }
2878             g_free(host_msg);
2879         } else {
2880             ret = target_to_host_cmsg(&msg, msgp);
2881             if (ret == 0) {
2882                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2883             }
2884         }
2885     } else {
2886         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2887         if (!is_error(ret)) {
2888             len = ret;
2889             if (fd_trans_host_to_target_data(fd)) {
2890                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2891                                                MIN(msg.msg_iov->iov_len, len));
2892             } else {
2893                 ret = host_to_target_cmsg(msgp, &msg);
2894             }
2895             if (!is_error(ret)) {
2896                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2897                 msgp->msg_flags = tswap32(msg.msg_flags);
2898                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2899                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2900                                     msg.msg_name, msg.msg_namelen);
2901                     if (ret) {
2902                         goto out;
2903                     }
2904                 }
2905 
2906                 ret = len;
2907             }
2908         }
2909     }
2910 
2911 out:
2912     unlock_iovec(vec, target_vec, count, !send);
2913 out2:
2914     return ret;
2915 }
2916 
2917 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2918                                int flags, int send)
2919 {
2920     abi_long ret;
2921     struct target_msghdr *msgp;
2922 
2923     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2924                           msgp,
2925                           target_msg,
2926                           send ? 1 : 0)) {
2927         return -TARGET_EFAULT;
2928     }
2929     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2930     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2931     return ret;
2932 }
2933 
2934 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2935  * so it might not have this *mmsg-specific flag either.
2936  */
2937 #ifndef MSG_WAITFORONE
2938 #define MSG_WAITFORONE 0x10000
2939 #endif
2940 
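/* Common implementation of sendmmsg()/recvmmsg(): each message header in
 * the guest vector is processed by do_sendrecvmsg_locked() in turn.  If
 * at least one datagram was transferred, the count is returned and any
 * later error is discarded; otherwise the error itself is returned.
 */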
2941 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2942                                 unsigned int vlen, unsigned int flags,
2943                                 int send)
2944 {
2945     struct target_mmsghdr *mmsgp;
2946     abi_long ret = 0;
2947     int i;
2948 
2949     if (vlen > UIO_MAXIOV) {
2950         vlen = UIO_MAXIOV;
2951     }
2952 
2953     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2954     if (!mmsgp) {
2955         return -TARGET_EFAULT;
2956     }
2957 
2958     for (i = 0; i < vlen; i++) {
2959         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2960         if (is_error(ret)) {
2961             break;
2962         }
2963         mmsgp[i].msg_len = tswap32(ret);
2964         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2965         if (flags & MSG_WAITFORONE) {
2966             flags |= MSG_DONTWAIT;
2967         }
2968     }
2969 
2970     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2971 
2972     /* Return the number of datagrams sent or received if we transferred
2973      * any at all; otherwise return the error.
2974      */
2975     if (i) {
2976         return i;
2977     }
2978     return ret;
2979 }
2980 
2981 /* do_accept4() must return target values and target errnos. */
2982 static abi_long do_accept4(int fd, abi_ulong target_addr,
2983                            abi_ulong target_addrlen_addr, int flags)
2984 {
2985     socklen_t addrlen, ret_addrlen;
2986     void *addr;
2987     abi_long ret;
2988     int host_flags;
2989 
2990     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2991 
2992     if (target_addr == 0) {
2993         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2994     }
2995 
2996     /* Linux returns EINVAL if the addrlen pointer is invalid */
2997     if (get_user_u32(addrlen, target_addrlen_addr))
2998         return -TARGET_EINVAL;
2999 
3000     if ((int)addrlen < 0) {
3001         return -TARGET_EINVAL;
3002     }
3003 
3004     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3005         return -TARGET_EINVAL;
3006 
3007     addr = alloca(addrlen);
3008 
3009     ret_addrlen = addrlen;
3010     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3011     if (!is_error(ret)) {
3012         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3013         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3014             ret = -TARGET_EFAULT;
3015         }
3016     }
3017     return ret;
3018 }
3019 
3020 /* do_getpeername() must return target values and target errnos. */
3021 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3022                                abi_ulong target_addrlen_addr)
3023 {
3024     socklen_t addrlen, ret_addrlen;
3025     void *addr;
3026     abi_long ret;
3027 
3028     if (get_user_u32(addrlen, target_addrlen_addr))
3029         return -TARGET_EFAULT;
3030 
3031     if ((int)addrlen < 0) {
3032         return -TARGET_EINVAL;
3033     }
3034 
3035     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3036         return -TARGET_EFAULT;
3037 
3038     addr = alloca(addrlen);
3039 
3040     ret_addrlen = addrlen;
3041     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3042     if (!is_error(ret)) {
3043         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3044         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3045             ret = -TARGET_EFAULT;
3046         }
3047     }
3048     return ret;
3049 }
3050 
3051 /* do_getsockname() must return target values and target errnos. */
3052 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3053                                abi_ulong target_addrlen_addr)
3054 {
3055     socklen_t addrlen, ret_addrlen;
3056     void *addr;
3057     abi_long ret;
3058 
3059     if (get_user_u32(addrlen, target_addrlen_addr))
3060         return -TARGET_EFAULT;
3061 
3062     if ((int)addrlen < 0) {
3063         return -TARGET_EINVAL;
3064     }
3065 
3066     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3067         return -TARGET_EFAULT;
3068 
3069     addr = alloca(addrlen);
3070 
3071     ret_addrlen = addrlen;
3072     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3073     if (!is_error(ret)) {
3074         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3075         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3076             ret = -TARGET_EFAULT;
3077         }
3078     }
3079     return ret;
3080 }
3081 
3082 /* do_socketpair() must return target values and target errnos. */
3083 static abi_long do_socketpair(int domain, int type, int protocol,
3084                               abi_ulong target_tab_addr)
3085 {
3086     int tab[2];
3087     abi_long ret;
3088 
3089     target_to_host_sock_type(&type);
3090 
3091     ret = get_errno(socketpair(domain, type, protocol, tab));
3092     if (!is_error(ret)) {
3093         if (put_user_s32(tab[0], target_tab_addr)
3094             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3095             ret = -TARGET_EFAULT;
3096     }
3097     return ret;
3098 }
3099 
3100 /* do_sendto() must return target values and target errnos. */
3101 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3102                           abi_ulong target_addr, socklen_t addrlen)
3103 {
3104     void *addr;
3105     void *host_msg;
3106     void *copy_msg = NULL;
3107     abi_long ret;
3108 
3109     if ((int)addrlen < 0) {
3110         return -TARGET_EINVAL;
3111     }
3112 
3113     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3114     if (!host_msg)
3115         return -TARGET_EFAULT;
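         /* If this fd has a data translator registered, run the payload
          * through it on a private copy so the guest's buffer is untouched. */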
3116     if (fd_trans_target_to_host_data(fd)) {
3117         copy_msg = host_msg;
3118         host_msg = g_malloc(len);
3119         memcpy(host_msg, copy_msg, len);
3120         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3121         if (ret < 0) {
3122             goto fail;
3123         }
3124     }
3125     if (target_addr) {
3126         addr = alloca(addrlen+1);
3127         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3128         if (ret) {
3129             goto fail;
3130         }
3131         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3132     } else {
3133         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3134     }
3135 fail:
3136     if (copy_msg) {
3137         g_free(host_msg);
3138         host_msg = copy_msg;
3139     }
3140     unlock_user(host_msg, msg, 0);
3141     return ret;
3142 }
3143 
3144 /* do_recvfrom() must return target values and target errnos. */
3145 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3146                             abi_ulong target_addr,
3147                             abi_ulong target_addrlen)
3148 {
3149     socklen_t addrlen, ret_addrlen;
3150     void *addr;
3151     void *host_msg;
3152     abi_long ret;
3153 
3154     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3155     if (!host_msg)
3156         return -TARGET_EFAULT;
3157     if (target_addr) {
3158         if (get_user_u32(addrlen, target_addrlen)) {
3159             ret = -TARGET_EFAULT;
3160             goto fail;
3161         }
3162         if ((int)addrlen < 0) {
3163             ret = -TARGET_EINVAL;
3164             goto fail;
3165         }
3166         addr = alloca(addrlen);
3167         ret_addrlen = addrlen;
3168         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3169                                       addr, &ret_addrlen));
3170     } else {
3171         addr = NULL; /* To keep compiler quiet.  */
3172         addrlen = 0; /* To keep compiler quiet.  */
3173         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3174     }
3175     if (!is_error(ret)) {
3176         if (fd_trans_host_to_target_data(fd)) {
3177             abi_long trans;
3178             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3179             if (is_error(trans)) {
3180                 ret = trans;
3181                 goto fail;
3182             }
3183         }
3184         if (target_addr) {
3185             host_to_target_sockaddr(target_addr, addr,
3186                                     MIN(addrlen, ret_addrlen));
3187             if (put_user_u32(ret_addrlen, target_addrlen)) {
3188                 ret = -TARGET_EFAULT;
3189                 goto fail;
3190             }
3191         }
3192         unlock_user(host_msg, msg, len);
3193     } else {
3194 fail:
3195         unlock_user(host_msg, msg, 0);
3196     }
3197     return ret;
3198 }
3199 
3200 #ifdef TARGET_NR_socketcall
3201 /* do_socketcall() must return target values and target errnos. */
3202 static abi_long do_socketcall(int num, abi_ulong vptr)
3203 {
3204     static const unsigned nargs[] = { /* number of arguments per operation */
3205         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3206         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3207         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3208         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3209         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3210         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3211         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3212         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3213         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3214         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3215         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3216         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3217         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3218         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3219         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3220         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3221         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3222         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3223         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3224         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3225     };
3226     abi_long a[6]; /* max 6 args */
3227     unsigned i;
3228 
3229     /* check the range of the first argument num */
3230     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3231     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3232         return -TARGET_EINVAL;
3233     }
3234     /* ensure we have space for args */
3235     if (nargs[num] > ARRAY_SIZE(a)) {
3236         return -TARGET_EINVAL;
3237     }
3238     /* collect the arguments in a[] according to nargs[] */
3239     for (i = 0; i < nargs[num]; ++i) {
3240         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3241             return -TARGET_EFAULT;
3242         }
3243     }
3244     /* now when we have the args, invoke the appropriate underlying function */
3245     switch (num) {
3246     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3247         return do_socket(a[0], a[1], a[2]);
3248     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3249         return do_bind(a[0], a[1], a[2]);
3250     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3251         return do_connect(a[0], a[1], a[2]);
3252     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3253         return get_errno(listen(a[0], a[1]));
3254     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3255         return do_accept4(a[0], a[1], a[2], 0);
3256     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3257         return do_getsockname(a[0], a[1], a[2]);
3258     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3259         return do_getpeername(a[0], a[1], a[2]);
3260     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3261         return do_socketpair(a[0], a[1], a[2], a[3]);
3262     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3263         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3264     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3265         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3266     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3267         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3268     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3269         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3270     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3271         return get_errno(shutdown(a[0], a[1]));
3272     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3273         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3274     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3275         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3276     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3277         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3278     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3279         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3280     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3281         return do_accept4(a[0], a[1], a[2], a[3]);
3282     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3283         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3284     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3285         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3286     default:
3287         gemu_log("Unsupported socketcall: %d\n", num);
3288         return -TARGET_EINVAL;
3289     }
3290 }
3291 #endif
3292 
3293 #define N_SHM_REGIONS	32
3294 
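     /* Book-keeping for guest shmat() mappings: do_shmdt() looks a segment up
      * here to find its size so its page flags can be cleared again. */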
3295 static struct shm_region {
3296     abi_ulong start;
3297     abi_ulong size;
3298     bool in_use;
3299 } shm_regions[N_SHM_REGIONS];
3300 
3301 #ifndef TARGET_SEMID64_DS
3302 /* asm-generic version of this struct */
3303 struct target_semid64_ds
3304 {
3305   struct target_ipc_perm sem_perm;
3306   abi_ulong sem_otime;
3307 #if TARGET_ABI_BITS == 32
3308   abi_ulong __unused1;
3309 #endif
3310   abi_ulong sem_ctime;
3311 #if TARGET_ABI_BITS == 32
3312   abi_ulong __unused2;
3313 #endif
3314   abi_ulong sem_nsems;
3315   abi_ulong __unused3;
3316   abi_ulong __unused4;
3317 };
3318 #endif
3319 
3320 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3321                                                abi_ulong target_addr)
3322 {
3323     struct target_ipc_perm *target_ip;
3324     struct target_semid64_ds *target_sd;
3325 
3326     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3327         return -TARGET_EFAULT;
3328     target_ip = &(target_sd->sem_perm);
3329     host_ip->__key = tswap32(target_ip->__key);
3330     host_ip->uid = tswap32(target_ip->uid);
3331     host_ip->gid = tswap32(target_ip->gid);
3332     host_ip->cuid = tswap32(target_ip->cuid);
3333     host_ip->cgid = tswap32(target_ip->cgid);
3334 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3335     host_ip->mode = tswap32(target_ip->mode);
3336 #else
3337     host_ip->mode = tswap16(target_ip->mode);
3338 #endif
3339 #if defined(TARGET_PPC)
3340     host_ip->__seq = tswap32(target_ip->__seq);
3341 #else
3342     host_ip->__seq = tswap16(target_ip->__seq);
3343 #endif
3344     unlock_user_struct(target_sd, target_addr, 0);
3345     return 0;
3346 }
3347 
3348 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3349                                                struct ipc_perm *host_ip)
3350 {
3351     struct target_ipc_perm *target_ip;
3352     struct target_semid64_ds *target_sd;
3353 
3354     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3355         return -TARGET_EFAULT;
3356     target_ip = &(target_sd->sem_perm);
3357     target_ip->__key = tswap32(host_ip->__key);
3358     target_ip->uid = tswap32(host_ip->uid);
3359     target_ip->gid = tswap32(host_ip->gid);
3360     target_ip->cuid = tswap32(host_ip->cuid);
3361     target_ip->cgid = tswap32(host_ip->cgid);
3362 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3363     target_ip->mode = tswap32(host_ip->mode);
3364 #else
3365     target_ip->mode = tswap16(host_ip->mode);
3366 #endif
3367 #if defined(TARGET_PPC)
3368     target_ip->__seq = tswap32(host_ip->__seq);
3369 #else
3370     target_ip->__seq = tswap16(host_ip->__seq);
3371 #endif
3372     unlock_user_struct(target_sd, target_addr, 1);
3373     return 0;
3374 }
3375 
3376 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3377                                                abi_ulong target_addr)
3378 {
3379     struct target_semid64_ds *target_sd;
3380 
3381     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3382         return -TARGET_EFAULT;
3383     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3384         return -TARGET_EFAULT;
3385     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3386     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3387     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3388     unlock_user_struct(target_sd, target_addr, 0);
3389     return 0;
3390 }
3391 
3392 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3393                                                struct semid_ds *host_sd)
3394 {
3395     struct target_semid64_ds *target_sd;
3396 
3397     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3398         return -TARGET_EFAULT;
3399     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3400         return -TARGET_EFAULT;
3401     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3402     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3403     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3404     unlock_user_struct(target_sd, target_addr, 1);
3405     return 0;
3406 }
3407 
3408 struct target_seminfo {
3409     int semmap;
3410     int semmni;
3411     int semmns;
3412     int semmnu;
3413     int semmsl;
3414     int semopm;
3415     int semume;
3416     int semusz;
3417     int semvmx;
3418     int semaem;
3419 };
3420 
3421 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3422                                               struct seminfo *host_seminfo)
3423 {
3424     struct target_seminfo *target_seminfo;
3425     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3426         return -TARGET_EFAULT;
3427     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3428     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3429     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3430     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3431     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3432     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3433     __put_user(host_seminfo->semume, &target_seminfo->semume);
3434     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3435     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3436     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3437     unlock_user_struct(target_seminfo, target_addr, 1);
3438     return 0;
3439 }
3440 
3441 union semun {
3442     int val;
3443     struct semid_ds *buf;
3444     unsigned short *array;
3445     struct seminfo *__buf;
3446 };
3447 
3448 union target_semun {
3449     int val;
3450     abi_ulong buf;
3451     abi_ulong array;
3452     abi_ulong __buf;
3453 };
3454 
3455 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3456                                                abi_ulong target_addr)
3457 {
3458     int nsems;
3459     unsigned short *array;
3460     union semun semun;
3461     struct semid_ds semid_ds;
3462     int i, ret;
3463 
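         /* Query the semaphore set first so we know how many values to copy. */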
3464     semun.buf = &semid_ds;
3465 
3466     ret = semctl(semid, 0, IPC_STAT, semun);
3467     if (ret == -1)
3468         return get_errno(ret);
3469 
3470     nsems = semid_ds.sem_nsems;
3471 
3472     *host_array = g_try_new(unsigned short, nsems);
3473     if (!*host_array) {
3474         return -TARGET_ENOMEM;
3475     }
3476     array = lock_user(VERIFY_READ, target_addr,
3477                       nsems*sizeof(unsigned short), 1);
3478     if (!array) {
3479         g_free(*host_array);
3480         return -TARGET_EFAULT;
3481     }
3482 
3483     for (i = 0; i < nsems; i++) {
3484         __get_user((*host_array)[i], &array[i]);
3485     }
3486     unlock_user(array, target_addr, 0);
3487 
3488     return 0;
3489 }
3490 
3491 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3492                                                unsigned short **host_array)
3493 {
3494     int nsems;
3495     unsigned short *array;
3496     union semun semun;
3497     struct semid_ds semid_ds;
3498     int i, ret;
3499 
3500     semun.buf = &semid_ds;
3501 
3502     ret = semctl(semid, 0, IPC_STAT, semun);
3503     if (ret == -1)
3504         return get_errno(ret);
3505 
3506     nsems = semid_ds.sem_nsems;
3507 
3508     array = lock_user(VERIFY_WRITE, target_addr,
3509                       nsems*sizeof(unsigned short), 0);
3510     if (!array)
3511         return -TARGET_EFAULT;
3512 
3513     for (i = 0; i < nsems; i++) {
3514         __put_user((*host_array)[i], &array[i]);
3515     }
3516     g_free(*host_array);
3517     unlock_user(array, target_addr, 1);
3518 
3519     return 0;
3520 }
3521 
3522 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3523                                  abi_ulong target_arg)
3524 {
3525     union target_semun target_su = { .buf = target_arg };
3526     union semun arg;
3527     struct semid_ds dsarg;
3528     unsigned short *array = NULL;
3529     struct seminfo seminfo;
3530     abi_long ret = -TARGET_EINVAL;
3531     abi_long err;
3532     cmd &= 0xff;
3533 
3534     switch (cmd) {
3535         case GETVAL:
3536         case SETVAL:
3537             /* In 64 bit cross-endian situations, we will erroneously pick up
3538              * the wrong half of the union for the "val" element.  To rectify
3539              * this, the entire 8-byte structure is byteswapped, followed by
3540              * a swap of the 4 byte val field. In other cases, the data is
3541              * already in proper host byte order. */
3542             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3543                 target_su.buf = tswapal(target_su.buf);
3544                 arg.val = tswap32(target_su.val);
3545             } else {
3546                 arg.val = target_su.val;
3547             }
3548             ret = get_errno(semctl(semid, semnum, cmd, arg));
3549             break;
3550         case GETALL:
3551         case SETALL:
3552             err = target_to_host_semarray(semid, &array, target_su.array);
3553             if (err)
3554                 return err;
3555             arg.array = array;
3556             ret = get_errno(semctl(semid, semnum, cmd, arg));
3557             err = host_to_target_semarray(semid, target_su.array, &array);
3558             if (err)
3559                 return err;
3560             break;
3561         case IPC_STAT:
3562         case IPC_SET:
3563         case SEM_STAT:
3564             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3565             if (err)
3566                 return err;
3567             arg.buf = &dsarg;
3568             ret = get_errno(semctl(semid, semnum, cmd, arg));
3569             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3570             if (err)
3571                 return err;
3572             break;
3573         case IPC_INFO:
3574         case SEM_INFO:
3575             arg.__buf = &seminfo;
3576             ret = get_errno(semctl(semid, semnum, cmd, arg));
3577             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3578             if (err)
3579                 return err;
3580             break;
3581         case IPC_RMID:
3582         case GETPID:
3583         case GETNCNT:
3584         case GETZCNT:
3585             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3586             break;
3587     }
3588 
3589     return ret;
3590 }
3591 
3592 struct target_sembuf {
3593     unsigned short sem_num;
3594     short sem_op;
3595     short sem_flg;
3596 };
3597 
3598 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3599                                              abi_ulong target_addr,
3600                                              unsigned nsops)
3601 {
3602     struct target_sembuf *target_sembuf;
3603     int i;
3604 
3605     target_sembuf = lock_user(VERIFY_READ, target_addr,
3606                               nsops*sizeof(struct target_sembuf), 1);
3607     if (!target_sembuf)
3608         return -TARGET_EFAULT;
3609 
3610     for (i = 0; i < nsops; i++) {
3611         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3612         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3613         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3614     }
3615 
3616     unlock_user(target_sembuf, target_addr, 0);
3617 
3618     return 0;
3619 }
3620 
3621 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3622 {
3623     struct sembuf sops[nsops];
3624     abi_long ret;
3625 
3626     if (target_to_host_sembuf(sops, ptr, nsops))
3627         return -TARGET_EFAULT;
3628 
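         /* Prefer the direct semtimedop syscall where the host provides one;
          * otherwise fall back to the multiplexed ipc syscall. */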
3629     ret = -TARGET_ENOSYS;
3630 #ifdef __NR_semtimedop
3631     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3632 #endif
3633 #ifdef __NR_ipc
3634     if (ret == -TARGET_ENOSYS) {
3635         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3636     }
3637 #endif
3638     return ret;
3639 }
3640 
3641 struct target_msqid_ds
3642 {
3643     struct target_ipc_perm msg_perm;
3644     abi_ulong msg_stime;
3645 #if TARGET_ABI_BITS == 32
3646     abi_ulong __unused1;
3647 #endif
3648     abi_ulong msg_rtime;
3649 #if TARGET_ABI_BITS == 32
3650     abi_ulong __unused2;
3651 #endif
3652     abi_ulong msg_ctime;
3653 #if TARGET_ABI_BITS == 32
3654     abi_ulong __unused3;
3655 #endif
3656     abi_ulong __msg_cbytes;
3657     abi_ulong msg_qnum;
3658     abi_ulong msg_qbytes;
3659     abi_ulong msg_lspid;
3660     abi_ulong msg_lrpid;
3661     abi_ulong __unused4;
3662     abi_ulong __unused5;
3663 };
3664 
3665 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3666                                                abi_ulong target_addr)
3667 {
3668     struct target_msqid_ds *target_md;
3669 
3670     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3671         return -TARGET_EFAULT;
3672     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3673         return -TARGET_EFAULT;
3674     host_md->msg_stime = tswapal(target_md->msg_stime);
3675     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3676     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3677     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3678     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3679     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3680     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3681     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3682     unlock_user_struct(target_md, target_addr, 0);
3683     return 0;
3684 }
3685 
3686 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3687                                                struct msqid_ds *host_md)
3688 {
3689     struct target_msqid_ds *target_md;
3690 
3691     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3692         return -TARGET_EFAULT;
3693     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3694         return -TARGET_EFAULT;
3695     target_md->msg_stime = tswapal(host_md->msg_stime);
3696     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3697     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3698     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3699     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3700     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3701     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3702     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3703     unlock_user_struct(target_md, target_addr, 1);
3704     return 0;
3705 }
3706 
3707 struct target_msginfo {
3708     int msgpool;
3709     int msgmap;
3710     int msgmax;
3711     int msgmnb;
3712     int msgmni;
3713     int msgssz;
3714     int msgtql;
3715     unsigned short int msgseg;
3716 };
3717 
3718 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3719                                               struct msginfo *host_msginfo)
3720 {
3721     struct target_msginfo *target_msginfo;
3722     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3723         return -TARGET_EFAULT;
3724     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3725     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3726     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3727     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3728     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3729     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3730     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3731     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3732     unlock_user_struct(target_msginfo, target_addr, 1);
3733     return 0;
3734 }
3735 
3736 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3737 {
3738     struct msqid_ds dsarg;
3739     struct msginfo msginfo;
3740     abi_long ret = -TARGET_EINVAL;
3741 
3742     cmd &= 0xff;
3743 
3744     switch (cmd) {
3745     case IPC_STAT:
3746     case IPC_SET:
3747     case MSG_STAT:
3748         if (target_to_host_msqid_ds(&dsarg,ptr))
3749             return -TARGET_EFAULT;
3750         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3751         if (host_to_target_msqid_ds(ptr,&dsarg))
3752             return -TARGET_EFAULT;
3753         break;
3754     case IPC_RMID:
3755         ret = get_errno(msgctl(msgid, cmd, NULL));
3756         break;
3757     case IPC_INFO:
3758     case MSG_INFO:
3759         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3760         if (host_to_target_msginfo(ptr, &msginfo))
3761             return -TARGET_EFAULT;
3762         break;
3763     }
3764 
3765     return ret;
3766 }
3767 
3768 struct target_msgbuf {
3769     abi_long mtype;
3770     char mtext[1];
3771 };
3772 
3773 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3774                                  ssize_t msgsz, int msgflg)
3775 {
3776     struct target_msgbuf *target_mb;
3777     struct msgbuf *host_mb;
3778     abi_long ret = 0;
3779 
3780     if (msgsz < 0) {
3781         return -TARGET_EINVAL;
3782     }
3783 
3784     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3785         return -TARGET_EFAULT;
3786     host_mb = g_try_malloc(msgsz + sizeof(long));
3787     if (!host_mb) {
3788         unlock_user_struct(target_mb, msgp, 0);
3789         return -TARGET_ENOMEM;
3790     }
3791     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3792     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3793     ret = -TARGET_ENOSYS;
3794 #ifdef __NR_msgsnd
3795     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3796 #endif
3797 #ifdef __NR_ipc
3798     if (ret == -TARGET_ENOSYS) {
3799         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3800                                  host_mb, 0));
3801     }
3802 #endif
3803     g_free(host_mb);
3804     unlock_user_struct(target_mb, msgp, 0);
3805 
3806     return ret;
3807 }
3808 
3809 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3810                                  ssize_t msgsz, abi_long msgtyp,
3811                                  int msgflg)
3812 {
3813     struct target_msgbuf *target_mb;
3814     char *target_mtext;
3815     struct msgbuf *host_mb;
3816     abi_long ret = 0;
3817 
3818     if (msgsz < 0) {
3819         return -TARGET_EINVAL;
3820     }
3821 
3822     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3823         return -TARGET_EFAULT;
3824 
3825     host_mb = g_try_malloc(msgsz + sizeof(long));
3826     if (!host_mb) {
3827         ret = -TARGET_ENOMEM;
3828         goto end;
3829     }
3830     ret = -TARGET_ENOSYS;
3831 #ifdef __NR_msgrcv
3832     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3833 #endif
3834 #ifdef __NR_ipc
3835     if (ret == -TARGET_ENOSYS) {
3836         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3837                         msgflg, host_mb, msgtyp));
3838     }
3839 #endif
3840 
3841     if (ret > 0) {
3842         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3843         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3844         if (!target_mtext) {
3845             ret = -TARGET_EFAULT;
3846             goto end;
3847         }
3848         memcpy(target_mb->mtext, host_mb->mtext, ret);
3849         unlock_user(target_mtext, target_mtext_addr, ret);
3850     }
3851 
3852     target_mb->mtype = tswapal(host_mb->mtype);
3853 
3854 end:
3855     if (target_mb)
3856         unlock_user_struct(target_mb, msgp, 1);
3857     g_free(host_mb);
3858     return ret;
3859 }
3860 
3861 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3862                                                abi_ulong target_addr)
3863 {
3864     struct target_shmid_ds *target_sd;
3865 
3866     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3867         return -TARGET_EFAULT;
3868     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3869         return -TARGET_EFAULT;
3870     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3871     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3872     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3873     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3874     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3875     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3876     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3877     unlock_user_struct(target_sd, target_addr, 0);
3878     return 0;
3879 }
3880 
3881 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3882                                                struct shmid_ds *host_sd)
3883 {
3884     struct target_shmid_ds *target_sd;
3885 
3886     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3887         return -TARGET_EFAULT;
3888     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3889         return -TARGET_EFAULT;
3890     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3891     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3892     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3893     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3894     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3895     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3896     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3897     unlock_user_struct(target_sd, target_addr, 1);
3898     return 0;
3899 }
3900 
3901 struct target_shminfo {
3902     abi_ulong shmmax;
3903     abi_ulong shmmin;
3904     abi_ulong shmmni;
3905     abi_ulong shmseg;
3906     abi_ulong shmall;
3907 };
3908 
3909 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3910                                               struct shminfo *host_shminfo)
3911 {
3912     struct target_shminfo *target_shminfo;
3913     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3914         return -TARGET_EFAULT;
3915     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3916     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3917     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3918     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3919     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3920     unlock_user_struct(target_shminfo, target_addr, 1);
3921     return 0;
3922 }
3923 
3924 struct target_shm_info {
3925     int used_ids;
3926     abi_ulong shm_tot;
3927     abi_ulong shm_rss;
3928     abi_ulong shm_swp;
3929     abi_ulong swap_attempts;
3930     abi_ulong swap_successes;
3931 };
3932 
3933 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3934                                                struct shm_info *host_shm_info)
3935 {
3936     struct target_shm_info *target_shm_info;
3937     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3938         return -TARGET_EFAULT;
3939     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3940     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3941     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3942     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3943     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3944     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3945     unlock_user_struct(target_shm_info, target_addr, 1);
3946     return 0;
3947 }
3948 
3949 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3950 {
3951     struct shmid_ds dsarg;
3952     struct shminfo shminfo;
3953     struct shm_info shm_info;
3954     abi_long ret = -TARGET_EINVAL;
3955 
3956     cmd &= 0xff;
3957 
3958     switch (cmd) {
3959     case IPC_STAT:
3960     case IPC_SET:
3961     case SHM_STAT:
3962         if (target_to_host_shmid_ds(&dsarg, buf))
3963             return -TARGET_EFAULT;
3964         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3965         if (host_to_target_shmid_ds(buf, &dsarg))
3966             return -TARGET_EFAULT;
3967         break;
3968     case IPC_INFO:
3969         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3970         if (host_to_target_shminfo(buf, &shminfo))
3971             return -TARGET_EFAULT;
3972         break;
3973     case SHM_INFO:
3974         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3975         if (host_to_target_shm_info(buf, &shm_info))
3976             return -TARGET_EFAULT;
3977         break;
3978     case IPC_RMID:
3979     case SHM_LOCK:
3980     case SHM_UNLOCK:
3981         ret = get_errno(shmctl(shmid, cmd, NULL));
3982         break;
3983     }
3984 
3985     return ret;
3986 }
3987 
3988 #ifndef TARGET_FORCE_SHMLBA
3989 /* For most architectures, SHMLBA is the same as the page size;
3990  * some architectures have larger values, in which case they should
3991  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3992  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3993  * and defining its own value for SHMLBA.
3994  *
3995  * The kernel also permits SHMLBA to be set by the architecture to a
3996  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3997  * this means that addresses are rounded to the large size if
3998  * SHM_RND is set but addresses not aligned to that size are not rejected
3999  * as long as they are at least page-aligned. Since the only architecture
4000  * which uses this is ia64 this code doesn't provide for that oddity.
4001  */
4002 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4003 {
4004     return TARGET_PAGE_SIZE;
4005 }
4006 #endif
4007 
4008 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4009                                  int shmid, abi_ulong shmaddr, int shmflg)
4010 {
4011     abi_long raddr;
4012     void *host_raddr;
4013     struct shmid_ds shm_info;
4014     int i, ret;
4015     abi_ulong shmlba;
4016 
4017     /* find out the length of the shared memory segment */
4018     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4019     if (is_error(ret)) {
4020         /* can't get length, bail out */
4021         return ret;
4022     }
4023 
4024     shmlba = target_shmlba(cpu_env);
4025 
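         /* Reject misaligned attach addresses unless SHM_RND asks us to round
          * down to the SHMLBA boundary. */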
4026     if (shmaddr & (shmlba - 1)) {
4027         if (shmflg & SHM_RND) {
4028             shmaddr &= ~(shmlba - 1);
4029         } else {
4030             return -TARGET_EINVAL;
4031         }
4032     }
4033     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4034         return -TARGET_EINVAL;
4035     }
4036 
4037     mmap_lock();
4038 
4039     if (shmaddr)
4040         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4041     else {
4042         abi_ulong mmap_start;
4043 
4044         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4045         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4046 
4047         if (mmap_start == -1) {
4048             errno = ENOMEM;
4049             host_raddr = (void *)-1;
4050         } else
4051             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4052     }
4053 
4054     if (host_raddr == (void *)-1) {
4055         mmap_unlock();
4056         return get_errno((long)host_raddr);
4057     }
4058     raddr = h2g((unsigned long)host_raddr);
4059 
4060     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4061                    PAGE_VALID | PAGE_READ |
4062                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4063 
4064     for (i = 0; i < N_SHM_REGIONS; i++) {
4065         if (!shm_regions[i].in_use) {
4066             shm_regions[i].in_use = true;
4067             shm_regions[i].start = raddr;
4068             shm_regions[i].size = shm_info.shm_segsz;
4069             break;
4070         }
4071     }
4072 
4073     mmap_unlock();
4074     return raddr;
4075 
4076 }
4077 
4078 static inline abi_long do_shmdt(abi_ulong shmaddr)
4079 {
4080     int i;
4081     abi_long rv;
4082 
4083     mmap_lock();
4084 
4085     for (i = 0; i < N_SHM_REGIONS; ++i) {
4086         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4087             shm_regions[i].in_use = false;
4088             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4089             break;
4090         }
4091     }
4092     rv = get_errno(shmdt(g2h(shmaddr)));
4093 
4094     mmap_unlock();
4095 
4096     return rv;
4097 }
4098 
4099 #ifdef TARGET_NR_ipc
4100 /* ??? This only works with linear mappings.  */
4101 /* do_ipc() must return target values and target errnos. */
4102 static abi_long do_ipc(CPUArchState *cpu_env,
4103                        unsigned int call, abi_long first,
4104                        abi_long second, abi_long third,
4105                        abi_long ptr, abi_long fifth)
4106 {
4107     int version;
4108     abi_long ret = 0;
4109 
4110     version = call >> 16;
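         /* The ipc call number carries an ABI version in its top 16 bits. */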
4111     call &= 0xffff;
4112 
4113     switch (call) {
4114     case IPCOP_semop:
4115         ret = do_semop(first, ptr, second);
4116         break;
4117 
4118     case IPCOP_semget:
4119         ret = get_errno(semget(first, second, third));
4120         break;
4121 
4122     case IPCOP_semctl: {
4123         /* The semun argument to semctl is passed by value, so dereference the
4124          * ptr argument. */
4125         abi_ulong atptr;
4126         get_user_ual(atptr, ptr);
4127         ret = do_semctl(first, second, third, atptr);
4128         break;
4129     }
4130 
4131     case IPCOP_msgget:
4132         ret = get_errno(msgget(first, second));
4133         break;
4134 
4135     case IPCOP_msgsnd:
4136         ret = do_msgsnd(first, ptr, second, third);
4137         break;
4138 
4139     case IPCOP_msgctl:
4140         ret = do_msgctl(first, second, ptr);
4141         break;
4142 
4143     case IPCOP_msgrcv:
4144         switch (version) {
4145         case 0:
4146             {
4147                 struct target_ipc_kludge {
4148                     abi_long msgp;
4149                     abi_long msgtyp;
4150                 } *tmp;
4151 
4152                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4153                     ret = -TARGET_EFAULT;
4154                     break;
4155                 }
4156 
4157                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4158 
4159                 unlock_user_struct(tmp, ptr, 0);
4160                 break;
4161             }
4162         default:
4163             ret = do_msgrcv(first, ptr, second, fifth, third);
4164         }
4165         break;
4166 
4167     case IPCOP_shmat:
4168         switch (version) {
4169         default:
4170         {
4171             abi_ulong raddr;
4172             raddr = do_shmat(cpu_env, first, ptr, second);
4173             if (is_error(raddr))
4174                 return get_errno(raddr);
4175             if (put_user_ual(raddr, third))
4176                 return -TARGET_EFAULT;
4177             break;
4178         }
4179         case 1:
4180             ret = -TARGET_EINVAL;
4181             break;
4182         }
4183         break;
4184     case IPCOP_shmdt:
4185         ret = do_shmdt(ptr);
4186         break;
4187 
4188     case IPCOP_shmget:
4189         /* IPC_* flag values are the same on all linux platforms */
4190         ret = get_errno(shmget(first, second, third));
4191         break;
4192 
4193     /* IPC_* and SHM_* command values are the same on all linux platforms */
4194     case IPCOP_shmctl:
4195         ret = do_shmctl(first, second, ptr);
4196         break;
4197     default:
4198         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4199         ret = -TARGET_ENOSYS;
4200         break;
4201     }
4202     return ret;
4203 }
4204 #endif
4205 
4206 /* kernel structure types definitions */
4207 
4208 #define STRUCT(name, ...) STRUCT_ ## name,
4209 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4210 enum {
4211 #include "syscall_types.h"
4212 STRUCT_MAX
4213 };
4214 #undef STRUCT
4215 #undef STRUCT_SPECIAL
4216 
4217 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4218 #define STRUCT_SPECIAL(name)
4219 #include "syscall_types.h"
4220 #undef STRUCT
4221 #undef STRUCT_SPECIAL
4222 
4223 typedef struct IOCTLEntry IOCTLEntry;
4224 
4225 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4226                              int fd, int cmd, abi_long arg);
4227 
4228 struct IOCTLEntry {
4229     int target_cmd;
4230     unsigned int host_cmd;
4231     const char *name;
4232     int access;
4233     do_ioctl_fn *do_ioctl;
4234     const argtype arg_type[5];
4235 };
4236 
4237 #define IOC_R 0x0001
4238 #define IOC_W 0x0002
4239 #define IOC_RW (IOC_R | IOC_W)
4240 
4241 #define MAX_STRUCT_SIZE 4096
4242 
4243 #ifdef CONFIG_FIEMAP
4244 /* So fiemap access checks don't overflow on 32 bit systems.
4245  * This is very slightly smaller than the limit imposed by
4246  * the underlying kernel.
4247  */
4248 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4249                             / sizeof(struct fiemap_extent))
4250 
4251 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4252                                        int fd, int cmd, abi_long arg)
4253 {
4254     /* The parameter for this ioctl is a struct fiemap followed
4255      * by an array of struct fiemap_extent whose size is set
4256      * in fiemap->fm_extent_count. The array is filled in by the
4257      * ioctl.
4258      */
4259     int target_size_in, target_size_out;
4260     struct fiemap *fm;
4261     const argtype *arg_type = ie->arg_type;
4262     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4263     void *argptr, *p;
4264     abi_long ret;
4265     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4266     uint32_t outbufsz;
4267     int free_fm = 0;
4268 
4269     assert(arg_type[0] == TYPE_PTR);
4270     assert(ie->access == IOC_RW);
4271     arg_type++;
4272     target_size_in = thunk_type_size(arg_type, 0);
4273     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4274     if (!argptr) {
4275         return -TARGET_EFAULT;
4276     }
4277     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4278     unlock_user(argptr, arg, 0);
4279     fm = (struct fiemap *)buf_temp;
4280     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4281         return -TARGET_EINVAL;
4282     }
4283 
4284     outbufsz = sizeof (*fm) +
4285         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4286 
4287     if (outbufsz > MAX_STRUCT_SIZE) {
4288         /* We can't fit all the extents into the fixed size buffer.
4289          * Allocate one that is large enough and use it instead.
4290          */
4291         fm = g_try_malloc(outbufsz);
4292         if (!fm) {
4293             return -TARGET_ENOMEM;
4294         }
4295         memcpy(fm, buf_temp, sizeof(struct fiemap));
4296         free_fm = 1;
4297     }
4298     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4299     if (!is_error(ret)) {
4300         target_size_out = target_size_in;
4301         /* An extent_count of 0 means we were only counting the extents
4302          * so there are no structs to copy
4303          */
4304         if (fm->fm_extent_count != 0) {
4305             target_size_out += fm->fm_mapped_extents * extent_size;
4306         }
4307         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4308         if (!argptr) {
4309             ret = -TARGET_EFAULT;
4310         } else {
4311             /* Convert the struct fiemap */
4312             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4313             if (fm->fm_extent_count != 0) {
4314                 p = argptr + target_size_in;
4315                 /* ...and then all the struct fiemap_extents */
4316                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4317                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4318                                   THUNK_TARGET);
4319                     p += extent_size;
4320                 }
4321             }
4322             unlock_user(argptr, arg, target_size_out);
4323         }
4324     }
4325     if (free_fm) {
4326         g_free(fm);
4327     }
4328     return ret;
4329 }
4330 #endif
4331 
4332 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4333                                 int fd, int cmd, abi_long arg)
4334 {
4335     const argtype *arg_type = ie->arg_type;
4336     int target_size;
4337     void *argptr;
4338     int ret;
4339     struct ifconf *host_ifconf;
4340     uint32_t outbufsz;
4341     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4342     int target_ifreq_size;
4343     int nb_ifreq;
4344     int free_buf = 0;
4345     int i;
4346     int target_ifc_len;
4347     abi_long target_ifc_buf;
4348     int host_ifc_len;
4349     char *host_ifc_buf;
4350 
4351     assert(arg_type[0] == TYPE_PTR);
4352     assert(ie->access == IOC_RW);
4353 
4354     arg_type++;
4355     target_size = thunk_type_size(arg_type, 0);
4356 
4357     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4358     if (!argptr)
4359         return -TARGET_EFAULT;
4360     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4361     unlock_user(argptr, arg, 0);
4362 
4363     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4364     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4365     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4366 
4367     if (target_ifc_buf != 0) {
4368         target_ifc_len = host_ifconf->ifc_len;
4369         nb_ifreq = target_ifc_len / target_ifreq_size;
4370         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4371 
4372         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4373         if (outbufsz > MAX_STRUCT_SIZE) {
4374             /*
4375              * We can't fit all the ifreq entries into the fixed size buffer.
4376              * Allocate one that is large enough and use it instead.
4377              */
4378             host_ifconf = malloc(outbufsz);
4379             if (!host_ifconf) {
4380                 return -TARGET_ENOMEM;
4381             }
4382             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4383             free_buf = 1;
4384         }
4385         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4386 
4387         host_ifconf->ifc_len = host_ifc_len;
4388     } else {
4389       host_ifc_buf = NULL;
4390     }
4391     host_ifconf->ifc_buf = host_ifc_buf;
4392 
4393     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4394     if (!is_error(ret)) {
4395         /* convert host ifc_len to target ifc_len */
4396 
4397         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4398         target_ifc_len = nb_ifreq * target_ifreq_size;
4399         host_ifconf->ifc_len = target_ifc_len;
4400 
4401         /* restore target ifc_buf */
4402 
4403         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4404 
4405         /* copy struct ifconf to target user */
4406 
4407         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4408         if (!argptr)
4409             return -TARGET_EFAULT;
4410         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4411         unlock_user(argptr, arg, target_size);
4412 
4413         if (target_ifc_buf != 0) {
4414             /* copy ifreq[] to target user */
4415             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4416             for (i = 0; i < nb_ifreq ; i++) {
4417                 thunk_convert(argptr + i * target_ifreq_size,
4418                               host_ifc_buf + i * sizeof(struct ifreq),
4419                               ifreq_arg_type, THUNK_TARGET);
4420             }
4421             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4422         }
4423     }
4424 
4425     if (free_buf) {
4426         free(host_ifconf);
4427     }
4428 
4429     return ret;
4430 }
4431 
4432 #if defined(CONFIG_USBFS)
4433 #if HOST_LONG_BITS > 64
4434 #error USBDEVFS thunks do not support >64 bit hosts yet.
4435 #endif
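     /* Tracks an in-flight USB URB: the host usbdevfs_urb plus the guest
      * addresses needed to copy data and status back when it is reaped. */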
4436 struct live_urb {
4437     uint64_t target_urb_adr;
4438     uint64_t target_buf_adr;
4439     char *target_buf_ptr;
4440     struct usbdevfs_urb host_urb;
4441 };
4442 
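     /* Hash table keyed on the guest URB address, so DISCARDURB can map a
      * guest pointer back to its live_urb. */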
4443 static GHashTable *usbdevfs_urb_hashtable(void)
4444 {
4445     static GHashTable *urb_hashtable;
4446 
4447     if (!urb_hashtable) {
4448         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4449     }
4450     return urb_hashtable;
4451 }
4452 
4453 static void urb_hashtable_insert(struct live_urb *urb)
4454 {
4455     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4456     g_hash_table_insert(urb_hashtable, urb, urb);
4457 }
4458 
4459 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4460 {
4461     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4462     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4463 }
4464 
4465 static void urb_hashtable_remove(struct live_urb *urb)
4466 {
4467     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4468     g_hash_table_remove(urb_hashtable, urb);
4469 }
4470 
4471 static abi_long
4472 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4473                           int fd, int cmd, abi_long arg)
4474 {
4475     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4476     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4477     struct live_urb *lurb;
4478     void *argptr;
4479     uint64_t hurb;
4480     int target_size;
4481     uintptr_t target_urb_adr;
4482     abi_long ret;
4483 
4484     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4485 
4486     memset(buf_temp, 0, sizeof(uint64_t));
4487     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4488     if (is_error(ret)) {
4489         return ret;
4490     }
4491 
4492     memcpy(&hurb, buf_temp, sizeof(uint64_t));
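         /* The kernel hands back the address of our embedded host_urb; step
          * back to the enclosing live_urb (container_of-style arithmetic). */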
4493     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4494     if (!lurb->target_urb_adr) {
4495         return -TARGET_EFAULT;
4496     }
4497     urb_hashtable_remove(lurb);
4498     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4499         lurb->host_urb.buffer_length);
4500     lurb->target_buf_ptr = NULL;
4501 
4502     /* restore the guest buffer pointer */
4503     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4504 
4505     /* update the guest urb struct */
4506     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4507     if (!argptr) {
4508         g_free(lurb);
4509         return -TARGET_EFAULT;
4510     }
4511     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4512     unlock_user(argptr, lurb->target_urb_adr, target_size);
4513 
4514     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4515     /* write back the urb handle */
4516     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4517     if (!argptr) {
4518         g_free(lurb);
4519         return -TARGET_EFAULT;
4520     }
4521 
4522     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4523     target_urb_adr = lurb->target_urb_adr;
4524     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4525     unlock_user(argptr, arg, target_size);
4526 
4527     g_free(lurb);
4528     return ret;
4529 }
4530 
4531 static abi_long
4532 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4533                              uint8_t *buf_temp __attribute__((unused)),
4534                              int fd, int cmd, abi_long arg)
4535 {
4536     struct live_urb *lurb;
4537 
4538     /* map target address back to host URB with metadata. */
4539     lurb = urb_hashtable_lookup(arg);
4540     if (!lurb) {
4541         return -TARGET_EFAULT;
4542     }
4543     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4544 }
4545 
4546 static abi_long
4547 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4548                             int fd, int cmd, abi_long arg)
4549 {
4550     const argtype *arg_type = ie->arg_type;
4551     int target_size;
4552     abi_long ret;
4553     void *argptr;
4554     int rw_dir;
4555     struct live_urb *lurb;
4556 
4557     /*
4558      * Each submitted URB needs to map to a unique ID for the
4559      * kernel, and that unique ID needs to be a pointer to
4560      * host memory.  Hence, we need to malloc for each URB.
4561      * Isochronous transfers have a variable-length struct.
4562      */
4563     arg_type++;
4564     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4565 
4566     /* construct host copy of urb and metadata */
4567     lurb = g_try_malloc0(sizeof(struct live_urb));
4568     if (!lurb) {
4569         return -TARGET_ENOMEM;
4570     }
4571 
4572     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4573     if (!argptr) {
4574         g_free(lurb);
4575         return -TARGET_EFAULT;
4576     }
4577     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4578     unlock_user(argptr, arg, 0);
4579 
4580     lurb->target_urb_adr = arg;
4581     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4582 
4583     /* buffer space used depends on endpoint type so lock the entire buffer */
4584     /* control type urbs should check the buffer contents for true direction */
4585     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4586     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4587         lurb->host_urb.buffer_length, 1);
4588     if (lurb->target_buf_ptr == NULL) {
4589         g_free(lurb);
4590         return -TARGET_EFAULT;
4591     }
4592 
4593     /* update buffer pointer in host copy */
4594     lurb->host_urb.buffer = lurb->target_buf_ptr;
4595 
4596     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4597     if (is_error(ret)) {
4598         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4599         g_free(lurb);
4600     } else {
4601         urb_hashtable_insert(lurb);
4602     }
4603 
4604     return ret;
4605 }
4606 #endif /* CONFIG_USBFS */
4607 
4608 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4609                             int cmd, abi_long arg)
4610 {
4611     void *argptr;
4612     struct dm_ioctl *host_dm;
4613     abi_long guest_data;
4614     uint32_t guest_data_size;
4615     int target_size;
4616     const argtype *arg_type = ie->arg_type;
4617     abi_long ret;
4618     void *big_buf = NULL;
4619     char *host_data;
4620 
4621     arg_type++;
4622     target_size = thunk_type_size(arg_type, 0);
4623     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4624     if (!argptr) {
4625         ret = -TARGET_EFAULT;
4626         goto out;
4627     }
4628     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4629     unlock_user(argptr, arg, 0);
4630 
4631     /* buf_temp is too small, so fetch things into a bigger buffer */
4632     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4633     memcpy(big_buf, buf_temp, target_size);
4634     buf_temp = big_buf;
4635     host_dm = big_buf;
4636 
4637     guest_data = arg + host_dm->data_start;
4638     if ((guest_data - arg) < 0) {
4639         ret = -TARGET_EINVAL;
4640         goto out;
4641     }
4642     guest_data_size = host_dm->data_size - host_dm->data_start;
4643     host_data = (char*)host_dm + host_dm->data_start;
4644 
4645     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4646     if (!argptr) {
4647         ret = -TARGET_EFAULT;
4648         goto out;
4649     }
4650 
4651     switch (ie->host_cmd) {
4652     case DM_REMOVE_ALL:
4653     case DM_LIST_DEVICES:
4654     case DM_DEV_CREATE:
4655     case DM_DEV_REMOVE:
4656     case DM_DEV_SUSPEND:
4657     case DM_DEV_STATUS:
4658     case DM_DEV_WAIT:
4659     case DM_TABLE_STATUS:
4660     case DM_TABLE_CLEAR:
4661     case DM_TABLE_DEPS:
4662     case DM_LIST_VERSIONS:
4663         /* no input data */
4664         break;
4665     case DM_DEV_RENAME:
4666     case DM_DEV_SET_GEOMETRY:
4667         /* data contains only strings */
4668         memcpy(host_data, argptr, guest_data_size);
4669         break;
4670     case DM_TARGET_MSG:
4671         memcpy(host_data, argptr, guest_data_size);
4672         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4673         break;
4674     case DM_TABLE_LOAD:
4675     {
4676         void *gspec = argptr;
4677         void *cur_data = host_data;
4678         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4679         int spec_size = thunk_type_size(arg_type, 0);
4680         int i;
4681 
4682         for (i = 0; i < host_dm->target_count; i++) {
4683             struct dm_target_spec *spec = cur_data;
4684             uint32_t next;
4685             int slen;
4686 
4687             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4688             slen = strlen((char*)gspec + spec_size) + 1;
4689             next = spec->next;
4690             spec->next = sizeof(*spec) + slen;
4691             strcpy((char*)&spec[1], gspec + spec_size);
4692             gspec += next;
4693             cur_data += spec->next;
4694         }
4695         break;
4696     }
4697     default:
4698         ret = -TARGET_EINVAL;
4699         unlock_user(argptr, guest_data, 0);
4700         goto out;
4701     }
4702     unlock_user(argptr, guest_data, 0);
4703 
4704     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4705     if (!is_error(ret)) {
4706         guest_data = arg + host_dm->data_start;
4707         guest_data_size = host_dm->data_size - host_dm->data_start;
4708         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4709         switch (ie->host_cmd) {
4710         case DM_REMOVE_ALL:
4711         case DM_DEV_CREATE:
4712         case DM_DEV_REMOVE:
4713         case DM_DEV_RENAME:
4714         case DM_DEV_SUSPEND:
4715         case DM_DEV_STATUS:
4716         case DM_TABLE_LOAD:
4717         case DM_TABLE_CLEAR:
4718         case DM_TARGET_MSG:
4719         case DM_DEV_SET_GEOMETRY:
4720             /* no return data */
4721             break;
4722         case DM_LIST_DEVICES:
4723         {
4724             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4725             uint32_t remaining_data = guest_data_size;
4726             void *cur_data = argptr;
4727             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4728             int nl_size = 12; /* can't use thunk_size due to alignment */
4729 
4730             while (1) {
4731                 uint32_t next = nl->next;
4732                 if (next) {
4733                     nl->next = nl_size + (strlen(nl->name) + 1);
4734                 }
4735                 if (remaining_data < nl->next) {
4736                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4737                     break;
4738                 }
4739                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4740                 strcpy(cur_data + nl_size, nl->name);
4741                 cur_data += nl->next;
4742                 remaining_data -= nl->next;
4743                 if (!next) {
4744                     break;
4745                 }
4746                 nl = (void*)nl + next;
4747             }
4748             break;
4749         }
4750         case DM_DEV_WAIT:
4751         case DM_TABLE_STATUS:
4752         {
4753             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4754             void *cur_data = argptr;
4755             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4756             int spec_size = thunk_type_size(arg_type, 0);
4757             int i;
4758 
4759             for (i = 0; i < host_dm->target_count; i++) {
4760                 uint32_t next = spec->next;
4761                 int slen = strlen((char*)&spec[1]) + 1;
4762                 spec->next = (cur_data - argptr) + spec_size + slen;
4763                 if (guest_data_size < spec->next) {
4764                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4765                     break;
4766                 }
4767                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4768                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4769                 cur_data = argptr + spec->next;
4770                 spec = (void*)host_dm + host_dm->data_start + next;
4771             }
4772             break;
4773         }
4774         case DM_TABLE_DEPS:
4775         {
4776             void *hdata = (void*)host_dm + host_dm->data_start;
4777             int count = *(uint32_t*)hdata;
4778             uint64_t *hdev = hdata + 8;
4779             uint64_t *gdev = argptr + 8;
4780             int i;
4781 
4782             *(uint32_t*)argptr = tswap32(count);
4783             for (i = 0; i < count; i++) {
4784                 *gdev = tswap64(*hdev);
4785                 gdev++;
4786                 hdev++;
4787             }
4788             break;
4789         }
4790         case DM_LIST_VERSIONS:
4791         {
4792             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4793             uint32_t remaining_data = guest_data_size;
4794             void *cur_data = argptr;
4795             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4796             int vers_size = thunk_type_size(arg_type, 0);
4797 
4798             while (1) {
4799                 uint32_t next = vers->next;
4800                 if (next) {
4801                     vers->next = vers_size + (strlen(vers->name) + 1);
4802                 }
4803                 if (remaining_data < vers->next) {
4804                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4805                     break;
4806                 }
4807                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4808                 strcpy(cur_data + vers_size, vers->name);
4809                 cur_data += vers->next;
4810                 remaining_data -= vers->next;
4811                 if (!next) {
4812                     break;
4813                 }
4814                 vers = (void*)vers + next;
4815             }
4816             break;
4817         }
4818         default:
4819             unlock_user(argptr, guest_data, 0);
4820             ret = -TARGET_EINVAL;
4821             goto out;
4822         }
4823         unlock_user(argptr, guest_data, guest_data_size);
4824 
4825         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4826         if (!argptr) {
4827             ret = -TARGET_EFAULT;
4828             goto out;
4829         }
4830         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4831         unlock_user(argptr, arg, target_size);
4832     }
4833 out:
4834     g_free(big_buf);
4835     return ret;
4836 }
4837 
4838 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4839                                int cmd, abi_long arg)
4840 {
4841     void *argptr;
4842     int target_size;
4843     const argtype *arg_type = ie->arg_type;
4844     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4845     abi_long ret;
4846 
4847     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4848     struct blkpg_partition host_part;
4849 
4850     /* Read and convert blkpg */
4851     arg_type++;
4852     target_size = thunk_type_size(arg_type, 0);
4853     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4854     if (!argptr) {
4855         ret = -TARGET_EFAULT;
4856         goto out;
4857     }
4858     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4859     unlock_user(argptr, arg, 0);
4860 
4861     switch (host_blkpg->op) {
4862     case BLKPG_ADD_PARTITION:
4863     case BLKPG_DEL_PARTITION:
4864         /* payload is struct blkpg_partition */
4865         break;
4866     default:
4867         /* Unknown opcode */
4868         ret = -TARGET_EINVAL;
4869         goto out;
4870     }
4871 
4872     /* Read and convert blkpg->data */
4873     arg = (abi_long)(uintptr_t)host_blkpg->data;
4874     target_size = thunk_type_size(part_arg_type, 0);
4875     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4876     if (!argptr) {
4877         ret = -TARGET_EFAULT;
4878         goto out;
4879     }
4880     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4881     unlock_user(argptr, arg, 0);
4882 
4883     /* Swizzle the data pointer to our local copy and call! */
4884     host_blkpg->data = &host_part;
4885     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4886 
4887 out:
4888     return ret;
4889 }
4890 
4891 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4892                                 int fd, int cmd, abi_long arg)
4893 {
4894     const argtype *arg_type = ie->arg_type;
4895     const StructEntry *se;
4896     const argtype *field_types;
4897     const int *dst_offsets, *src_offsets;
4898     int target_size;
4899     void *argptr;
4900     abi_ulong *target_rt_dev_ptr = NULL;
4901     unsigned long *host_rt_dev_ptr = NULL;
4902     abi_long ret;
4903     int i;
4904 
4905     assert(ie->access == IOC_W);
4906     assert(*arg_type == TYPE_PTR);
4907     arg_type++;
4908     assert(*arg_type == TYPE_STRUCT);
4909     target_size = thunk_type_size(arg_type, 0);
4910     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4911     if (!argptr) {
4912         return -TARGET_EFAULT;
4913     }
4914     arg_type++;
4915     assert(*arg_type == (int)STRUCT_rtentry);
4916     se = struct_entries + *arg_type++;
4917     assert(se->convert[0] == NULL);
4918     /* convert struct here to be able to catch rt_dev string */
4919     field_types = se->field_types;
4920     dst_offsets = se->field_offsets[THUNK_HOST];
4921     src_offsets = se->field_offsets[THUNK_TARGET];
4922     for (i = 0; i < se->nb_fields; i++) {
4923         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4924             assert(*field_types == TYPE_PTRVOID);
4925             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4926             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4927             if (*target_rt_dev_ptr != 0) {
4928                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4929                                                   tswapal(*target_rt_dev_ptr));
4930                 if (!*host_rt_dev_ptr) {
4931                     unlock_user(argptr, arg, 0);
4932                     return -TARGET_EFAULT;
4933                 }
4934             } else {
4935                 *host_rt_dev_ptr = 0;
4936             }
4937             field_types++;
4938             continue;
4939         }
4940         field_types = thunk_convert(buf_temp + dst_offsets[i],
4941                                     argptr + src_offsets[i],
4942                                     field_types, THUNK_HOST);
4943     }
4944     unlock_user(argptr, arg, 0);
4945 
4946     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4947 
4948     assert(host_rt_dev_ptr != NULL);
4949     assert(target_rt_dev_ptr != NULL);
4950     if (*host_rt_dev_ptr != 0) {
4951         unlock_user((void *)*host_rt_dev_ptr,
4952                     *target_rt_dev_ptr, 0);
4953     }
4954     return ret;
4955 }
4956 
4957 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4958                                      int fd, int cmd, abi_long arg)
4959 {
4960     int sig = target_to_host_signal(arg);
4961     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4962 }
4963 
4964 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4965                                     int fd, int cmd, abi_long arg)
4966 {
4967     struct timeval tv;
4968     abi_long ret;
4969 
4970     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4971     if (is_error(ret)) {
4972         return ret;
4973     }
4974 
4975     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4976         if (copy_to_user_timeval(arg, &tv)) {
4977             return -TARGET_EFAULT;
4978         }
4979     } else {
4980         if (copy_to_user_timeval64(arg, &tv)) {
4981             return -TARGET_EFAULT;
4982         }
4983     }
4984 
4985     return ret;
4986 }
4987 
4988 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4989                                       int fd, int cmd, abi_long arg)
4990 {
4991     struct timespec ts;
4992     abi_long ret;
4993 
4994     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
4995     if (is_error(ret)) {
4996         return ret;
4997     }
4998 
4999     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5000         if (host_to_target_timespec(arg, &ts)) {
5001             return -TARGET_EFAULT;
5002         }
5003     } else {
5004         if (host_to_target_timespec64(arg, &ts)) {
5005             return -TARGET_EFAULT;
5006         }
5007     }
5008 
5009     return ret;
5010 }
5011 
5012 #ifdef TIOCGPTPEER
5013 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5014                                      int fd, int cmd, abi_long arg)
5015 {
5016     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5017     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5018 }
5019 #endif
5020 
5021 static IOCTLEntry ioctl_entries[] = {
5022 #define IOCTL(cmd, access, ...) \
5023     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5024 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5025     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5026 #define IOCTL_IGNORE(cmd) \
5027     { TARGET_ ## cmd, 0, #cmd },
5028 #include "ioctls.h"
5029     { 0, 0, },
5030 };
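
/*
 * Illustrative sketch (hypothetical entry; see ioctls.h for the real
 * list): each line of ioctls.h goes through the IOCTL* macros above, so
 * an entry written as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * expands to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * giving do_ioctl() below the target/host command numbers and the argtype
 * description used to thunk the third argument.
 */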
5031 
5032 /* ??? Implement proper locking for ioctls.  */
5033 /* do_ioctl() must return target values and target errnos. */
5034 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5035 {
5036     const IOCTLEntry *ie;
5037     const argtype *arg_type;
5038     abi_long ret;
5039     uint8_t buf_temp[MAX_STRUCT_SIZE];
5040     int target_size;
5041     void *argptr;
5042 
5043     ie = ioctl_entries;
5044     for(;;) {
5045         if (ie->target_cmd == 0) {
5046             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5047             return -TARGET_ENOSYS;
5048         }
5049         if (ie->target_cmd == cmd)
5050             break;
5051         ie++;
5052     }
5053     arg_type = ie->arg_type;
5054     if (ie->do_ioctl) {
5055         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5056     } else if (!ie->host_cmd) {
5057         /* Some architectures define BSD ioctls in their headers
5058            that are not implemented in Linux.  */
5059         return -TARGET_ENOSYS;
5060     }
5061 
5062     switch(arg_type[0]) {
5063     case TYPE_NULL:
5064         /* no argument */
5065         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5066         break;
5067     case TYPE_PTRVOID:
5068     case TYPE_INT:
5069         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5070         break;
5071     case TYPE_PTR:
5072         arg_type++;
5073         target_size = thunk_type_size(arg_type, 0);
5074         switch(ie->access) {
5075         case IOC_R:
5076             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5077             if (!is_error(ret)) {
5078                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5079                 if (!argptr)
5080                     return -TARGET_EFAULT;
5081                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5082                 unlock_user(argptr, arg, target_size);
5083             }
5084             break;
5085         case IOC_W:
5086             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5087             if (!argptr)
5088                 return -TARGET_EFAULT;
5089             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5090             unlock_user(argptr, arg, 0);
5091             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5092             break;
5093         default:
5094         case IOC_RW:
5095             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5096             if (!argptr)
5097                 return -TARGET_EFAULT;
5098             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5099             unlock_user(argptr, arg, 0);
5100             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5101             if (!is_error(ret)) {
5102                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5103                 if (!argptr)
5104                     return -TARGET_EFAULT;
5105                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5106                 unlock_user(argptr, arg, target_size);
5107             }
5108             break;
5109         }
5110         break;
5111     default:
5112         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5113                  (long)cmd, arg_type[0]);
5114         ret = -TARGET_ENOSYS;
5115         break;
5116     }
5117     return ret;
5118 }
5119 
5120 static const bitmask_transtbl iflag_tbl[] = {
5121         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5122         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5123         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5124         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5125         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5126         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5127         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5128         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5129         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5130         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5131         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5132         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5133         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5134         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5135         { 0, 0, 0, 0 }
5136 };
5137 
5138 static const bitmask_transtbl oflag_tbl[] = {
5139 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5140 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5141 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5142 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5143 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5144 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5145 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5146 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5147 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5148 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5149 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5150 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5151 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5152 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5153 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5154 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5155 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5156 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5157 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5158 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5159 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5160 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5161 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5162 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5163 	{ 0, 0, 0, 0 }
5164 };
5165 
5166 static const bitmask_transtbl cflag_tbl[] = {
5167 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5168 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5169 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5170 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5171 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5172 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5173 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5174 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5175 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5176 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5177 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5178 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5179 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5180 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5181 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5182 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5183 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5184 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5185 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5186 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5187 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5188 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5189 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5190 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5191 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5192 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5193 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5194 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5195 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5196 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5197 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5198 	{ 0, 0, 0, 0 }
5199 };
5200 
5201 static const bitmask_transtbl lflag_tbl[] = {
5202 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5203 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5204 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5205 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5206 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5207 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5208 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5209 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5210 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5211 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5212 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5213 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5214 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5215 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5216 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5217 	{ 0, 0, 0, 0 }
5218 };
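
/*
 * How these tables are consumed (sketch; the actual helpers
 * target_to_host_bitmask() and host_to_target_bitmask() are defined
 * earlier in this file): each entry is
 * { target_mask, target_bits, host_mask, host_bits }.  When converting
 * target to host, the flag word is masked with target_mask and, if the
 * result matches target_bits, host_bits is OR-ed into the output.  For
 * one-to-one flags such as IGNBRK, mask and bits are identical; for
 * multi-bit fields such as NLDLY or CBAUD, the mask selects the field and
 * the bits select a single value within it.
 */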
5219 
5220 static void target_to_host_termios (void *dst, const void *src)
5221 {
5222     struct host_termios *host = dst;
5223     const struct target_termios *target = src;
5224 
5225     host->c_iflag =
5226         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5227     host->c_oflag =
5228         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5229     host->c_cflag =
5230         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5231     host->c_lflag =
5232         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5233     host->c_line = target->c_line;
5234 
5235     memset(host->c_cc, 0, sizeof(host->c_cc));
5236     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5237     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5238     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5239     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5240     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5241     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5242     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5243     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5244     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5245     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5246     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5247     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5248     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5249     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5250     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5251     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5252     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5253 }
5254 
5255 static void host_to_target_termios (void *dst, const void *src)
5256 {
5257     struct target_termios *target = dst;
5258     const struct host_termios *host = src;
5259 
5260     target->c_iflag =
5261         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5262     target->c_oflag =
5263         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5264     target->c_cflag =
5265         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5266     target->c_lflag =
5267         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5268     target->c_line = host->c_line;
5269 
5270     memset(target->c_cc, 0, sizeof(target->c_cc));
5271     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5272     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5273     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5274     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5275     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5276     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5277     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5278     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5279     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5280     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5281     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5282     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5283     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5284     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5285     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5286     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5287     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5288 }
5289 
5290 static const StructEntry struct_termios_def = {
5291     .convert = { host_to_target_termios, target_to_host_termios },
5292     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5293     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5294 };
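
/*
 * Note (assumed wiring): syscall_init() below registers this entry via
 * the STRUCT_SPECIAL() macro, presumably STRUCT_SPECIAL(termios) in
 * syscall_types.h, so that thunk_convert() calls the two hand-written
 * converters above instead of using a generic field description.
 */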
5295 
5296 static bitmask_transtbl mmap_flags_tbl[] = {
5297     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5298     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5299     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5300     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5301       MAP_ANONYMOUS, MAP_ANONYMOUS },
5302     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5303       MAP_GROWSDOWN, MAP_GROWSDOWN },
5304     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5305       MAP_DENYWRITE, MAP_DENYWRITE },
5306     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5307       MAP_EXECUTABLE, MAP_EXECUTABLE },
5308     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5309     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5310       MAP_NORESERVE, MAP_NORESERVE },
5311     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5312     /* MAP_STACK has been ignored by the kernel for quite some time.
5313        Recognize it for the target, but do not pass it through to the
5314        host.  */
5315     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5316     { 0, 0, 0, 0 }
5317 };
5318 
5319 #if defined(TARGET_I386)
5320 
5321 /* NOTE: there is really one LDT for all the threads */
5322 static uint8_t *ldt_table;
5323 
5324 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5325 {
5326     int size;
5327     void *p;
5328 
5329     if (!ldt_table)
5330         return 0;
5331     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5332     if (size > bytecount)
5333         size = bytecount;
5334     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5335     if (!p)
5336         return -TARGET_EFAULT;
5337     /* ??? Should this be byteswapped?  */
5338     memcpy(p, ldt_table, size);
5339     unlock_user(p, ptr, size);
5340     return size;
5341 }
5342 
5343 /* XXX: add locking support */
5344 static abi_long write_ldt(CPUX86State *env,
5345                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5346 {
5347     struct target_modify_ldt_ldt_s ldt_info;
5348     struct target_modify_ldt_ldt_s *target_ldt_info;
5349     int seg_32bit, contents, read_exec_only, limit_in_pages;
5350     int seg_not_present, useable, lm;
5351     uint32_t *lp, entry_1, entry_2;
5352 
5353     if (bytecount != sizeof(ldt_info))
5354         return -TARGET_EINVAL;
5355     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5356         return -TARGET_EFAULT;
5357     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5358     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5359     ldt_info.limit = tswap32(target_ldt_info->limit);
5360     ldt_info.flags = tswap32(target_ldt_info->flags);
5361     unlock_user_struct(target_ldt_info, ptr, 0);
5362 
5363     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5364         return -TARGET_EINVAL;
5365     seg_32bit = ldt_info.flags & 1;
5366     contents = (ldt_info.flags >> 1) & 3;
5367     read_exec_only = (ldt_info.flags >> 3) & 1;
5368     limit_in_pages = (ldt_info.flags >> 4) & 1;
5369     seg_not_present = (ldt_info.flags >> 5) & 1;
5370     useable = (ldt_info.flags >> 6) & 1;
5371 #ifdef TARGET_ABI32
5372     lm = 0;
5373 #else
5374     lm = (ldt_info.flags >> 7) & 1;
5375 #endif
5376     if (contents == 3) {
5377         if (oldmode)
5378             return -TARGET_EINVAL;
5379         if (seg_not_present == 0)
5380             return -TARGET_EINVAL;
5381     }
5382     /* allocate the LDT */
5383     if (!ldt_table) {
5384         env->ldt.base = target_mmap(0,
5385                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5386                                     PROT_READ|PROT_WRITE,
5387                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5388         if (env->ldt.base == -1)
5389             return -TARGET_ENOMEM;
5390         memset(g2h(env->ldt.base), 0,
5391                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5392         env->ldt.limit = 0xffff;
5393         ldt_table = g2h(env->ldt.base);
5394     }
5395 
5396     /* NOTE: same code as Linux kernel */
5397     /* Allow LDTs to be cleared by the user. */
5398     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5399         if (oldmode ||
5400             (contents == 0		&&
5401              read_exec_only == 1	&&
5402              seg_32bit == 0		&&
5403              limit_in_pages == 0	&&
5404              seg_not_present == 1	&&
5405              useable == 0 )) {
5406             entry_1 = 0;
5407             entry_2 = 0;
5408             goto install;
5409         }
5410     }
5411 
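    /*
     * Descriptive note: entry_1/entry_2 are the two 32-bit words of a
     * standard x86 segment descriptor.  entry_1 packs base[15:0] and
     * limit[15:0]; entry_2 packs base[31:24] and base[23:16], limit[19:16],
     * G (limit_in_pages), D/B (seg_32bit), L (lm), AVL (useable),
     * P (!seg_not_present), the type bits derived from "contents" and
     * read_exec_only, and the constant 0x7000 which sets S=1 and DPL=3.
     */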
5412     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5413         (ldt_info.limit & 0x0ffff);
5414     entry_2 = (ldt_info.base_addr & 0xff000000) |
5415         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5416         (ldt_info.limit & 0xf0000) |
5417         ((read_exec_only ^ 1) << 9) |
5418         (contents << 10) |
5419         ((seg_not_present ^ 1) << 15) |
5420         (seg_32bit << 22) |
5421         (limit_in_pages << 23) |
5422         (lm << 21) |
5423         0x7000;
5424     if (!oldmode)
5425         entry_2 |= (useable << 20);
5426 
5427     /* Install the new entry ...  */
5428 install:
5429     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5430     lp[0] = tswap32(entry_1);
5431     lp[1] = tswap32(entry_2);
5432     return 0;
5433 }
5434 
5435 /* specific and weird i386 syscalls */
5436 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5437                               unsigned long bytecount)
5438 {
5439     abi_long ret;
5440 
5441     switch (func) {
5442     case 0:
5443         ret = read_ldt(ptr, bytecount);
5444         break;
5445     case 1:
5446         ret = write_ldt(env, ptr, bytecount, 1);
5447         break;
5448     case 0x11:
5449         ret = write_ldt(env, ptr, bytecount, 0);
5450         break;
5451     default:
5452         ret = -TARGET_ENOSYS;
5453         break;
5454     }
5455     return ret;
5456 }
5457 
5458 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5459 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5460 {
5461     uint64_t *gdt_table = g2h(env->gdt.base);
5462     struct target_modify_ldt_ldt_s ldt_info;
5463     struct target_modify_ldt_ldt_s *target_ldt_info;
5464     int seg_32bit, contents, read_exec_only, limit_in_pages;
5465     int seg_not_present, useable, lm;
5466     uint32_t *lp, entry_1, entry_2;
5467     int i;
5468 
5469     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5470     if (!target_ldt_info)
5471         return -TARGET_EFAULT;
5472     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5473     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5474     ldt_info.limit = tswap32(target_ldt_info->limit);
5475     ldt_info.flags = tswap32(target_ldt_info->flags);
5476     if (ldt_info.entry_number == -1) {
5477         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5478             if (gdt_table[i] == 0) {
5479                 ldt_info.entry_number = i;
5480                 target_ldt_info->entry_number = tswap32(i);
5481                 break;
5482             }
5483         }
5484     }
5485     unlock_user_struct(target_ldt_info, ptr, 1);
5486 
5487     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5488         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5489            return -TARGET_EINVAL;
5490     seg_32bit = ldt_info.flags & 1;
5491     contents = (ldt_info.flags >> 1) & 3;
5492     read_exec_only = (ldt_info.flags >> 3) & 1;
5493     limit_in_pages = (ldt_info.flags >> 4) & 1;
5494     seg_not_present = (ldt_info.flags >> 5) & 1;
5495     useable = (ldt_info.flags >> 6) & 1;
5496 #ifdef TARGET_ABI32
5497     lm = 0;
5498 #else
5499     lm = (ldt_info.flags >> 7) & 1;
5500 #endif
5501 
5502     if (contents == 3) {
5503         if (seg_not_present == 0)
5504             return -TARGET_EINVAL;
5505     }
5506 
5507     /* NOTE: same code as Linux kernel */
5508     /* Allow LDTs to be cleared by the user. */
5509     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5510         if ((contents == 0             &&
5511              read_exec_only == 1       &&
5512              seg_32bit == 0            &&
5513              limit_in_pages == 0       &&
5514              seg_not_present == 1      &&
5515              useable == 0 )) {
5516             entry_1 = 0;
5517             entry_2 = 0;
5518             goto install;
5519         }
5520     }
5521 
5522     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5523         (ldt_info.limit & 0x0ffff);
5524     entry_2 = (ldt_info.base_addr & 0xff000000) |
5525         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5526         (ldt_info.limit & 0xf0000) |
5527         ((read_exec_only ^ 1) << 9) |
5528         (contents << 10) |
5529         ((seg_not_present ^ 1) << 15) |
5530         (seg_32bit << 22) |
5531         (limit_in_pages << 23) |
5532         (useable << 20) |
5533         (lm << 21) |
5534         0x7000;
5535 
5536     /* Install the new entry ...  */
5537 install:
5538     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5539     lp[0] = tswap32(entry_1);
5540     lp[1] = tswap32(entry_2);
5541     return 0;
5542 }
5543 
5544 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5545 {
5546     struct target_modify_ldt_ldt_s *target_ldt_info;
5547     uint64_t *gdt_table = g2h(env->gdt.base);
5548     uint32_t base_addr, limit, flags;
5549     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5550     int seg_not_present, useable, lm;
5551     uint32_t *lp, entry_1, entry_2;
5552 
5553     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5554     if (!target_ldt_info)
5555         return -TARGET_EFAULT;
5556     idx = tswap32(target_ldt_info->entry_number);
5557     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5558         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5559         unlock_user_struct(target_ldt_info, ptr, 1);
5560         return -TARGET_EINVAL;
5561     }
5562     lp = (uint32_t *)(gdt_table + idx);
5563     entry_1 = tswap32(lp[0]);
5564     entry_2 = tswap32(lp[1]);
5565 
5566     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5567     contents = (entry_2 >> 10) & 3;
5568     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5569     seg_32bit = (entry_2 >> 22) & 1;
5570     limit_in_pages = (entry_2 >> 23) & 1;
5571     useable = (entry_2 >> 20) & 1;
5572 #ifdef TARGET_ABI32
5573     lm = 0;
5574 #else
5575     lm = (entry_2 >> 21) & 1;
5576 #endif
5577     flags = (seg_32bit << 0) | (contents << 1) |
5578         (read_exec_only << 3) | (limit_in_pages << 4) |
5579         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5580     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5581     base_addr = (entry_1 >> 16) |
5582         (entry_2 & 0xff000000) |
5583         ((entry_2 & 0xff) << 16);
5584     target_ldt_info->base_addr = tswapal(base_addr);
5585     target_ldt_info->limit = tswap32(limit);
5586     target_ldt_info->flags = tswap32(flags);
5587     unlock_user_struct(target_ldt_info, ptr, 1);
5588     return 0;
5589 }
5590 #endif /* TARGET_I386 && TARGET_ABI32 */
5591 
5592 #ifndef TARGET_ABI32
5593 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5594 {
5595     abi_long ret = 0;
5596     abi_ulong val;
5597     int idx;
5598 
5599     switch(code) {
5600     case TARGET_ARCH_SET_GS:
5601     case TARGET_ARCH_SET_FS:
5602         if (code == TARGET_ARCH_SET_GS)
5603             idx = R_GS;
5604         else
5605             idx = R_FS;
5606         cpu_x86_load_seg(env, idx, 0);
5607         env->segs[idx].base = addr;
5608         break;
5609     case TARGET_ARCH_GET_GS:
5610     case TARGET_ARCH_GET_FS:
5611         if (code == TARGET_ARCH_GET_GS)
5612             idx = R_GS;
5613         else
5614             idx = R_FS;
5615         val = env->segs[idx].base;
5616         if (put_user(val, addr, abi_ulong))
5617             ret = -TARGET_EFAULT;
5618         break;
5619     default:
5620         ret = -TARGET_EINVAL;
5621         break;
5622     }
5623     return ret;
5624 }
5625 #endif
5626 
5627 #endif /* defined(TARGET_I386) */
5628 
5629 #define NEW_STACK_SIZE 0x40000
5630 
5631 
5632 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5633 typedef struct {
5634     CPUArchState *env;
5635     pthread_mutex_t mutex;
5636     pthread_cond_t cond;
5637     pthread_t thread;
5638     uint32_t tid;
5639     abi_ulong child_tidptr;
5640     abi_ulong parent_tidptr;
5641     sigset_t sigmask;
5642 } new_thread_info;
5643 
5644 static void *clone_func(void *arg)
5645 {
5646     new_thread_info *info = arg;
5647     CPUArchState *env;
5648     CPUState *cpu;
5649     TaskState *ts;
5650 
5651     rcu_register_thread();
5652     tcg_register_thread();
5653     env = info->env;
5654     cpu = env_cpu(env);
5655     thread_cpu = cpu;
5656     ts = (TaskState *)cpu->opaque;
5657     info->tid = sys_gettid();
5658     task_settid(ts);
5659     if (info->child_tidptr)
5660         put_user_u32(info->tid, info->child_tidptr);
5661     if (info->parent_tidptr)
5662         put_user_u32(info->tid, info->parent_tidptr);
5663     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5664     /* Enable signals.  */
5665     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5666     /* Signal to the parent that we're ready.  */
5667     pthread_mutex_lock(&info->mutex);
5668     pthread_cond_broadcast(&info->cond);
5669     pthread_mutex_unlock(&info->mutex);
5670     /* Wait until the parent has finished initializing the tls state.  */
5671     pthread_mutex_lock(&clone_lock);
5672     pthread_mutex_unlock(&clone_lock);
5673     cpu_loop(env);
5674     /* never exits */
5675     return NULL;
5676 }
5677 
5678 /* do_fork() must return host values and target errnos (unlike most
5679    do_*() functions). */
5680 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5681                    abi_ulong parent_tidptr, target_ulong newtls,
5682                    abi_ulong child_tidptr)
5683 {
5684     CPUState *cpu = env_cpu(env);
5685     int ret;
5686     TaskState *ts;
5687     CPUState *new_cpu;
5688     CPUArchState *new_env;
5689     sigset_t sigmask;
5690 
5691     flags &= ~CLONE_IGNORED_FLAGS;
5692 
5693     /* Emulate vfork() with fork() */
5694     if (flags & CLONE_VFORK)
5695         flags &= ~(CLONE_VFORK | CLONE_VM);
5696 
5697     if (flags & CLONE_VM) {
5698         TaskState *parent_ts = (TaskState *)cpu->opaque;
5699         new_thread_info info;
5700         pthread_attr_t attr;
5701 
5702         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5703             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5704             return -TARGET_EINVAL;
5705         }
5706 
5707         ts = g_new0(TaskState, 1);
5708         init_task_state(ts);
5709 
5710         /* Grab a mutex so that thread setup appears atomic.  */
5711         pthread_mutex_lock(&clone_lock);
5712 
5713         /* we create a new CPU instance. */
5714         new_env = cpu_copy(env);
5715         /* Init regs that differ from the parent.  */
5716         cpu_clone_regs(new_env, newsp);
5717         new_cpu = env_cpu(new_env);
5718         new_cpu->opaque = ts;
5719         ts->bprm = parent_ts->bprm;
5720         ts->info = parent_ts->info;
5721         ts->signal_mask = parent_ts->signal_mask;
5722 
5723         if (flags & CLONE_CHILD_CLEARTID) {
5724             ts->child_tidptr = child_tidptr;
5725         }
5726 
5727         if (flags & CLONE_SETTLS) {
5728             cpu_set_tls (new_env, newtls);
5729         }
5730 
5731         memset(&info, 0, sizeof(info));
5732         pthread_mutex_init(&info.mutex, NULL);
5733         pthread_mutex_lock(&info.mutex);
5734         pthread_cond_init(&info.cond, NULL);
5735         info.env = new_env;
5736         if (flags & CLONE_CHILD_SETTID) {
5737             info.child_tidptr = child_tidptr;
5738         }
5739         if (flags & CLONE_PARENT_SETTID) {
5740             info.parent_tidptr = parent_tidptr;
5741         }
5742 
5743         ret = pthread_attr_init(&attr);
5744         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5745         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5746         /* It is not safe to deliver signals until the child has finished
5747            initializing, so temporarily block all signals.  */
5748         sigfillset(&sigmask);
5749         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5750         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5751 
5752         /* If this is our first additional thread, we need to ensure we
5753          * generate code for parallel execution and flush old translations.
5754          */
5755         if (!parallel_cpus) {
5756             parallel_cpus = true;
5757             tb_flush(cpu);
5758         }
5759 
5760         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5761         /* TODO: Free new CPU state if thread creation failed.  */
5762 
5763         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5764         pthread_attr_destroy(&attr);
5765         if (ret == 0) {
5766             /* Wait for the child to initialize.  */
5767             pthread_cond_wait(&info.cond, &info.mutex);
5768             ret = info.tid;
5769         } else {
5770             ret = -1;
5771         }
5772         pthread_mutex_unlock(&info.mutex);
5773         pthread_cond_destroy(&info.cond);
5774         pthread_mutex_destroy(&info.mutex);
5775         pthread_mutex_unlock(&clone_lock);
5776     } else {
5777         /* if there is no CLONE_VM, we consider it a fork */
5778         if (flags & CLONE_INVALID_FORK_FLAGS) {
5779             return -TARGET_EINVAL;
5780         }
5781 
5782         /* We can't support custom termination signals */
5783         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5784             return -TARGET_EINVAL;
5785         }
5786 
5787         if (block_signals()) {
5788             return -TARGET_ERESTARTSYS;
5789         }
5790 
5791         fork_start();
5792         ret = fork();
5793         if (ret == 0) {
5794             /* Child Process.  */
5795             cpu_clone_regs(env, newsp);
5796             fork_end(1);
5797             /* There is a race condition here.  The parent process could
5798                theoretically read the TID in the child process before the child
5799                tid is set.  This would require using either ptrace
5800                (not implemented) or having *_tidptr point at a shared memory
5801                mapping.  We can't repeat the spinlock hack used above because
5802                the child process gets its own copy of the lock.  */
5803             if (flags & CLONE_CHILD_SETTID)
5804                 put_user_u32(sys_gettid(), child_tidptr);
5805             if (flags & CLONE_PARENT_SETTID)
5806                 put_user_u32(sys_gettid(), parent_tidptr);
5807             ts = (TaskState *)cpu->opaque;
5808             if (flags & CLONE_SETTLS)
5809                 cpu_set_tls (env, newtls);
5810             if (flags & CLONE_CHILD_CLEARTID)
5811                 ts->child_tidptr = child_tidptr;
5812         } else {
5813             fork_end(0);
5814         }
5815     }
5816     return ret;
5817 }
5818 
5819 /* warning: doesn't handle Linux-specific flags... */
5820 static int target_to_host_fcntl_cmd(int cmd)
5821 {
5822     int ret;
5823 
5824     switch(cmd) {
5825     case TARGET_F_DUPFD:
5826     case TARGET_F_GETFD:
5827     case TARGET_F_SETFD:
5828     case TARGET_F_GETFL:
5829     case TARGET_F_SETFL:
5830         ret = cmd;
5831         break;
5832     case TARGET_F_GETLK:
5833         ret = F_GETLK64;
5834         break;
5835     case TARGET_F_SETLK:
5836         ret = F_SETLK64;
5837         break;
5838     case TARGET_F_SETLKW:
5839         ret = F_SETLKW64;
5840         break;
5841     case TARGET_F_GETOWN:
5842         ret = F_GETOWN;
5843         break;
5844     case TARGET_F_SETOWN:
5845         ret = F_SETOWN;
5846         break;
5847     case TARGET_F_GETSIG:
5848         ret = F_GETSIG;
5849         break;
5850     case TARGET_F_SETSIG:
5851         ret = F_SETSIG;
5852         break;
5853 #if TARGET_ABI_BITS == 32
5854     case TARGET_F_GETLK64:
5855         ret = F_GETLK64;
5856         break;
5857     case TARGET_F_SETLK64:
5858         ret = F_SETLK64;
5859         break;
5860     case TARGET_F_SETLKW64:
5861         ret = F_SETLKW64;
5862         break;
5863 #endif
5864     case TARGET_F_SETLEASE:
5865         ret = F_SETLEASE;
5866         break;
5867     case TARGET_F_GETLEASE:
5868         ret = F_GETLEASE;
5869         break;
5870 #ifdef F_DUPFD_CLOEXEC
5871     case TARGET_F_DUPFD_CLOEXEC:
5872         ret = F_DUPFD_CLOEXEC;
5873         break;
5874 #endif
5875     case TARGET_F_NOTIFY:
5876         ret = F_NOTIFY;
5877         break;
5878 #ifdef F_GETOWN_EX
5879     case TARGET_F_GETOWN_EX:
5880         ret = F_GETOWN_EX;
5881         break;
5882 #endif
5883 #ifdef F_SETOWN_EX
5884     case TARGET_F_SETOWN_EX:
5885         ret = F_SETOWN_EX;
5886         break;
5887 #endif
5888 #ifdef F_SETPIPE_SZ
5889     case TARGET_F_SETPIPE_SZ:
5890         ret = F_SETPIPE_SZ;
5891         break;
5892     case TARGET_F_GETPIPE_SZ:
5893         ret = F_GETPIPE_SZ;
5894         break;
5895 #endif
5896     default:
5897         ret = -TARGET_EINVAL;
5898         break;
5899     }
5900 
5901 #if defined(__powerpc64__)
5902     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which are
5903      * not supported by the kernel. The glibc fcntl wrapper adjusts them to
5904      * 5, 6 and 7 before making the syscall(). Since we make the syscall
5905      * directly, adjust to what the kernel supports.
5906      */
5907     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5908         ret -= F_GETLK64 - 5;
5909     }
5910 #endif
5911 
5912     return ret;
5913 }
5914 
5915 #define FLOCK_TRANSTBL \
5916     switch (type) { \
5917     TRANSTBL_CONVERT(F_RDLCK); \
5918     TRANSTBL_CONVERT(F_WRLCK); \
5919     TRANSTBL_CONVERT(F_UNLCK); \
5920     TRANSTBL_CONVERT(F_EXLCK); \
5921     TRANSTBL_CONVERT(F_SHLCK); \
5922     }
5923 
5924 static int target_to_host_flock(int type)
5925 {
5926 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5927     FLOCK_TRANSTBL
5928 #undef  TRANSTBL_CONVERT
5929     return -TARGET_EINVAL;
5930 }
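
/*
 * For readability: with TRANSTBL_CONVERT defined as above, the macro
 * expands target_to_host_flock() into
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     case TARGET_F_EXLCK: return F_EXLCK;
 *     case TARGET_F_SHLCK: return F_SHLCK;
 *     }
 *
 * and host_to_target_flock() below into the mirror-image switch.
 */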
5931 
5932 static int host_to_target_flock(int type)
5933 {
5934 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5935     FLOCK_TRANSTBL
5936 #undef  TRANSTBL_CONVERT
5937     /* if we don't know how to convert the value coming
5938      * from the host, we copy it to the target field as-is
5939      */
5940     return type;
5941 }
5942 
5943 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5944                                             abi_ulong target_flock_addr)
5945 {
5946     struct target_flock *target_fl;
5947     int l_type;
5948 
5949     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5950         return -TARGET_EFAULT;
5951     }
5952 
5953     __get_user(l_type, &target_fl->l_type);
5954     l_type = target_to_host_flock(l_type);
5955     if (l_type < 0) {
5956         return l_type;
5957     }
5958     fl->l_type = l_type;
5959     __get_user(fl->l_whence, &target_fl->l_whence);
5960     __get_user(fl->l_start, &target_fl->l_start);
5961     __get_user(fl->l_len, &target_fl->l_len);
5962     __get_user(fl->l_pid, &target_fl->l_pid);
5963     unlock_user_struct(target_fl, target_flock_addr, 0);
5964     return 0;
5965 }
5966 
5967 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5968                                           const struct flock64 *fl)
5969 {
5970     struct target_flock *target_fl;
5971     short l_type;
5972 
5973     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5974         return -TARGET_EFAULT;
5975     }
5976 
5977     l_type = host_to_target_flock(fl->l_type);
5978     __put_user(l_type, &target_fl->l_type);
5979     __put_user(fl->l_whence, &target_fl->l_whence);
5980     __put_user(fl->l_start, &target_fl->l_start);
5981     __put_user(fl->l_len, &target_fl->l_len);
5982     __put_user(fl->l_pid, &target_fl->l_pid);
5983     unlock_user_struct(target_fl, target_flock_addr, 1);
5984     return 0;
5985 }
5986 
5987 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5988 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5989 
5990 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5991 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5992                                                    abi_ulong target_flock_addr)
5993 {
5994     struct target_oabi_flock64 *target_fl;
5995     int l_type;
5996 
5997     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5998         return -TARGET_EFAULT;
5999     }
6000 
6001     __get_user(l_type, &target_fl->l_type);
6002     l_type = target_to_host_flock(l_type);
6003     if (l_type < 0) {
6004         return l_type;
6005     }
6006     fl->l_type = l_type;
6007     __get_user(fl->l_whence, &target_fl->l_whence);
6008     __get_user(fl->l_start, &target_fl->l_start);
6009     __get_user(fl->l_len, &target_fl->l_len);
6010     __get_user(fl->l_pid, &target_fl->l_pid);
6011     unlock_user_struct(target_fl, target_flock_addr, 0);
6012     return 0;
6013 }
6014 
6015 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6016                                                  const struct flock64 *fl)
6017 {
6018     struct target_oabi_flock64 *target_fl;
6019     short l_type;
6020 
6021     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6022         return -TARGET_EFAULT;
6023     }
6024 
6025     l_type = host_to_target_flock(fl->l_type);
6026     __put_user(l_type, &target_fl->l_type);
6027     __put_user(fl->l_whence, &target_fl->l_whence);
6028     __put_user(fl->l_start, &target_fl->l_start);
6029     __put_user(fl->l_len, &target_fl->l_len);
6030     __put_user(fl->l_pid, &target_fl->l_pid);
6031     unlock_user_struct(target_fl, target_flock_addr, 1);
6032     return 0;
6033 }
6034 #endif
6035 
6036 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6037                                               abi_ulong target_flock_addr)
6038 {
6039     struct target_flock64 *target_fl;
6040     int l_type;
6041 
6042     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6043         return -TARGET_EFAULT;
6044     }
6045 
6046     __get_user(l_type, &target_fl->l_type);
6047     l_type = target_to_host_flock(l_type);
6048     if (l_type < 0) {
6049         return l_type;
6050     }
6051     fl->l_type = l_type;
6052     __get_user(fl->l_whence, &target_fl->l_whence);
6053     __get_user(fl->l_start, &target_fl->l_start);
6054     __get_user(fl->l_len, &target_fl->l_len);
6055     __get_user(fl->l_pid, &target_fl->l_pid);
6056     unlock_user_struct(target_fl, target_flock_addr, 0);
6057     return 0;
6058 }
6059 
6060 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6061                                             const struct flock64 *fl)
6062 {
6063     struct target_flock64 *target_fl;
6064     short l_type;
6065 
6066     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6067         return -TARGET_EFAULT;
6068     }
6069 
6070     l_type = host_to_target_flock(fl->l_type);
6071     __put_user(l_type, &target_fl->l_type);
6072     __put_user(fl->l_whence, &target_fl->l_whence);
6073     __put_user(fl->l_start, &target_fl->l_start);
6074     __put_user(fl->l_len, &target_fl->l_len);
6075     __put_user(fl->l_pid, &target_fl->l_pid);
6076     unlock_user_struct(target_fl, target_flock_addr, 1);
6077     return 0;
6078 }
6079 
6080 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6081 {
6082     struct flock64 fl64;
6083 #ifdef F_GETOWN_EX
6084     struct f_owner_ex fox;
6085     struct target_f_owner_ex *target_fox;
6086 #endif
6087     abi_long ret;
6088     int host_cmd = target_to_host_fcntl_cmd(cmd);
6089 
6090     if (host_cmd == -TARGET_EINVAL)
6091         return host_cmd;
6092 
6093     switch(cmd) {
6094     case TARGET_F_GETLK:
6095         ret = copy_from_user_flock(&fl64, arg);
6096         if (ret) {
6097             return ret;
6098         }
6099         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6100         if (ret == 0) {
6101             ret = copy_to_user_flock(arg, &fl64);
6102         }
6103         break;
6104 
6105     case TARGET_F_SETLK:
6106     case TARGET_F_SETLKW:
6107         ret = copy_from_user_flock(&fl64, arg);
6108         if (ret) {
6109             return ret;
6110         }
6111         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6112         break;
6113 
6114     case TARGET_F_GETLK64:
6115         ret = copy_from_user_flock64(&fl64, arg);
6116         if (ret) {
6117             return ret;
6118         }
6119         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6120         if (ret == 0) {
6121             ret = copy_to_user_flock64(arg, &fl64);
6122         }
6123         break;
6124     case TARGET_F_SETLK64:
6125     case TARGET_F_SETLKW64:
6126         ret = copy_from_user_flock64(&fl64, arg);
6127         if (ret) {
6128             return ret;
6129         }
6130         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6131         break;
6132 
6133     case TARGET_F_GETFL:
6134         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6135         if (ret >= 0) {
6136             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6137         }
6138         break;
6139 
6140     case TARGET_F_SETFL:
6141         ret = get_errno(safe_fcntl(fd, host_cmd,
6142                                    target_to_host_bitmask(arg,
6143                                                           fcntl_flags_tbl)));
6144         break;
6145 
6146 #ifdef F_GETOWN_EX
6147     case TARGET_F_GETOWN_EX:
6148         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6149         if (ret >= 0) {
6150             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6151                 return -TARGET_EFAULT;
6152             target_fox->type = tswap32(fox.type);
6153             target_fox->pid = tswap32(fox.pid);
6154             unlock_user_struct(target_fox, arg, 1);
6155         }
6156         break;
6157 #endif
6158 
6159 #ifdef F_SETOWN_EX
6160     case TARGET_F_SETOWN_EX:
6161         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6162             return -TARGET_EFAULT;
6163         fox.type = tswap32(target_fox->type);
6164         fox.pid = tswap32(target_fox->pid);
6165         unlock_user_struct(target_fox, arg, 0);
6166         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6167         break;
6168 #endif
6169 
6170     case TARGET_F_SETOWN:
6171     case TARGET_F_GETOWN:
6172     case TARGET_F_SETSIG:
6173     case TARGET_F_GETSIG:
6174     case TARGET_F_SETLEASE:
6175     case TARGET_F_GETLEASE:
6176     case TARGET_F_SETPIPE_SZ:
6177     case TARGET_F_GETPIPE_SZ:
6178         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6179         break;
6180 
6181     default:
6182         ret = get_errno(safe_fcntl(fd, cmd, arg));
6183         break;
6184     }
6185     return ret;
6186 }
6187 
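/*
 * Illustrative sketch, not part of the build: why TARGET_F_GETFL/F_SETFL
 * above go through the fcntl_flags_tbl bitmask tables.  Open-file status
 * flags such as O_NONBLOCK have different numeric values on some guests
 * (MIPS and Alpha, for example) than on the host, so the bits must be
 * re-encoded rather than passed through unchanged.
 */
#if 0
static void example_fcntl_flag_translation(int fd)
{
    /* Roughly what do_fcntl() does for TARGET_F_SETFL: re-encode the
       guest's flag bits into the host's encoding before the real fcntl().
       TARGET_O_NONBLOCK is used here purely as an example flag. */
    int host_flags = target_to_host_bitmask(TARGET_O_NONBLOCK, fcntl_flags_tbl);
    fcntl(fd, F_SETFL, host_flags);
}
#endif
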
6188 #ifdef USE_UID16
6189 
6190 static inline int high2lowuid(int uid)
6191 {
6192     if (uid > 65535)
6193         return 65534;
6194     else
6195         return uid;
6196 }
6197 
6198 static inline int high2lowgid(int gid)
6199 {
6200     if (gid > 65535)
6201         return 65534;
6202     else
6203         return gid;
6204 }
6205 
6206 static inline int low2highuid(int uid)
6207 {
6208     if ((int16_t)uid == -1)
6209         return -1;
6210     else
6211         return uid;
6212 }
6213 
6214 static inline int low2highgid(int gid)
6215 {
6216     if ((int16_t)gid == -1)
6217         return -1;
6218     else
6219         return gid;
6220 }
6221 static inline int tswapid(int id)
6222 {
6223     return tswap16(id);
6224 }
6225 
6226 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6227 
6228 #else /* !USE_UID16 */
6229 static inline int high2lowuid(int uid)
6230 {
6231     return uid;
6232 }
6233 static inline int high2lowgid(int gid)
6234 {
6235     return gid;
6236 }
6237 static inline int low2highuid(int uid)
6238 {
6239     return uid;
6240 }
6241 static inline int low2highgid(int gid)
6242 {
6243     return gid;
6244 }
6245 static inline int tswapid(int id)
6246 {
6247     return tswap32(id);
6248 }
6249 
6250 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6251 
6252 #endif /* USE_UID16 */
6253 
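/*
 * Illustrative sketch, not part of the build: how the UID16 helpers above
 * behave on a target built with USE_UID16 (i.e. one whose legacy syscalls
 * only carry 16-bit uid_t values).  The numbers are example values chosen
 * here, and assert() is used purely for illustration.
 */
#if 0
static void example_uid16_conversion(void)
{
    /* A host uid that fits in 16 bits passes through unchanged. */
    assert(high2lowuid(1000) == 1000);
    /* A host uid above 65535 cannot be represented in 16 bits; it is
       reported as the traditional "overflow" uid 65534 instead. */
    assert(high2lowuid(100000) == 65534);
    /* The guest value 0xffff means "no change" (-1) for chown() and
       friends, so it must stay -1 rather than become uid 65535. */
    assert(low2highuid(0xffff) == -1);
    assert(low2highuid(500) == 500);
}
#endif
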
6254 /* We must do direct syscalls for setting UID/GID, because we want to
6255  * implement the Linux system call semantics of "change only for this thread",
6256  * not the libc/POSIX semantics of "change for all threads in process".
6257  * (See http://ewontfix.com/17/ for more details.)
6258  * We use the 32-bit version of the syscalls if present; if it is not
6259  * then either the host architecture supports 32-bit UIDs natively with
6260  * the standard syscall, or the 16-bit UID is the best we can do.
6261  */
6262 #ifdef __NR_setuid32
6263 #define __NR_sys_setuid __NR_setuid32
6264 #else
6265 #define __NR_sys_setuid __NR_setuid
6266 #endif
6267 #ifdef __NR_setgid32
6268 #define __NR_sys_setgid __NR_setgid32
6269 #else
6270 #define __NR_sys_setgid __NR_setgid
6271 #endif
6272 #ifdef __NR_setresuid32
6273 #define __NR_sys_setresuid __NR_setresuid32
6274 #else
6275 #define __NR_sys_setresuid __NR_setresuid
6276 #endif
6277 #ifdef __NR_setresgid32
6278 #define __NR_sys_setresgid __NR_setresgid32
6279 #else
6280 #define __NR_sys_setresgid __NR_setresgid
6281 #endif
6282 
6283 _syscall1(int, sys_setuid, uid_t, uid)
6284 _syscall1(int, sys_setgid, gid_t, gid)
6285 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6286 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6287 
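/*
 * Illustrative sketch, not part of the build: the wrappers above are used
 * later in this file roughly as shown here, so that a guest setuid() only
 * affects the calling thread (kernel semantics) rather than every thread
 * in the process (libc semantics).  This is a simplified paraphrase of the
 * TARGET_NR_setuid handling, not the exact code.
 */
#if 0
static abi_long example_target_setuid(abi_ulong guest_uid)
{
    /* low2highuid() widens a possible 16-bit guest uid, then the raw
       syscall changes the uid of this thread only. */
    return get_errno(sys_setuid(low2highuid(guest_uid)));
}
#endif
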
6288 void syscall_init(void)
6289 {
6290     IOCTLEntry *ie;
6291     const argtype *arg_type;
6292     int size;
6293     int i;
6294 
6295     thunk_init(STRUCT_MAX);
6296 
6297 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6298 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6299 #include "syscall_types.h"
6300 #undef STRUCT
6301 #undef STRUCT_SPECIAL
6302 
6303     /* Build target_to_host_errno_table[] from
6304      * host_to_target_errno_table[]. */
6305     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6306         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6307     }
6308 
6309     /* We patch the ioctl size if necessary. We rely on the fact that
6310        no ioctl has all bits set to '1' in the size field. */
6311     ie = ioctl_entries;
6312     while (ie->target_cmd != 0) {
6313         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6314             TARGET_IOC_SIZEMASK) {
6315             arg_type = ie->arg_type;
6316             if (arg_type[0] != TYPE_PTR) {
6317                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6318                         ie->target_cmd);
6319                 exit(1);
6320             }
6321             arg_type++;
6322             size = thunk_type_size(arg_type, 0);
6323             ie->target_cmd = (ie->target_cmd &
6324                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6325                 (size << TARGET_IOC_SIZESHIFT);
6326         }
6327 
6328         /* automatic consistency check if same arch */
6329 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6330     (defined(__x86_64__) && defined(TARGET_X86_64))
6331         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6332             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6333                     ie->name, ie->target_cmd, ie->host_cmd);
6334         }
6335 #endif
6336         ie++;
6337     }
6338 }
6339 
6340 #if TARGET_ABI_BITS == 32
6341 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6342 {
6343 #ifdef TARGET_WORDS_BIGENDIAN
6344     return ((uint64_t)word0 << 32) | word1;
6345 #else
6346     return ((uint64_t)word1 << 32) | word0;
6347 #endif
6348 }
6349 #else /* TARGET_ABI_BITS == 32 */
6350 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6351 {
6352     return word0;
6353 }
6354 #endif /* TARGET_ABI_BITS != 32 */
6355 
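/*
 * Illustrative sketch, not part of the build: on a 32-bit guest ABI
 * (TARGET_ABI_BITS == 32, which this sketch assumes) a 64-bit file offset
 * arrives split across two syscall argument registers, possibly preceded
 * by a padding register when the ABI requires register-pair alignment.
 * target_offset64() hides which word is the high half.
 */
#if 0
static void example_offset64(void)
{
    /* The guest passed the 64-bit offset 0x0000000123456789 as two
       32-bit words; the word order depends on guest endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t off = target_offset64(0x00000001, 0x23456789);
#else
    uint64_t off = target_offset64(0x23456789, 0x00000001);
#endif
    assert(off == 0x0000000123456789ULL);
}
#endif
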
6356 #ifdef TARGET_NR_truncate64
6357 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6358                                          abi_long arg2,
6359                                          abi_long arg3,
6360                                          abi_long arg4)
6361 {
6362     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6363         arg2 = arg3;
6364         arg3 = arg4;
6365     }
6366     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6367 }
6368 #endif
6369 
6370 #ifdef TARGET_NR_ftruncate64
6371 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6372                                           abi_long arg2,
6373                                           abi_long arg3,
6374                                           abi_long arg4)
6375 {
6376     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6377         arg2 = arg3;
6378         arg3 = arg4;
6379     }
6380     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6381 }
6382 #endif
6383 
6384 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6385                                                  abi_ulong target_addr)
6386 {
6387     struct target_itimerspec *target_itspec;
6388 
6389     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6390         return -TARGET_EFAULT;
6391     }
6392 
6393     host_itspec->it_interval.tv_sec =
6394                             tswapal(target_itspec->it_interval.tv_sec);
6395     host_itspec->it_interval.tv_nsec =
6396                             tswapal(target_itspec->it_interval.tv_nsec);
6397     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6398     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6399 
6400     unlock_user_struct(target_itspec, target_addr, 1);
6401     return 0;
6402 }
6403 
6404 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6405                                                struct itimerspec *host_its)
6406 {
6407     struct target_itimerspec *target_itspec;
6408 
6409     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6410         return -TARGET_EFAULT;
6411     }
6412 
6413     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6414     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6415 
6416     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6417     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6418 
6419     unlock_user_struct(target_itspec, target_addr, 0);
6420     return 0;
6421 }
6422 
6423 static inline abi_long target_to_host_timex(struct timex *host_tx,
6424                                             abi_long target_addr)
6425 {
6426     struct target_timex *target_tx;
6427 
6428     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6429         return -TARGET_EFAULT;
6430     }
6431 
6432     __get_user(host_tx->modes, &target_tx->modes);
6433     __get_user(host_tx->offset, &target_tx->offset);
6434     __get_user(host_tx->freq, &target_tx->freq);
6435     __get_user(host_tx->maxerror, &target_tx->maxerror);
6436     __get_user(host_tx->esterror, &target_tx->esterror);
6437     __get_user(host_tx->status, &target_tx->status);
6438     __get_user(host_tx->constant, &target_tx->constant);
6439     __get_user(host_tx->precision, &target_tx->precision);
6440     __get_user(host_tx->tolerance, &target_tx->tolerance);
6441     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6442     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6443     __get_user(host_tx->tick, &target_tx->tick);
6444     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6445     __get_user(host_tx->jitter, &target_tx->jitter);
6446     __get_user(host_tx->shift, &target_tx->shift);
6447     __get_user(host_tx->stabil, &target_tx->stabil);
6448     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6449     __get_user(host_tx->calcnt, &target_tx->calcnt);
6450     __get_user(host_tx->errcnt, &target_tx->errcnt);
6451     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6452     __get_user(host_tx->tai, &target_tx->tai);
6453 
6454     unlock_user_struct(target_tx, target_addr, 0);
6455     return 0;
6456 }
6457 
6458 static inline abi_long host_to_target_timex(abi_long target_addr,
6459                                             struct timex *host_tx)
6460 {
6461     struct target_timex *target_tx;
6462 
6463     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6464         return -TARGET_EFAULT;
6465     }
6466 
6467     __put_user(host_tx->modes, &target_tx->modes);
6468     __put_user(host_tx->offset, &target_tx->offset);
6469     __put_user(host_tx->freq, &target_tx->freq);
6470     __put_user(host_tx->maxerror, &target_tx->maxerror);
6471     __put_user(host_tx->esterror, &target_tx->esterror);
6472     __put_user(host_tx->status, &target_tx->status);
6473     __put_user(host_tx->constant, &target_tx->constant);
6474     __put_user(host_tx->precision, &target_tx->precision);
6475     __put_user(host_tx->tolerance, &target_tx->tolerance);
6476     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6477     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6478     __put_user(host_tx->tick, &target_tx->tick);
6479     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6480     __put_user(host_tx->jitter, &target_tx->jitter);
6481     __put_user(host_tx->shift, &target_tx->shift);
6482     __put_user(host_tx->stabil, &target_tx->stabil);
6483     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6484     __put_user(host_tx->calcnt, &target_tx->calcnt);
6485     __put_user(host_tx->errcnt, &target_tx->errcnt);
6486     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6487     __put_user(host_tx->tai, &target_tx->tai);
6488 
6489     unlock_user_struct(target_tx, target_addr, 1);
6490     return 0;
6491 }
6492 
6493 
6494 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6495                                                abi_ulong target_addr)
6496 {
6497     struct target_sigevent *target_sevp;
6498 
6499     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6500         return -TARGET_EFAULT;
6501     }
6502 
6503     /* This union is awkward on 64 bit systems because it has a 32 bit
6504      * integer and a pointer in it; we follow the conversion approach
6505      * used for handling sigval types in signal.c so the guest should get
6506      * the correct value back even if we did a 64 bit byteswap and it's
6507      * using the 32 bit integer.
6508      */
6509     host_sevp->sigev_value.sival_ptr =
6510         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6511     host_sevp->sigev_signo =
6512         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6513     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6514     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6515 
6516     unlock_user_struct(target_sevp, target_addr, 1);
6517     return 0;
6518 }
6519 
6520 #if defined(TARGET_NR_mlockall)
6521 static inline int target_to_host_mlockall_arg(int arg)
6522 {
6523     int result = 0;
6524 
6525     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6526         result |= MCL_CURRENT;
6527     }
6528     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6529         result |= MCL_FUTURE;
6530     }
6531     return result;
6532 }
6533 #endif
6534 
6535 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6536      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6537      defined(TARGET_NR_newfstatat))
6538 static inline abi_long host_to_target_stat64(void *cpu_env,
6539                                              abi_ulong target_addr,
6540                                              struct stat *host_st)
6541 {
6542 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6543     if (((CPUARMState *)cpu_env)->eabi) {
6544         struct target_eabi_stat64 *target_st;
6545 
6546         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6547             return -TARGET_EFAULT;
6548         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6549         __put_user(host_st->st_dev, &target_st->st_dev);
6550         __put_user(host_st->st_ino, &target_st->st_ino);
6551 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6552         __put_user(host_st->st_ino, &target_st->__st_ino);
6553 #endif
6554         __put_user(host_st->st_mode, &target_st->st_mode);
6555         __put_user(host_st->st_nlink, &target_st->st_nlink);
6556         __put_user(host_st->st_uid, &target_st->st_uid);
6557         __put_user(host_st->st_gid, &target_st->st_gid);
6558         __put_user(host_st->st_rdev, &target_st->st_rdev);
6559         __put_user(host_st->st_size, &target_st->st_size);
6560         __put_user(host_st->st_blksize, &target_st->st_blksize);
6561         __put_user(host_st->st_blocks, &target_st->st_blocks);
6562         __put_user(host_st->st_atime, &target_st->target_st_atime);
6563         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6564         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6565 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6566         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6567         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6568         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6569 #endif
6570         unlock_user_struct(target_st, target_addr, 1);
6571     } else
6572 #endif
6573     {
6574 #if defined(TARGET_HAS_STRUCT_STAT64)
6575         struct target_stat64 *target_st;
6576 #else
6577         struct target_stat *target_st;
6578 #endif
6579 
6580         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6581             return -TARGET_EFAULT;
6582         memset(target_st, 0, sizeof(*target_st));
6583         __put_user(host_st->st_dev, &target_st->st_dev);
6584         __put_user(host_st->st_ino, &target_st->st_ino);
6585 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6586         __put_user(host_st->st_ino, &target_st->__st_ino);
6587 #endif
6588         __put_user(host_st->st_mode, &target_st->st_mode);
6589         __put_user(host_st->st_nlink, &target_st->st_nlink);
6590         __put_user(host_st->st_uid, &target_st->st_uid);
6591         __put_user(host_st->st_gid, &target_st->st_gid);
6592         __put_user(host_st->st_rdev, &target_st->st_rdev);
6593         /* XXX: better use of kernel struct */
6594         __put_user(host_st->st_size, &target_st->st_size);
6595         __put_user(host_st->st_blksize, &target_st->st_blksize);
6596         __put_user(host_st->st_blocks, &target_st->st_blocks);
6597         __put_user(host_st->st_atime, &target_st->target_st_atime);
6598         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6599         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6600 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6601         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6602         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6603         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6604 #endif
6605         unlock_user_struct(target_st, target_addr, 1);
6606     }
6607 
6608     return 0;
6609 }
6610 #endif
6611 
6612 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6613 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6614                                             abi_ulong target_addr)
6615 {
6616     struct target_statx *target_stx;
6617 
6618     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6619         return -TARGET_EFAULT;
6620     }
6621     memset(target_stx, 0, sizeof(*target_stx));
6622 
6623     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6624     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6625     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6626     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6627     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6628     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6629     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6630     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6631     __put_user(host_stx->stx_size, &target_stx->stx_size);
6632     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6633     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6634     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6635     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6636     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6637     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6638     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6639     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6640     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6641     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6642     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6643     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6644     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6645     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6646 
6647     unlock_user_struct(target_stx, target_addr, 1);
6648 
6649     return 0;
6650 }
6651 #endif
6652 
6653 
6654 /* ??? Using host futex calls even when target atomic operations
6655    are not really atomic probably breaks things.  However, implementing
6656    futexes locally would make futexes shared between multiple processes
6657    tricky.  They are probably useless in that case anyway, because guest
6658    atomic operations won't work either.  */
6659 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6660                     target_ulong uaddr2, int val3)
6661 {
6662     struct timespec ts, *pts;
6663     int base_op;
6664 
6665     /* ??? We assume FUTEX_* constants are the same on both host
6666        and target.  */
6667 #ifdef FUTEX_CMD_MASK
6668     base_op = op & FUTEX_CMD_MASK;
6669 #else
6670     base_op = op;
6671 #endif
6672     switch (base_op) {
6673     case FUTEX_WAIT:
6674     case FUTEX_WAIT_BITSET:
6675         if (timeout) {
6676             pts = &ts;
6677             target_to_host_timespec(pts, timeout);
6678         } else {
6679             pts = NULL;
6680         }
6681         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6682                          pts, NULL, val3));
6683     case FUTEX_WAKE:
6684         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6685     case FUTEX_FD:
6686         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6687     case FUTEX_REQUEUE:
6688     case FUTEX_CMP_REQUEUE:
6689     case FUTEX_WAKE_OP:
6690         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6691            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6692            But the prototype takes a `struct timespec *'; insert casts
6693            to satisfy the compiler.  We do not need to tswap TIMEOUT
6694            since it's not compared to guest memory.  */
6695         pts = (struct timespec *)(uintptr_t) timeout;
6696         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6697                                     g2h(uaddr2),
6698                                     (base_op == FUTEX_CMP_REQUEUE
6699                                      ? tswap32(val3)
6700                                      : val3)));
6701     default:
6702         return -TARGET_ENOSYS;
6703     }
6704 }
6705 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6706 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6707                                      abi_long handle, abi_long mount_id,
6708                                      abi_long flags)
6709 {
6710     struct file_handle *target_fh;
6711     struct file_handle *fh;
6712     int mid = 0;
6713     abi_long ret;
6714     char *name;
6715     unsigned int size, total_size;
6716 
6717     if (get_user_s32(size, handle)) {
6718         return -TARGET_EFAULT;
6719     }
6720 
6721     name = lock_user_string(pathname);
6722     if (!name) {
6723         return -TARGET_EFAULT;
6724     }
6725 
6726     total_size = sizeof(struct file_handle) + size;
6727     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6728     if (!target_fh) {
6729         unlock_user(name, pathname, 0);
6730         return -TARGET_EFAULT;
6731     }
6732 
6733     fh = g_malloc0(total_size);
6734     fh->handle_bytes = size;
6735 
6736     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6737     unlock_user(name, pathname, 0);
6738 
6739     /* man name_to_handle_at(2):
6740      * Other than the use of the handle_bytes field, the caller should treat
6741      * the file_handle structure as an opaque data type
6742      */
6743 
6744     memcpy(target_fh, fh, total_size);
6745     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6746     target_fh->handle_type = tswap32(fh->handle_type);
6747     g_free(fh);
6748     unlock_user(target_fh, handle, total_size);
6749 
6750     if (put_user_s32(mid, mount_id)) {
6751         return -TARGET_EFAULT;
6752     }
6753 
6754     return ret;
6755 
6756 }
6757 #endif
6758 
6759 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6760 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6761                                      abi_long flags)
6762 {
6763     struct file_handle *target_fh;
6764     struct file_handle *fh;
6765     unsigned int size, total_size;
6766     abi_long ret;
6767 
6768     if (get_user_s32(size, handle)) {
6769         return -TARGET_EFAULT;
6770     }
6771 
6772     total_size = sizeof(struct file_handle) + size;
6773     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6774     if (!target_fh) {
6775         return -TARGET_EFAULT;
6776     }
6777 
6778     fh = g_memdup(target_fh, total_size);
6779     fh->handle_bytes = size;
6780     fh->handle_type = tswap32(target_fh->handle_type);
6781 
6782     ret = get_errno(open_by_handle_at(mount_fd, fh,
6783                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6784 
6785     g_free(fh);
6786 
6787     unlock_user(target_fh, handle, total_size);
6788 
6789     return ret;
6790 }
6791 #endif
6792 
6793 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6794 
6795 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6796 {
6797     int host_flags;
6798     target_sigset_t *target_mask;
6799     sigset_t host_mask;
6800     abi_long ret;
6801 
6802     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6803         return -TARGET_EINVAL;
6804     }
6805     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6806         return -TARGET_EFAULT;
6807     }
6808 
6809     target_to_host_sigset(&host_mask, target_mask);
6810 
6811     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6812 
6813     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6814     if (ret >= 0) {
6815         fd_trans_register(ret, &target_signalfd_trans);
6816     }
6817 
6818     unlock_user_struct(target_mask, mask, 0);
6819 
6820     return ret;
6821 }
6822 #endif
6823 
6824 /* Map host to target signal numbers for the wait family of syscalls.
6825    Assume all other status bits are the same.  */
6826 int host_to_target_waitstatus(int status)
6827 {
6828     if (WIFSIGNALED(status)) {
6829         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6830     }
6831     if (WIFSTOPPED(status)) {
6832         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6833                | (status & 0xff);
6834     }
6835     return status;
6836 }
6837 
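/*
 * Illustrative example, not part of the build: host_to_target_waitstatus()
 * only rewrites the signal number embedded in the status word; all of the
 * surrounding exit/stop bits are assumed to share the same layout on host
 * and target.  Signal 10 is SIGUSR1 on a typical x86 host and is used here
 * only as an example.
 */
#if 0
static void example_waitstatus(void)
{
    /* A child terminated by host signal 10, no core dump. */
    int host_status = 10;                 /* WIFSIGNALED() is true */
    int target_status = host_to_target_waitstatus(host_status);
    /* The low 7 bits now hold the target's signal number... */
    assert((target_status & 0x7f) == host_to_target_signal(10));
    /* ...and everything else is preserved untouched. */
    assert((target_status & ~0x7f) == (host_status & ~0x7f));
}
#endif
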
6838 static int open_self_cmdline(void *cpu_env, int fd)
6839 {
6840     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6841     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6842     int i;
6843 
6844     for (i = 0; i < bprm->argc; i++) {
6845         size_t len = strlen(bprm->argv[i]) + 1;
6846 
6847         if (write(fd, bprm->argv[i], len) != len) {
6848             return -1;
6849         }
6850     }
6851 
6852     return 0;
6853 }
6854 
6855 static int open_self_maps(void *cpu_env, int fd)
6856 {
6857     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6858     TaskState *ts = cpu->opaque;
6859     FILE *fp;
6860     char *line = NULL;
6861     size_t len = 0;
6862     ssize_t read;
6863 
6864     fp = fopen("/proc/self/maps", "r");
6865     if (fp == NULL) {
6866         return -1;
6867     }
6868 
6869     while ((read = getline(&line, &len, fp)) != -1) {
6870         int fields, dev_maj, dev_min, inode;
6871         uint64_t min, max, offset;
6872         char flag_r, flag_w, flag_x, flag_p;
6873         char path[512] = "";
6874         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6875                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6876                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6877 
6878         if ((fields < 10) || (fields > 11)) {
6879             continue;
6880         }
6881         if (h2g_valid(min)) {
6882             int flags = page_get_flags(h2g(min));
6883             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6884             if (page_check_range(h2g(min), max - min, flags) == -1) {
6885                 continue;
6886             }
6887             if (h2g(min) == ts->info->stack_limit) {
6888                 pstrcpy(path, sizeof(path), "      [stack]");
6889             }
6890             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6891                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6892                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6893                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6894                     path[0] ? "         " : "", path);
6895         }
6896     }
6897 
6898     free(line);
6899     fclose(fp);
6900 
6901     return 0;
6902 }
6903 
6904 static int open_self_stat(void *cpu_env, int fd)
6905 {
6906     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6907     TaskState *ts = cpu->opaque;
6908     abi_ulong start_stack = ts->info->start_stack;
6909     int i;
6910 
6911     for (i = 0; i < 44; i++) {
6912       char buf[128];
6913       int len;
6914       uint64_t val = 0;
6915 
6916       if (i == 0) {
6917         /* pid */
6918         val = getpid();
6919         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6920       } else if (i == 1) {
6921         /* app name */
6922         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6923       } else if (i == 27) {
6924         /* stack bottom */
6925         val = start_stack;
6926         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6927       } else {
6928         /* for the rest, there is MasterCard */
6929         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6930       }
6931 
6932       len = strlen(buf);
6933       if (write(fd, buf, len) != len) {
6934           return -1;
6935       }
6936     }
6937 
6938     return 0;
6939 }
6940 
6941 static int open_self_auxv(void *cpu_env, int fd)
6942 {
6943     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6944     TaskState *ts = cpu->opaque;
6945     abi_ulong auxv = ts->info->saved_auxv;
6946     abi_ulong len = ts->info->auxv_len;
6947     char *ptr;
6948 
6949     /*
6950      * The auxiliary vector is stored on the target process's stack;
6951      * read the whole auxv vector and copy it out to the file.
6952      */
6953     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6954     if (ptr != NULL) {
6955         while (len > 0) {
6956             ssize_t r;
6957             r = write(fd, ptr, len);
6958             if (r <= 0) {
6959                 break;
6960             }
6961             len -= r;
6962             ptr += r;
6963         }
6964         lseek(fd, 0, SEEK_SET);
6965         unlock_user(ptr, auxv, len);
6966     }
6967 
6968     return 0;
6969 }
6970 
6971 static int is_proc_myself(const char *filename, const char *entry)
6972 {
6973     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6974         filename += strlen("/proc/");
6975         if (!strncmp(filename, "self/", strlen("self/"))) {
6976             filename += strlen("self/");
6977         } else if (*filename >= '1' && *filename <= '9') {
6978             char myself[80];
6979             snprintf(myself, sizeof(myself), "%d/", getpid());
6980             if (!strncmp(filename, myself, strlen(myself))) {
6981                 filename += strlen(myself);
6982             } else {
6983                 return 0;
6984             }
6985         } else {
6986             return 0;
6987         }
6988         if (!strcmp(filename, entry)) {
6989             return 1;
6990         }
6991     }
6992     return 0;
6993 }
6994 
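/*
 * Illustrative examples, not part of the build: is_proc_myself() accepts
 * both the "self" form and the numeric-pid form of a /proc path, but the
 * numeric form only for the pid of the emulator process itself.  assert()
 * is used purely for illustration.
 */
#if 0
static void example_is_proc_myself(void)
{
    assert(is_proc_myself("/proc/self/maps", "maps"));

    /* The numeric form matches only our own getpid(). */
    char buf[64];
    snprintf(buf, sizeof(buf), "/proc/%d/maps", getpid());
    assert(is_proc_myself(buf, "maps"));

    /* Anything else falls through to the real host file. */
    assert(!is_proc_myself("/proc/cpuinfo", "maps"));
    assert(!is_proc_myself("/etc/passwd", "maps"));
}
#endif
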
6995 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6996     defined(TARGET_SPARC) || defined(TARGET_M68K)
6997 static int is_proc(const char *filename, const char *entry)
6998 {
6999     return strcmp(filename, entry) == 0;
7000 }
7001 #endif
7002 
7003 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7004 static int open_net_route(void *cpu_env, int fd)
7005 {
7006     FILE *fp;
7007     char *line = NULL;
7008     size_t len = 0;
7009     ssize_t read;
7010 
7011     fp = fopen("/proc/net/route", "r");
7012     if (fp == NULL) {
7013         return -1;
7014     }
7015 
7016     /* read header */
7017 
7018     read = getline(&line, &len, fp);
7019     dprintf(fd, "%s", line);
7020 
7021     /* read routes */
7022 
7023     while ((read = getline(&line, &len, fp)) != -1) {
7024         char iface[16];
7025         uint32_t dest, gw, mask;
7026         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7027         int fields;
7028 
7029         fields = sscanf(line,
7030                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7031                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7032                         &mask, &mtu, &window, &irtt);
7033         if (fields != 11) {
7034             continue;
7035         }
7036         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7037                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7038                 metric, tswap32(mask), mtu, window, irtt);
7039     }
7040 
7041     free(line);
7042     fclose(fp);
7043 
7044     return 0;
7045 }
7046 #endif
7047 
7048 #if defined(TARGET_SPARC)
7049 static int open_cpuinfo(void *cpu_env, int fd)
7050 {
7051     dprintf(fd, "type\t\t: sun4u\n");
7052     return 0;
7053 }
7054 #endif
7055 
7056 #if defined(TARGET_M68K)
7057 static int open_hardware(void *cpu_env, int fd)
7058 {
7059     dprintf(fd, "Model:\t\tqemu-m68k\n");
7060     return 0;
7061 }
7062 #endif
7063 
7064 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7065 {
7066     struct fake_open {
7067         const char *filename;
7068         int (*fill)(void *cpu_env, int fd);
7069         int (*cmp)(const char *s1, const char *s2);
7070     };
7071     const struct fake_open *fake_open;
7072     static const struct fake_open fakes[] = {
7073         { "maps", open_self_maps, is_proc_myself },
7074         { "stat", open_self_stat, is_proc_myself },
7075         { "auxv", open_self_auxv, is_proc_myself },
7076         { "cmdline", open_self_cmdline, is_proc_myself },
7077 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7078         { "/proc/net/route", open_net_route, is_proc },
7079 #endif
7080 #if defined(TARGET_SPARC)
7081         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7082 #endif
7083 #if defined(TARGET_M68K)
7084         { "/proc/hardware", open_hardware, is_proc },
7085 #endif
7086         { NULL, NULL, NULL }
7087     };
7088 
7089     if (is_proc_myself(pathname, "exe")) {
7090         int execfd = qemu_getauxval(AT_EXECFD);
7091         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7092     }
7093 
7094     for (fake_open = fakes; fake_open->filename; fake_open++) {
7095         if (fake_open->cmp(pathname, fake_open->filename)) {
7096             break;
7097         }
7098     }
7099 
7100     if (fake_open->filename) {
7101         const char *tmpdir;
7102         char filename[PATH_MAX];
7103         int fd, r;
7104 
7105         /* create a temporary file to hold the synthesized /proc contents */
7106         tmpdir = getenv("TMPDIR");
7107         if (!tmpdir)
7108             tmpdir = "/tmp";
7109         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7110         fd = mkstemp(filename);
7111         if (fd < 0) {
7112             return fd;
7113         }
7114         unlink(filename);
7115 
7116         if ((r = fake_open->fill(cpu_env, fd))) {
7117             int e = errno;
7118             close(fd);
7119             errno = e;
7120             return r;
7121         }
7122         lseek(fd, 0, SEEK_SET);
7123 
7124         return fd;
7125     }
7126 
7127     return safe_openat(dirfd, path(pathname), flags, mode);
7128 }
7129 
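/*
 * Illustrative example, not part of the build: a guest open() of one of
 * the faked /proc entries never reaches the host file; it is served from
 * an unlinked temporary file filled in by the matching fill() hook above.
 */
#if 0
static void example_fake_proc_open(void *cpu_env)
{
    /* Equivalent of the guest doing open("/proc/self/maps", O_RDONLY):
       the returned fd refers to a temp file whose contents were produced
       by open_self_maps(), with host addresses rewritten to guest ones. */
    int fd = do_openat(cpu_env, AT_FDCWD, "/proc/self/maps", O_RDONLY, 0);
    if (fd >= 0) {
        close(fd);
    }
}
#endif
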
7130 #define TIMER_MAGIC 0x0caf0000
7131 #define TIMER_MAGIC_MASK 0xffff0000
7132 
7133 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7134 static target_timer_t get_timer_id(abi_long arg)
7135 {
7136     target_timer_t timerid = arg;
7137 
7138     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7139         return -TARGET_EINVAL;
7140     }
7141 
7142     timerid &= 0xffff;
7143 
7144     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7145         return -TARGET_EINVAL;
7146     }
7147 
7148     return timerid;
7149 }
7150 
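/*
 * Illustrative sketch, not part of the build: timer IDs handed to the
 * guest are indices into g_posix_timers[] tagged with TIMER_MAGIC in the
 * upper bits (that is how the timer creation path later in this file
 * encodes them), so get_timer_id() can reject stray guest values instead
 * of indexing the array with them.  Index 3 is just an example.
 */
#if 0
static void example_timer_id(void)
{
    /* Encoding used when a timer is created. */
    abi_long guest_id = TIMER_MAGIC | 3;
    assert(get_timer_id(guest_id) == 3);
    /* A value without the magic tag is refused. */
    assert(get_timer_id(3) == -TARGET_EINVAL);
}
#endif
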
7151 static int target_to_host_cpu_mask(unsigned long *host_mask,
7152                                    size_t host_size,
7153                                    abi_ulong target_addr,
7154                                    size_t target_size)
7155 {
7156     unsigned target_bits = sizeof(abi_ulong) * 8;
7157     unsigned host_bits = sizeof(*host_mask) * 8;
7158     abi_ulong *target_mask;
7159     unsigned i, j;
7160 
7161     assert(host_size >= target_size);
7162 
7163     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7164     if (!target_mask) {
7165         return -TARGET_EFAULT;
7166     }
7167     memset(host_mask, 0, host_size);
7168 
7169     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7170         unsigned bit = i * target_bits;
7171         abi_ulong val;
7172 
7173         __get_user(val, &target_mask[i]);
7174         for (j = 0; j < target_bits; j++, bit++) {
7175             if (val & (1UL << j)) {
7176                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7177             }
7178         }
7179     }
7180 
7181     unlock_user(target_mask, target_addr, 0);
7182     return 0;
7183 }
7184 
7185 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7186                                    size_t host_size,
7187                                    abi_ulong target_addr,
7188                                    size_t target_size)
7189 {
7190     unsigned target_bits = sizeof(abi_ulong) * 8;
7191     unsigned host_bits = sizeof(*host_mask) * 8;
7192     abi_ulong *target_mask;
7193     unsigned i, j;
7194 
7195     assert(host_size >= target_size);
7196 
7197     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7198     if (!target_mask) {
7199         return -TARGET_EFAULT;
7200     }
7201 
7202     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7203         unsigned bit = i * target_bits;
7204         abi_ulong val = 0;
7205 
7206         for (j = 0; j < target_bits; j++, bit++) {
7207             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7208                 val |= 1UL << j;
7209             }
7210         }
7211         __put_user(val, &target_mask[i]);
7212     }
7213 
7214     unlock_user(target_mask, target_addr, target_size);
7215     return 0;
7216 }
7217 
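/*
 * Illustrative sketch, not part of the build: the two helpers above
 * re-pack the CPU affinity bitmap between guest words (abi_ulong) and
 * host words (unsigned long), whose widths may differ.  The loop below
 * mirrors the bit-by-bit repacking for a 32-bit guest and a host with
 * 64-bit longs (that word size is an assumption of this sketch).
 */
#if 0
static void example_cpu_mask(void)
{
    /* A single 64-bit host word with CPUs 0 and 33 set. */
    unsigned long host_word = (1UL << 0) | (1UL << 33);
    uint32_t guest_word0 = 0, guest_word1 = 0;

    /* Same repacking that host_to_target_cpu_mask() performs, specialised
       to one host word and two 32-bit guest words. */
    for (unsigned bit = 0; bit < 64; bit++) {
        if (host_word & (1UL << bit)) {
            if (bit < 32) {
                guest_word0 |= 1U << bit;
            } else {
                guest_word1 |= 1U << (bit - 32);
            }
        }
    }
    assert(guest_word0 == 0x1 && guest_word1 == 0x2);
}
#endif
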
7218 /* This is an internal helper for do_syscall so that it is easier
7219  * to have a single return point, allowing actions such as logging
7220  * of syscall results to be performed.
7221  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7222  */
7223 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7224                             abi_long arg2, abi_long arg3, abi_long arg4,
7225                             abi_long arg5, abi_long arg6, abi_long arg7,
7226                             abi_long arg8)
7227 {
7228     CPUState *cpu = env_cpu(cpu_env);
7229     abi_long ret;
7230 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7231     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7232     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7233     || defined(TARGET_NR_statx)
7234     struct stat st;
7235 #endif
7236 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7237     || defined(TARGET_NR_fstatfs)
7238     struct statfs stfs;
7239 #endif
7240     void *p;
7241 
7242     switch(num) {
7243     case TARGET_NR_exit:
7244         /* In old applications this may be used to implement _exit(2).
7245            However, in threaded applications it is used for thread termination,
7246            and _exit_group is used for application termination.
7247            Do thread termination if we have more than one thread.  */
7248 
7249         if (block_signals()) {
7250             return -TARGET_ERESTARTSYS;
7251         }
7252 
7253         cpu_list_lock();
7254 
7255         if (CPU_NEXT(first_cpu)) {
7256             TaskState *ts;
7257 
7258             /* Remove the CPU from the list.  */
7259             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7260 
7261             cpu_list_unlock();
7262 
7263             ts = cpu->opaque;
7264             if (ts->child_tidptr) {
7265                 put_user_u32(0, ts->child_tidptr);
7266                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7267                           NULL, NULL, 0);
7268             }
7269             thread_cpu = NULL;
7270             object_unref(OBJECT(cpu));
7271             g_free(ts);
7272             rcu_unregister_thread();
7273             pthread_exit(NULL);
7274         }
7275 
7276         cpu_list_unlock();
7277         preexit_cleanup(cpu_env, arg1);
7278         _exit(arg1);
7279         return 0; /* avoid warning */
7280     case TARGET_NR_read:
7281         if (arg2 == 0 && arg3 == 0) {
7282             return get_errno(safe_read(arg1, 0, 0));
7283         } else {
7284             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7285                 return -TARGET_EFAULT;
7286             ret = get_errno(safe_read(arg1, p, arg3));
7287             if (ret >= 0 &&
7288                 fd_trans_host_to_target_data(arg1)) {
7289                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7290             }
7291             unlock_user(p, arg2, ret);
7292         }
7293         return ret;
7294     case TARGET_NR_write:
7295         if (arg2 == 0 && arg3 == 0) {
7296             return get_errno(safe_write(arg1, 0, 0));
7297         }
7298         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7299             return -TARGET_EFAULT;
7300         if (fd_trans_target_to_host_data(arg1)) {
7301             void *copy = g_malloc(arg3);
7302             memcpy(copy, p, arg3);
7303             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7304             if (ret >= 0) {
7305                 ret = get_errno(safe_write(arg1, copy, ret));
7306             }
7307             g_free(copy);
7308         } else {
7309             ret = get_errno(safe_write(arg1, p, arg3));
7310         }
7311         unlock_user(p, arg2, 0);
7312         return ret;
7313 
7314 #ifdef TARGET_NR_open
7315     case TARGET_NR_open:
7316         if (!(p = lock_user_string(arg1)))
7317             return -TARGET_EFAULT;
7318         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7319                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7320                                   arg3));
7321         fd_trans_unregister(ret);
7322         unlock_user(p, arg1, 0);
7323         return ret;
7324 #endif
7325     case TARGET_NR_openat:
7326         if (!(p = lock_user_string(arg2)))
7327             return -TARGET_EFAULT;
7328         ret = get_errno(do_openat(cpu_env, arg1, p,
7329                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7330                                   arg4));
7331         fd_trans_unregister(ret);
7332         unlock_user(p, arg2, 0);
7333         return ret;
7334 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7335     case TARGET_NR_name_to_handle_at:
7336         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7337         return ret;
7338 #endif
7339 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7340     case TARGET_NR_open_by_handle_at:
7341         ret = do_open_by_handle_at(arg1, arg2, arg3);
7342         fd_trans_unregister(ret);
7343         return ret;
7344 #endif
7345     case TARGET_NR_close:
7346         fd_trans_unregister(arg1);
7347         return get_errno(close(arg1));
7348 
7349     case TARGET_NR_brk:
7350         return do_brk(arg1);
7351 #ifdef TARGET_NR_fork
7352     case TARGET_NR_fork:
7353         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7354 #endif
7355 #ifdef TARGET_NR_waitpid
7356     case TARGET_NR_waitpid:
7357         {
7358             int status;
7359             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7360             if (!is_error(ret) && arg2 && ret
7361                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7362                 return -TARGET_EFAULT;
7363         }
7364         return ret;
7365 #endif
7366 #ifdef TARGET_NR_waitid
7367     case TARGET_NR_waitid:
7368         {
7369             siginfo_t info;
7370             info.si_pid = 0;
7371             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7372             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7373                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7374                     return -TARGET_EFAULT;
7375                 host_to_target_siginfo(p, &info);
7376                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7377             }
7378         }
7379         return ret;
7380 #endif
7381 #ifdef TARGET_NR_creat /* not on alpha */
7382     case TARGET_NR_creat:
7383         if (!(p = lock_user_string(arg1)))
7384             return -TARGET_EFAULT;
7385         ret = get_errno(creat(p, arg2));
7386         fd_trans_unregister(ret);
7387         unlock_user(p, arg1, 0);
7388         return ret;
7389 #endif
7390 #ifdef TARGET_NR_link
7391     case TARGET_NR_link:
7392         {
7393             void * p2;
7394             p = lock_user_string(arg1);
7395             p2 = lock_user_string(arg2);
7396             if (!p || !p2)
7397                 ret = -TARGET_EFAULT;
7398             else
7399                 ret = get_errno(link(p, p2));
7400             unlock_user(p2, arg2, 0);
7401             unlock_user(p, arg1, 0);
7402         }
7403         return ret;
7404 #endif
7405 #if defined(TARGET_NR_linkat)
7406     case TARGET_NR_linkat:
7407         {
7408             void * p2 = NULL;
7409             if (!arg2 || !arg4)
7410                 return -TARGET_EFAULT;
7411             p  = lock_user_string(arg2);
7412             p2 = lock_user_string(arg4);
7413             if (!p || !p2)
7414                 ret = -TARGET_EFAULT;
7415             else
7416                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7417             unlock_user(p, arg2, 0);
7418             unlock_user(p2, arg4, 0);
7419         }
7420         return ret;
7421 #endif
7422 #ifdef TARGET_NR_unlink
7423     case TARGET_NR_unlink:
7424         if (!(p = lock_user_string(arg1)))
7425             return -TARGET_EFAULT;
7426         ret = get_errno(unlink(p));
7427         unlock_user(p, arg1, 0);
7428         return ret;
7429 #endif
7430 #if defined(TARGET_NR_unlinkat)
7431     case TARGET_NR_unlinkat:
7432         if (!(p = lock_user_string(arg2)))
7433             return -TARGET_EFAULT;
7434         ret = get_errno(unlinkat(arg1, p, arg3));
7435         unlock_user(p, arg2, 0);
7436         return ret;
7437 #endif
7438     case TARGET_NR_execve:
7439         {
7440             char **argp, **envp;
7441             int argc, envc;
7442             abi_ulong gp;
7443             abi_ulong guest_argp;
7444             abi_ulong guest_envp;
7445             abi_ulong addr;
7446             char **q;
7447             int total_size = 0;
7448 
7449             argc = 0;
7450             guest_argp = arg2;
7451             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7452                 if (get_user_ual(addr, gp))
7453                     return -TARGET_EFAULT;
7454                 if (!addr)
7455                     break;
7456                 argc++;
7457             }
7458             envc = 0;
7459             guest_envp = arg3;
7460             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7461                 if (get_user_ual(addr, gp))
7462                     return -TARGET_EFAULT;
7463                 if (!addr)
7464                     break;
7465                 envc++;
7466             }
7467 
7468             argp = g_new0(char *, argc + 1);
7469             envp = g_new0(char *, envc + 1);
7470 
7471             for (gp = guest_argp, q = argp; gp;
7472                   gp += sizeof(abi_ulong), q++) {
7473                 if (get_user_ual(addr, gp))
7474                     goto execve_efault;
7475                 if (!addr)
7476                     break;
7477                 if (!(*q = lock_user_string(addr)))
7478                     goto execve_efault;
7479                 total_size += strlen(*q) + 1;
7480             }
7481             *q = NULL;
7482 
7483             for (gp = guest_envp, q = envp; gp;
7484                   gp += sizeof(abi_ulong), q++) {
7485                 if (get_user_ual(addr, gp))
7486                     goto execve_efault;
7487                 if (!addr)
7488                     break;
7489                 if (!(*q = lock_user_string(addr)))
7490                     goto execve_efault;
7491                 total_size += strlen(*q) + 1;
7492             }
7493             *q = NULL;
7494 
7495             if (!(p = lock_user_string(arg1)))
7496                 goto execve_efault;
7497             /* Although execve() is not an interruptible syscall it is
7498              * a special case where we must use the safe_syscall wrapper:
7499              * if we allow a signal to happen before we make the host
7500              * syscall then we will 'lose' it, because at the point of
7501              * execve the process leaves QEMU's control. So we use the
7502              * safe syscall wrapper to ensure that we either take the
7503              * signal as a guest signal, or else it does not happen
7504              * before the execve completes and makes it the other
7505              * program's problem.
7506              */
7507             ret = get_errno(safe_execve(p, argp, envp));
7508             unlock_user(p, arg1, 0);
7509 
7510             goto execve_end;
7511 
7512         execve_efault:
7513             ret = -TARGET_EFAULT;
7514 
7515         execve_end:
7516             for (gp = guest_argp, q = argp; *q;
7517                   gp += sizeof(abi_ulong), q++) {
7518                 if (get_user_ual(addr, gp)
7519                     || !addr)
7520                     break;
7521                 unlock_user(*q, addr, 0);
7522             }
7523             for (gp = guest_envp, q = envp; *q;
7524                   gp += sizeof(abi_ulong), q++) {
7525                 if (get_user_ual(addr, gp)
7526                     || !addr)
7527                     break;
7528                 unlock_user(*q, addr, 0);
7529             }
7530 
7531             g_free(argp);
7532             g_free(envp);
7533         }
7534         return ret;
7535     case TARGET_NR_chdir:
7536         if (!(p = lock_user_string(arg1)))
7537             return -TARGET_EFAULT;
7538         ret = get_errno(chdir(p));
7539         unlock_user(p, arg1, 0);
7540         return ret;
7541 #ifdef TARGET_NR_time
7542     case TARGET_NR_time:
7543         {
7544             time_t host_time;
7545             ret = get_errno(time(&host_time));
7546             if (!is_error(ret)
7547                 && arg1
7548                 && put_user_sal(host_time, arg1))
7549                 return -TARGET_EFAULT;
7550         }
7551         return ret;
7552 #endif
7553 #ifdef TARGET_NR_mknod
7554     case TARGET_NR_mknod:
7555         if (!(p = lock_user_string(arg1)))
7556             return -TARGET_EFAULT;
7557         ret = get_errno(mknod(p, arg2, arg3));
7558         unlock_user(p, arg1, 0);
7559         return ret;
7560 #endif
7561 #if defined(TARGET_NR_mknodat)
7562     case TARGET_NR_mknodat:
7563         if (!(p = lock_user_string(arg2)))
7564             return -TARGET_EFAULT;
7565         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7566         unlock_user(p, arg2, 0);
7567         return ret;
7568 #endif
7569 #ifdef TARGET_NR_chmod
7570     case TARGET_NR_chmod:
7571         if (!(p = lock_user_string(arg1)))
7572             return -TARGET_EFAULT;
7573         ret = get_errno(chmod(p, arg2));
7574         unlock_user(p, arg1, 0);
7575         return ret;
7576 #endif
7577 #ifdef TARGET_NR_lseek
7578     case TARGET_NR_lseek:
7579         return get_errno(lseek(arg1, arg2, arg3));
7580 #endif
7581 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7582     /* Alpha specific */
7583     case TARGET_NR_getxpid:
7584         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7585         return get_errno(getpid());
7586 #endif
7587 #ifdef TARGET_NR_getpid
7588     case TARGET_NR_getpid:
7589         return get_errno(getpid());
7590 #endif
7591     case TARGET_NR_mount:
7592         {
7593             /* need to look at the data field */
7594             void *p2, *p3;
7595 
7596             if (arg1) {
7597                 p = lock_user_string(arg1);
7598                 if (!p) {
7599                     return -TARGET_EFAULT;
7600                 }
7601             } else {
7602                 p = NULL;
7603             }
7604 
7605             p2 = lock_user_string(arg2);
7606             if (!p2) {
7607                 if (arg1) {
7608                     unlock_user(p, arg1, 0);
7609                 }
7610                 return -TARGET_EFAULT;
7611             }
7612 
7613             if (arg3) {
7614                 p3 = lock_user_string(arg3);
7615                 if (!p3) {
7616                     if (arg1) {
7617                         unlock_user(p, arg1, 0);
7618                     }
7619                     unlock_user(p2, arg2, 0);
7620                     return -TARGET_EFAULT;
7621                 }
7622             } else {
7623                 p3 = NULL;
7624             }
7625 
7626             /* FIXME - arg5 should be locked, but it isn't clear how to
7627              * do that since it's not guaranteed to be a NULL-terminated
7628              * string.
7629              */
7630             if (!arg5) {
7631                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7632             } else {
7633                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7634             }
7635             ret = get_errno(ret);
7636 
7637             if (arg1) {
7638                 unlock_user(p, arg1, 0);
7639             }
7640             unlock_user(p2, arg2, 0);
7641             if (arg3) {
7642                 unlock_user(p3, arg3, 0);
7643             }
7644         }
7645         return ret;
7646 #ifdef TARGET_NR_umount
7647     case TARGET_NR_umount:
7648         if (!(p = lock_user_string(arg1)))
7649             return -TARGET_EFAULT;
7650         ret = get_errno(umount(p));
7651         unlock_user(p, arg1, 0);
7652         return ret;
7653 #endif
7654 #ifdef TARGET_NR_stime /* not on alpha */
7655     case TARGET_NR_stime:
7656         {
7657             time_t host_time;
7658             if (get_user_sal(host_time, arg1))
7659                 return -TARGET_EFAULT;
7660             return get_errno(stime(&host_time));
7661         }
7662 #endif
7663 #ifdef TARGET_NR_alarm /* not on alpha */
7664     case TARGET_NR_alarm:
7665         return alarm(arg1);
7666 #endif
7667 #ifdef TARGET_NR_pause /* not on alpha */
7668     case TARGET_NR_pause:
7669         if (!block_signals()) {
7670             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7671         }
7672         return -TARGET_EINTR;
7673 #endif
7674 #ifdef TARGET_NR_utime
7675     case TARGET_NR_utime:
7676         {
7677             struct utimbuf tbuf, *host_tbuf;
7678             struct target_utimbuf *target_tbuf;
7679             if (arg2) {
7680                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7681                     return -TARGET_EFAULT;
7682                 tbuf.actime = tswapal(target_tbuf->actime);
7683                 tbuf.modtime = tswapal(target_tbuf->modtime);
7684                 unlock_user_struct(target_tbuf, arg2, 0);
7685                 host_tbuf = &tbuf;
7686             } else {
7687                 host_tbuf = NULL;
7688             }
7689             if (!(p = lock_user_string(arg1)))
7690                 return -TARGET_EFAULT;
7691             ret = get_errno(utime(p, host_tbuf));
7692             unlock_user(p, arg1, 0);
7693         }
7694         return ret;
7695 #endif
7696 #ifdef TARGET_NR_utimes
7697     case TARGET_NR_utimes:
7698         {
7699             struct timeval *tvp, tv[2];
7700             if (arg2) {
7701                 if (copy_from_user_timeval(&tv[0], arg2)
7702                     || copy_from_user_timeval(&tv[1],
7703                                               arg2 + sizeof(struct target_timeval)))
7704                     return -TARGET_EFAULT;
7705                 tvp = tv;
7706             } else {
7707                 tvp = NULL;
7708             }
7709             if (!(p = lock_user_string(arg1)))
7710                 return -TARGET_EFAULT;
7711             ret = get_errno(utimes(p, tvp));
7712             unlock_user(p, arg1, 0);
7713         }
7714         return ret;
7715 #endif
7716 #if defined(TARGET_NR_futimesat)
7717     case TARGET_NR_futimesat:
7718         {
7719             struct timeval *tvp, tv[2];
7720             if (arg3) {
7721                 if (copy_from_user_timeval(&tv[0], arg3)
7722                     || copy_from_user_timeval(&tv[1],
7723                                               arg3 + sizeof(struct target_timeval)))
7724                     return -TARGET_EFAULT;
7725                 tvp = tv;
7726             } else {
7727                 tvp = NULL;
7728             }
7729             if (!(p = lock_user_string(arg2))) {
7730                 return -TARGET_EFAULT;
7731             }
7732             ret = get_errno(futimesat(arg1, path(p), tvp));
7733             unlock_user(p, arg2, 0);
7734         }
7735         return ret;
7736 #endif
7737 #ifdef TARGET_NR_access
7738     case TARGET_NR_access:
7739         if (!(p = lock_user_string(arg1))) {
7740             return -TARGET_EFAULT;
7741         }
7742         ret = get_errno(access(path(p), arg2));
7743         unlock_user(p, arg1, 0);
7744         return ret;
7745 #endif
7746 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7747     case TARGET_NR_faccessat:
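             /*
              * The faccessat syscall itself has no flags argument (the
              * AT_* flags are normally handled in the C library), so pass
              * 0 for the host wrapper's flags here.
              */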
7748         if (!(p = lock_user_string(arg2))) {
7749             return -TARGET_EFAULT;
7750         }
7751         ret = get_errno(faccessat(arg1, p, arg3, 0));
7752         unlock_user(p, arg2, 0);
7753         return ret;
7754 #endif
7755 #ifdef TARGET_NR_nice /* not on alpha */
7756     case TARGET_NR_nice:
7757         return get_errno(nice(arg1));
7758 #endif
7759     case TARGET_NR_sync:
7760         sync();
7761         return 0;
7762 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7763     case TARGET_NR_syncfs:
7764         return get_errno(syncfs(arg1));
7765 #endif
7766     case TARGET_NR_kill:
7767         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7768 #ifdef TARGET_NR_rename
7769     case TARGET_NR_rename:
7770         {
7771             void *p2;
7772             p = lock_user_string(arg1);
7773             p2 = lock_user_string(arg2);
7774             if (!p || !p2)
7775                 ret = -TARGET_EFAULT;
7776             else
7777                 ret = get_errno(rename(p, p2));
7778             unlock_user(p2, arg2, 0);
7779             unlock_user(p, arg1, 0);
7780         }
7781         return ret;
7782 #endif
7783 #if defined(TARGET_NR_renameat)
7784     case TARGET_NR_renameat:
7785         {
7786             void *p2;
7787             p  = lock_user_string(arg2);
7788             p2 = lock_user_string(arg4);
7789             if (!p || !p2)
7790                 ret = -TARGET_EFAULT;
7791             else
7792                 ret = get_errno(renameat(arg1, p, arg3, p2));
7793             unlock_user(p2, arg4, 0);
7794             unlock_user(p, arg2, 0);
7795         }
7796         return ret;
7797 #endif
7798 #if defined(TARGET_NR_renameat2)
7799     case TARGET_NR_renameat2:
7800         {
7801             void *p2;
7802             p  = lock_user_string(arg2);
7803             p2 = lock_user_string(arg4);
7804             if (!p || !p2) {
7805                 ret = -TARGET_EFAULT;
7806             } else {
7807                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7808             }
7809             unlock_user(p2, arg4, 0);
7810             unlock_user(p, arg2, 0);
7811         }
7812         return ret;
7813 #endif
7814 #ifdef TARGET_NR_mkdir
7815     case TARGET_NR_mkdir:
7816         if (!(p = lock_user_string(arg1)))
7817             return -TARGET_EFAULT;
7818         ret = get_errno(mkdir(p, arg2));
7819         unlock_user(p, arg1, 0);
7820         return ret;
7821 #endif
7822 #if defined(TARGET_NR_mkdirat)
7823     case TARGET_NR_mkdirat:
7824         if (!(p = lock_user_string(arg2)))
7825             return -TARGET_EFAULT;
7826         ret = get_errno(mkdirat(arg1, p, arg3));
7827         unlock_user(p, arg2, 0);
7828         return ret;
7829 #endif
7830 #ifdef TARGET_NR_rmdir
7831     case TARGET_NR_rmdir:
7832         if (!(p = lock_user_string(arg1)))
7833             return -TARGET_EFAULT;
7834         ret = get_errno(rmdir(p));
7835         unlock_user(p, arg1, 0);
7836         return ret;
7837 #endif
7838     case TARGET_NR_dup:
7839         ret = get_errno(dup(arg1));
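             /*
              * Propagate any fd translation state (see fd-trans.c, used
              * e.g. for netlink sockets) to the new descriptor so data
              * conversion keeps working on it.
              */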
7840         if (ret >= 0) {
7841             fd_trans_dup(arg1, ret);
7842         }
7843         return ret;
7844 #ifdef TARGET_NR_pipe
7845     case TARGET_NR_pipe:
7846         return do_pipe(cpu_env, arg1, 0, 0);
7847 #endif
7848 #ifdef TARGET_NR_pipe2
7849     case TARGET_NR_pipe2:
7850         return do_pipe(cpu_env, arg1,
7851                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7852 #endif
7853     case TARGET_NR_times:
7854         {
7855             struct target_tms *tmsp;
7856             struct tms tms;
7857             ret = get_errno(times(&tms));
7858             if (arg1) {
7859                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7860                 if (!tmsp)
7861                     return -TARGET_EFAULT;
7862                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7863                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7864                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7865                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7866             }
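                 /*
                  * times() itself returns elapsed time in clock ticks, so
                  * a successful return value needs the same clock_t
                  * conversion as the struct fields above.
                  */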
7867             if (!is_error(ret))
7868                 ret = host_to_target_clock_t(ret);
7869         }
7870         return ret;
7871     case TARGET_NR_acct:
7872         if (arg1 == 0) {
7873             ret = get_errno(acct(NULL));
7874         } else {
7875             if (!(p = lock_user_string(arg1))) {
7876                 return -TARGET_EFAULT;
7877             }
7878             ret = get_errno(acct(path(p)));
7879             unlock_user(p, arg1, 0);
7880         }
7881         return ret;
7882 #ifdef TARGET_NR_umount2
7883     case TARGET_NR_umount2:
7884         if (!(p = lock_user_string(arg1)))
7885             return -TARGET_EFAULT;
7886         ret = get_errno(umount2(p, arg2));
7887         unlock_user(p, arg1, 0);
7888         return ret;
7889 #endif
7890     case TARGET_NR_ioctl:
7891         return do_ioctl(arg1, arg2, arg3);
7892 #ifdef TARGET_NR_fcntl
7893     case TARGET_NR_fcntl:
7894         return do_fcntl(arg1, arg2, arg3);
7895 #endif
7896     case TARGET_NR_setpgid:
7897         return get_errno(setpgid(arg1, arg2));
7898     case TARGET_NR_umask:
7899         return get_errno(umask(arg1));
7900     case TARGET_NR_chroot:
7901         if (!(p = lock_user_string(arg1)))
7902             return -TARGET_EFAULT;
7903         ret = get_errno(chroot(p));
7904         unlock_user(p, arg1, 0);
7905         return ret;
7906 #ifdef TARGET_NR_dup2
7907     case TARGET_NR_dup2:
7908         ret = get_errno(dup2(arg1, arg2));
7909         if (ret >= 0) {
7910             fd_trans_dup(arg1, arg2);
7911         }
7912         return ret;
7913 #endif
7914 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7915     case TARGET_NR_dup3:
7916     {
7917         int host_flags;
7918 
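             /* O_CLOEXEC is the only flag dup3() accepts. */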
7919         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7920             return -EINVAL;
7921         }
7922         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7923         ret = get_errno(dup3(arg1, arg2, host_flags));
7924         if (ret >= 0) {
7925             fd_trans_dup(arg1, arg2);
7926         }
7927         return ret;
7928     }
7929 #endif
7930 #ifdef TARGET_NR_getppid /* not on alpha */
7931     case TARGET_NR_getppid:
7932         return get_errno(getppid());
7933 #endif
7934 #ifdef TARGET_NR_getpgrp
7935     case TARGET_NR_getpgrp:
7936         return get_errno(getpgrp());
7937 #endif
7938     case TARGET_NR_setsid:
7939         return get_errno(setsid());
7940 #ifdef TARGET_NR_sigaction
7941     case TARGET_NR_sigaction:
7942         {
7943 #if defined(TARGET_ALPHA)
7944             struct target_sigaction act, oact, *pact = 0;
7945             struct target_old_sigaction *old_act;
7946             if (arg2) {
7947                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7948                     return -TARGET_EFAULT;
7949                 act._sa_handler = old_act->_sa_handler;
7950                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7951                 act.sa_flags = old_act->sa_flags;
7952                 act.sa_restorer = 0;
7953                 unlock_user_struct(old_act, arg2, 0);
7954                 pact = &act;
7955             }
7956             ret = get_errno(do_sigaction(arg1, pact, &oact));
7957             if (!is_error(ret) && arg3) {
7958                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7959                     return -TARGET_EFAULT;
7960                 old_act->_sa_handler = oact._sa_handler;
7961                 old_act->sa_mask = oact.sa_mask.sig[0];
7962                 old_act->sa_flags = oact.sa_flags;
7963                 unlock_user_struct(old_act, arg3, 1);
7964             }
7965 #elif defined(TARGET_MIPS)
7966             struct target_sigaction act, oact, *pact, *old_act;
7967 
7968             if (arg2) {
7969                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7970                     return -TARGET_EFAULT;
7971                 act._sa_handler = old_act->_sa_handler;
7972                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7973                 act.sa_flags = old_act->sa_flags;
7974                 unlock_user_struct(old_act, arg2, 0);
7975                 pact = &act;
7976             } else {
7977                 pact = NULL;
7978             }
7979 
7980             ret = get_errno(do_sigaction(arg1, pact, &oact));
7981 
7982             if (!is_error(ret) && arg3) {
7983                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7984                     return -TARGET_EFAULT;
7985                 old_act->_sa_handler = oact._sa_handler;
7986                 old_act->sa_flags = oact.sa_flags;
7987                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7988                 old_act->sa_mask.sig[1] = 0;
7989                 old_act->sa_mask.sig[2] = 0;
7990                 old_act->sa_mask.sig[3] = 0;
7991                 unlock_user_struct(old_act, arg3, 1);
7992             }
7993 #else
7994             struct target_old_sigaction *old_act;
7995             struct target_sigaction act, oact, *pact;
7996             if (arg2) {
7997                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7998                     return -TARGET_EFAULT;
7999                 act._sa_handler = old_act->_sa_handler;
8000                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8001                 act.sa_flags = old_act->sa_flags;
8002                 act.sa_restorer = old_act->sa_restorer;
8003 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8004                 act.ka_restorer = 0;
8005 #endif
8006                 unlock_user_struct(old_act, arg2, 0);
8007                 pact = &act;
8008             } else {
8009                 pact = NULL;
8010             }
8011             ret = get_errno(do_sigaction(arg1, pact, &oact));
8012             if (!is_error(ret) && arg3) {
8013                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8014                     return -TARGET_EFAULT;
8015                 old_act->_sa_handler = oact._sa_handler;
8016                 old_act->sa_mask = oact.sa_mask.sig[0];
8017                 old_act->sa_flags = oact.sa_flags;
8018                 old_act->sa_restorer = oact.sa_restorer;
8019                 unlock_user_struct(old_act, arg3, 1);
8020             }
8021 #endif
8022         }
8023         return ret;
8024 #endif
8025     case TARGET_NR_rt_sigaction:
8026         {
8027 #if defined(TARGET_ALPHA)
8028             /* For Alpha and SPARC this is a 5 argument syscall, with
8029              * a 'restorer' parameter which must be copied into the
8030              * sa_restorer field of the sigaction struct.
8031              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8032              * and arg5 is the sigsetsize.
8033              * Alpha also has a separate rt_sigaction struct that it uses
8034              * here; SPARC uses the usual sigaction struct.
8035              */
8036             struct target_rt_sigaction *rt_act;
8037             struct target_sigaction act, oact, *pact = 0;
8038 
8039             if (arg4 != sizeof(target_sigset_t)) {
8040                 return -TARGET_EINVAL;
8041             }
8042             if (arg2) {
8043                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8044                     return -TARGET_EFAULT;
8045                 act._sa_handler = rt_act->_sa_handler;
8046                 act.sa_mask = rt_act->sa_mask;
8047                 act.sa_flags = rt_act->sa_flags;
8048                 act.sa_restorer = arg5;
8049                 unlock_user_struct(rt_act, arg2, 0);
8050                 pact = &act;
8051             }
8052             ret = get_errno(do_sigaction(arg1, pact, &oact));
8053             if (!is_error(ret) && arg3) {
8054                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8055                     return -TARGET_EFAULT;
8056                 rt_act->_sa_handler = oact._sa_handler;
8057                 rt_act->sa_mask = oact.sa_mask;
8058                 rt_act->sa_flags = oact.sa_flags;
8059                 unlock_user_struct(rt_act, arg3, 1);
8060             }
8061 #else
8062 #ifdef TARGET_SPARC
8063             target_ulong restorer = arg4;
8064             target_ulong sigsetsize = arg5;
8065 #else
8066             target_ulong sigsetsize = arg4;
8067 #endif
8068             struct target_sigaction *act;
8069             struct target_sigaction *oact;
8070 
8071             if (sigsetsize != sizeof(target_sigset_t)) {
8072                 return -TARGET_EINVAL;
8073             }
8074             if (arg2) {
8075                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8076                     return -TARGET_EFAULT;
8077                 }
8078 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8079                 act->ka_restorer = restorer;
8080 #endif
8081             } else {
8082                 act = NULL;
8083             }
8084             if (arg3) {
8085                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8086                     ret = -TARGET_EFAULT;
8087                     goto rt_sigaction_fail;
8088                 }
8089             } else
8090                 oact = NULL;
8091             ret = get_errno(do_sigaction(arg1, act, oact));
8092         rt_sigaction_fail:
8093             if (act)
8094                 unlock_user_struct(act, arg2, 0);
8095             if (oact)
8096                 unlock_user_struct(oact, arg3, 1);
8097 #endif
8098         }
8099         return ret;
8100 #ifdef TARGET_NR_sgetmask /* not on alpha */
8101     case TARGET_NR_sgetmask:
8102         {
8103             sigset_t cur_set;
8104             abi_ulong target_set;
8105             ret = do_sigprocmask(0, NULL, &cur_set);
8106             if (!ret) {
8107                 host_to_target_old_sigset(&target_set, &cur_set);
8108                 ret = target_set;
8109             }
8110         }
8111         return ret;
8112 #endif
8113 #ifdef TARGET_NR_ssetmask /* not on alpha */
8114     case TARGET_NR_ssetmask:
8115         {
8116             sigset_t set, oset;
8117             abi_ulong target_set = arg1;
8118             target_to_host_old_sigset(&set, &target_set);
8119             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8120             if (!ret) {
8121                 host_to_target_old_sigset(&target_set, &oset);
8122                 ret = target_set;
8123             }
8124         }
8125         return ret;
8126 #endif
8127 #ifdef TARGET_NR_sigprocmask
8128     case TARGET_NR_sigprocmask:
8129         {
8130 #if defined(TARGET_ALPHA)
8131             sigset_t set, oldset;
8132             abi_ulong mask;
8133             int how;
8134 
8135             switch (arg1) {
8136             case TARGET_SIG_BLOCK:
8137                 how = SIG_BLOCK;
8138                 break;
8139             case TARGET_SIG_UNBLOCK:
8140                 how = SIG_UNBLOCK;
8141                 break;
8142             case TARGET_SIG_SETMASK:
8143                 how = SIG_SETMASK;
8144                 break;
8145             default:
8146                 return -TARGET_EINVAL;
8147             }
8148             mask = arg2;
8149             target_to_host_old_sigset(&set, &mask);
8150 
8151             ret = do_sigprocmask(how, &set, &oldset);
8152             if (!is_error(ret)) {
8153                 host_to_target_old_sigset(&mask, &oldset);
8154                 ret = mask;
8155                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8156             }
8157 #else
8158             sigset_t set, oldset, *set_ptr;
8159             int how;
8160 
8161             if (arg2) {
8162                 switch (arg1) {
8163                 case TARGET_SIG_BLOCK:
8164                     how = SIG_BLOCK;
8165                     break;
8166                 case TARGET_SIG_UNBLOCK:
8167                     how = SIG_UNBLOCK;
8168                     break;
8169                 case TARGET_SIG_SETMASK:
8170                     how = SIG_SETMASK;
8171                     break;
8172                 default:
8173                     return -TARGET_EINVAL;
8174                 }
8175                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8176                     return -TARGET_EFAULT;
8177                 target_to_host_old_sigset(&set, p);
8178                 unlock_user(p, arg2, 0);
8179                 set_ptr = &set;
8180             } else {
8181                 how = 0;
8182                 set_ptr = NULL;
8183             }
8184             ret = do_sigprocmask(how, set_ptr, &oldset);
8185             if (!is_error(ret) && arg3) {
8186                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8187                     return -TARGET_EFAULT;
8188                 host_to_target_old_sigset(p, &oldset);
8189                 unlock_user(p, arg3, sizeof(target_sigset_t));
8190             }
8191 #endif
8192         }
8193         return ret;
8194 #endif
8195     case TARGET_NR_rt_sigprocmask:
8196         {
8197             int how = arg1;
8198             sigset_t set, oldset, *set_ptr;
8199 
8200             if (arg4 != sizeof(target_sigset_t)) {
8201                 return -TARGET_EINVAL;
8202             }
8203 
8204             if (arg2) {
8205                 switch(how) {
8206                 case TARGET_SIG_BLOCK:
8207                     how = SIG_BLOCK;
8208                     break;
8209                 case TARGET_SIG_UNBLOCK:
8210                     how = SIG_UNBLOCK;
8211                     break;
8212                 case TARGET_SIG_SETMASK:
8213                     how = SIG_SETMASK;
8214                     break;
8215                 default:
8216                     return -TARGET_EINVAL;
8217                 }
8218                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8219                     return -TARGET_EFAULT;
8220                 target_to_host_sigset(&set, p);
8221                 unlock_user(p, arg2, 0);
8222                 set_ptr = &set;
8223             } else {
8224                 how = 0;
8225                 set_ptr = NULL;
8226             }
8227             ret = do_sigprocmask(how, set_ptr, &oldset);
8228             if (!is_error(ret) && arg3) {
8229                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8230                     return -TARGET_EFAULT;
8231                 host_to_target_sigset(p, &oldset);
8232                 unlock_user(p, arg3, sizeof(target_sigset_t));
8233             }
8234         }
8235         return ret;
8236 #ifdef TARGET_NR_sigpending
8237     case TARGET_NR_sigpending:
8238         {
8239             sigset_t set;
8240             ret = get_errno(sigpending(&set));
8241             if (!is_error(ret)) {
8242                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8243                     return -TARGET_EFAULT;
8244                 host_to_target_old_sigset(p, &set);
8245                 unlock_user(p, arg1, sizeof(target_sigset_t));
8246             }
8247         }
8248         return ret;
8249 #endif
8250     case TARGET_NR_rt_sigpending:
8251         {
8252             sigset_t set;
8253 
8254             /* Yes, this check is >, not != like most. We follow the kernel's
8255              * logic and it does it like this because it implements
8256              * NR_sigpending through the same code path, and in that case
8257              * the old_sigset_t is smaller in size.
8258              */
8259             if (arg2 > sizeof(target_sigset_t)) {
8260                 return -TARGET_EINVAL;
8261             }
8262 
8263             ret = get_errno(sigpending(&set));
8264             if (!is_error(ret)) {
8265                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8266                     return -TARGET_EFAULT;
8267                 host_to_target_sigset(p, &set);
8268                 unlock_user(p, arg1, sizeof(target_sigset_t));
8269             }
8270         }
8271         return ret;
8272 #ifdef TARGET_NR_sigsuspend
8273     case TARGET_NR_sigsuspend:
8274         {
8275             TaskState *ts = cpu->opaque;
8276 #if defined(TARGET_ALPHA)
8277             abi_ulong mask = arg1;
8278             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8279 #else
8280             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8281                 return -TARGET_EFAULT;
8282             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8283             unlock_user(p, arg1, 0);
8284 #endif
8285             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8286                                                SIGSET_T_SIZE));
8287             if (ret != -TARGET_ERESTARTSYS) {
8288                 ts->in_sigsuspend = 1;
8289             }
8290         }
8291         return ret;
8292 #endif
8293     case TARGET_NR_rt_sigsuspend:
8294         {
8295             TaskState *ts = cpu->opaque;
8296 
8297             if (arg2 != sizeof(target_sigset_t)) {
8298                 return -TARGET_EINVAL;
8299             }
8300             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8301                 return -TARGET_EFAULT;
8302             target_to_host_sigset(&ts->sigsuspend_mask, p);
8303             unlock_user(p, arg1, 0);
8304             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8305                                                SIGSET_T_SIZE));
8306             if (ret != -TARGET_ERESTARTSYS) {
8307                 ts->in_sigsuspend = 1;
8308             }
8309         }
8310         return ret;
8311     case TARGET_NR_rt_sigtimedwait:
8312         {
8313             sigset_t set;
8314             struct timespec uts, *puts;
8315             siginfo_t uinfo;
8316 
8317             if (arg4 != sizeof(target_sigset_t)) {
8318                 return -TARGET_EINVAL;
8319             }
8320 
8321             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8322                 return -TARGET_EFAULT;
8323             target_to_host_sigset(&set, p);
8324             unlock_user(p, arg1, 0);
8325             if (arg3) {
8326                 puts = &uts;
8327                 target_to_host_timespec(puts, arg3);
8328             } else {
8329                 puts = NULL;
8330             }
8331             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8332                                                  SIGSET_T_SIZE));
8333             if (!is_error(ret)) {
8334                 if (arg2) {
8335                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8336                                   0);
8337                     if (!p) {
8338                         return -TARGET_EFAULT;
8339                     }
8340                     host_to_target_siginfo(p, &uinfo);
8341                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8342                 }
8343                 ret = host_to_target_signal(ret);
8344             }
8345         }
8346         return ret;
8347     case TARGET_NR_rt_sigqueueinfo:
8348         {
8349             siginfo_t uinfo;
8350 
8351             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8352             if (!p) {
8353                 return -TARGET_EFAULT;
8354             }
8355             target_to_host_siginfo(&uinfo, p);
8356             unlock_user(p, arg3, 0);
8357             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8358         }
8359         return ret;
8360     case TARGET_NR_rt_tgsigqueueinfo:
8361         {
8362             siginfo_t uinfo;
8363 
8364             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8365             if (!p) {
8366                 return -TARGET_EFAULT;
8367             }
8368             target_to_host_siginfo(&uinfo, p);
8369             unlock_user(p, arg4, 0);
8370             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8371         }
8372         return ret;
8373 #ifdef TARGET_NR_sigreturn
8374     case TARGET_NR_sigreturn:
8375         if (block_signals()) {
8376             return -TARGET_ERESTARTSYS;
8377         }
8378         return do_sigreturn(cpu_env);
8379 #endif
8380     case TARGET_NR_rt_sigreturn:
8381         if (block_signals()) {
8382             return -TARGET_ERESTARTSYS;
8383         }
8384         return do_rt_sigreturn(cpu_env);
8385     case TARGET_NR_sethostname:
8386         if (!(p = lock_user_string(arg1)))
8387             return -TARGET_EFAULT;
8388         ret = get_errno(sethostname(p, arg2));
8389         unlock_user(p, arg1, 0);
8390         return ret;
8391 #ifdef TARGET_NR_setrlimit
8392     case TARGET_NR_setrlimit:
8393         {
8394             int resource = target_to_host_resource(arg1);
8395             struct target_rlimit *target_rlim;
8396             struct rlimit rlim;
8397             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8398                 return -TARGET_EFAULT;
8399             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8400             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8401             unlock_user_struct(target_rlim, arg2, 0);
8402             /*
8403              * If we just passed through resource limit settings for memory then
8404              * they would also apply to QEMU's own allocations, and QEMU will
8405              * crash or hang or die if its allocations fail. Ideally we would
8406              * track the guest allocations in QEMU and apply the limits ourselves.
8407              * For now, just tell the guest the call succeeded but don't actually
8408              * limit anything.
8409              */
8410             if (resource != RLIMIT_AS &&
8411                 resource != RLIMIT_DATA &&
8412                 resource != RLIMIT_STACK) {
8413                 return get_errno(setrlimit(resource, &rlim));
8414             } else {
8415                 return 0;
8416             }
8417         }
8418 #endif
8419 #ifdef TARGET_NR_getrlimit
8420     case TARGET_NR_getrlimit:
8421         {
8422             int resource = target_to_host_resource(arg1);
8423             struct target_rlimit *target_rlim;
8424             struct rlimit rlim;
8425 
8426             ret = get_errno(getrlimit(resource, &rlim));
8427             if (!is_error(ret)) {
8428                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8429                     return -TARGET_EFAULT;
8430                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8431                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8432                 unlock_user_struct(target_rlim, arg2, 1);
8433             }
8434         }
8435         return ret;
8436 #endif
8437     case TARGET_NR_getrusage:
8438         {
8439             struct rusage rusage;
8440             ret = get_errno(getrusage(arg1, &rusage));
8441             if (!is_error(ret)) {
8442                 ret = host_to_target_rusage(arg2, &rusage);
8443             }
8444         }
8445         return ret;
8446     case TARGET_NR_gettimeofday:
8447         {
8448             struct timeval tv;
8449             ret = get_errno(gettimeofday(&tv, NULL));
8450             if (!is_error(ret)) {
8451                 if (copy_to_user_timeval(arg1, &tv))
8452                     return -TARGET_EFAULT;
8453             }
8454         }
8455         return ret;
8456     case TARGET_NR_settimeofday:
8457         {
8458             struct timeval tv, *ptv = NULL;
8459             struct timezone tz, *ptz = NULL;
8460 
8461             if (arg1) {
8462                 if (copy_from_user_timeval(&tv, arg1)) {
8463                     return -TARGET_EFAULT;
8464                 }
8465                 ptv = &tv;
8466             }
8467 
8468             if (arg2) {
8469                 if (copy_from_user_timezone(&tz, arg2)) {
8470                     return -TARGET_EFAULT;
8471                 }
8472                 ptz = &tz;
8473             }
8474 
8475             return get_errno(settimeofday(ptv, ptz));
8476         }
8477 #if defined(TARGET_NR_select)
8478     case TARGET_NR_select:
8479 #if defined(TARGET_WANT_NI_OLD_SELECT)
8480         /* Some architectures used to provide old_select here,
8481          * but now return ENOSYS for it.
8482          */
8483         ret = -TARGET_ENOSYS;
8484 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8485         ret = do_old_select(arg1);
8486 #else
8487         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8488 #endif
8489         return ret;
8490 #endif
8491 #ifdef TARGET_NR_pselect6
8492     case TARGET_NR_pselect6:
8493         {
8494             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8495             fd_set rfds, wfds, efds;
8496             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8497             struct timespec ts, *ts_ptr;
8498 
8499             /*
8500              * The 6th arg is actually two args smashed together,
8501              * so we cannot use the C library.
8502              */
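                 /*
                  * The sixth argument points at { const sigset_t *ss;
                  * size_t ss_len; }, so two abi_ulongs are unpacked from
                  * guest memory further down.
                  */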
8503             sigset_t set;
8504             struct {
8505                 sigset_t *set;
8506                 size_t size;
8507             } sig, *sig_ptr;
8508 
8509             abi_ulong arg_sigset, arg_sigsize, *arg7;
8510             target_sigset_t *target_sigset;
8511 
8512             n = arg1;
8513             rfd_addr = arg2;
8514             wfd_addr = arg3;
8515             efd_addr = arg4;
8516             ts_addr = arg5;
8517 
8518             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8519             if (ret) {
8520                 return ret;
8521             }
8522             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8523             if (ret) {
8524                 return ret;
8525             }
8526             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8527             if (ret) {
8528                 return ret;
8529             }
8530 
8531             /*
8532              * This takes a timespec, and not a timeval, so we cannot
8533              * use the do_select() helper ...
8534              */
8535             if (ts_addr) {
8536                 if (target_to_host_timespec(&ts, ts_addr)) {
8537                     return -TARGET_EFAULT;
8538                 }
8539                 ts_ptr = &ts;
8540             } else {
8541                 ts_ptr = NULL;
8542             }
8543 
8544             /* Extract the two packed args for the sigset */
8545             if (arg6) {
8546                 sig_ptr = &sig;
8547                 sig.size = SIGSET_T_SIZE;
8548 
8549                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8550                 if (!arg7) {
8551                     return -TARGET_EFAULT;
8552                 }
8553                 arg_sigset = tswapal(arg7[0]);
8554                 arg_sigsize = tswapal(arg7[1]);
8555                 unlock_user(arg7, arg6, 0);
8556 
8557                 if (arg_sigset) {
8558                     sig.set = &set;
8559                     if (arg_sigsize != sizeof(*target_sigset)) {
8560                         /* Like the kernel, we enforce correct size sigsets */
8561                         return -TARGET_EINVAL;
8562                     }
8563                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8564                                               sizeof(*target_sigset), 1);
8565                     if (!target_sigset) {
8566                         return -TARGET_EFAULT;
8567                     }
8568                     target_to_host_sigset(&set, target_sigset);
8569                     unlock_user(target_sigset, arg_sigset, 0);
8570                 } else {
8571                     sig.set = NULL;
8572                 }
8573             } else {
8574                 sig_ptr = NULL;
8575             }
8576 
8577             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8578                                           ts_ptr, sig_ptr));
8579 
8580             if (!is_error(ret)) {
8581                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8582                     return -TARGET_EFAULT;
8583                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8584                     return -TARGET_EFAULT;
8585                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8586                     return -TARGET_EFAULT;
8587 
8588                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8589                     return -TARGET_EFAULT;
8590             }
8591         }
8592         return ret;
8593 #endif
8594 #ifdef TARGET_NR_symlink
8595     case TARGET_NR_symlink:
8596         {
8597             void *p2;
8598             p = lock_user_string(arg1);
8599             p2 = lock_user_string(arg2);
8600             if (!p || !p2)
8601                 ret = -TARGET_EFAULT;
8602             else
8603                 ret = get_errno(symlink(p, p2));
8604             unlock_user(p2, arg2, 0);
8605             unlock_user(p, arg1, 0);
8606         }
8607         return ret;
8608 #endif
8609 #if defined(TARGET_NR_symlinkat)
8610     case TARGET_NR_symlinkat:
8611         {
8612             void *p2;
8613             p  = lock_user_string(arg1);
8614             p2 = lock_user_string(arg3);
8615             if (!p || !p2)
8616                 ret = -TARGET_EFAULT;
8617             else
8618                 ret = get_errno(symlinkat(p, arg2, p2));
8619             unlock_user(p2, arg3, 0);
8620             unlock_user(p, arg1, 0);
8621         }
8622         return ret;
8623 #endif
8624 #ifdef TARGET_NR_readlink
8625     case TARGET_NR_readlink:
8626         {
8627             void *p2;
8628             p = lock_user_string(arg1);
8629             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8630             if (!p || !p2) {
8631                 ret = -TARGET_EFAULT;
8632             } else if (!arg3) {
8633                 /* Short circuit this for the magic exe check. */
8634                 ret = -TARGET_EINVAL;
8635             } else if (is_proc_myself((const char *)p, "exe")) {
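                     /*
                      * A guest reading /proc/self/exe would otherwise see
                      * the QEMU binary; report the guest executable's
                      * resolved path (exec_path) instead.
                      */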
8636                 char real[PATH_MAX], *temp;
8637                 temp = realpath(exec_path, real);
8638                 /* Return value is # of bytes that we wrote to the buffer. */
8639                 if (temp == NULL) {
8640                     ret = get_errno(-1);
8641                 } else {
8642                     /* Don't worry about sign mismatch as earlier mapping
8643                      * logic would have thrown a bad address error. */
8644                     ret = MIN(strlen(real), arg3);
8645                     /* We cannot NUL terminate the string. */
8646                     memcpy(p2, real, ret);
8647                 }
8648             } else {
8649                 ret = get_errno(readlink(path(p), p2, arg3));
8650             }
8651             unlock_user(p2, arg2, ret);
8652             unlock_user(p, arg1, 0);
8653         }
8654         return ret;
8655 #endif
8656 #if defined(TARGET_NR_readlinkat)
8657     case TARGET_NR_readlinkat:
8658         {
8659             void *p2;
8660             p  = lock_user_string(arg2);
8661             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8662             if (!p || !p2) {
8663                 ret = -TARGET_EFAULT;
8664             } else if (is_proc_myself((const char *)p, "exe")) {
8665                 char real[PATH_MAX], *temp;
8666                 temp = realpath(exec_path, real);
8667                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8668                 snprintf((char *)p2, arg4, "%s", real);
8669             } else {
8670                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8671             }
8672             unlock_user(p2, arg3, ret);
8673             unlock_user(p, arg2, 0);
8674         }
8675         return ret;
8676 #endif
8677 #ifdef TARGET_NR_swapon
8678     case TARGET_NR_swapon:
8679         if (!(p = lock_user_string(arg1)))
8680             return -TARGET_EFAULT;
8681         ret = get_errno(swapon(p, arg2));
8682         unlock_user(p, arg1, 0);
8683         return ret;
8684 #endif
8685     case TARGET_NR_reboot:
8686         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8687             /* arg4 must be ignored in all other cases */
8688             p = lock_user_string(arg4);
8689             if (!p) {
8690                 return -TARGET_EFAULT;
8691             }
8692             ret = get_errno(reboot(arg1, arg2, arg3, p));
8693             unlock_user(p, arg4, 0);
8694         } else {
8695             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8696         }
8697         return ret;
8698 #ifdef TARGET_NR_mmap
8699     case TARGET_NR_mmap:
8700 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8701     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8702     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8703     || defined(TARGET_S390X)
8704         {
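                 /*
                  * These ABIs kept the old_mmap convention: the guest
                  * passes one pointer to an array of the six mmap
                  * arguments instead of passing them individually.
                  */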
8705             abi_ulong *v;
8706             abi_ulong v1, v2, v3, v4, v5, v6;
8707             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8708                 return -TARGET_EFAULT;
8709             v1 = tswapal(v[0]);
8710             v2 = tswapal(v[1]);
8711             v3 = tswapal(v[2]);
8712             v4 = tswapal(v[3]);
8713             v5 = tswapal(v[4]);
8714             v6 = tswapal(v[5]);
8715             unlock_user(v, arg1, 0);
8716             ret = get_errno(target_mmap(v1, v2, v3,
8717                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8718                                         v5, v6));
8719         }
8720 #else
8721         ret = get_errno(target_mmap(arg1, arg2, arg3,
8722                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8723                                     arg5,
8724                                     arg6));
8725 #endif
8726         return ret;
8727 #endif
8728 #ifdef TARGET_NR_mmap2
8729     case TARGET_NR_mmap2:
8730 #ifndef MMAP_SHIFT
8731 #define MMAP_SHIFT 12
8732 #endif
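             /*
              * mmap2 takes its file offset in 4096-byte units, hence the
              * shift back to a byte offset for target_mmap().
              */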
8733         ret = target_mmap(arg1, arg2, arg3,
8734                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8735                           arg5, arg6 << MMAP_SHIFT);
8736         return get_errno(ret);
8737 #endif
8738     case TARGET_NR_munmap:
8739         return get_errno(target_munmap(arg1, arg2));
8740     case TARGET_NR_mprotect:
8741         {
8742             TaskState *ts = cpu->opaque;
8743             /* Special hack to detect libc making the stack executable.  */
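                 /*
                  * glibc requests this with mprotect(PROT_GROWSDOWN); the
                  * kernel would extend the change down the whole grows-down
                  * region, so widen the range to the guest stack limit
                  * here and drop the flag.
                  */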
8744             if ((arg3 & PROT_GROWSDOWN)
8745                 && arg1 >= ts->info->stack_limit
8746                 && arg1 <= ts->info->start_stack) {
8747                 arg3 &= ~PROT_GROWSDOWN;
8748                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8749                 arg1 = ts->info->stack_limit;
8750             }
8751         }
8752         return get_errno(target_mprotect(arg1, arg2, arg3));
8753 #ifdef TARGET_NR_mremap
8754     case TARGET_NR_mremap:
8755         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8756 #endif
8757         /* ??? msync/mlock/munlock are broken for softmmu.  */
8758 #ifdef TARGET_NR_msync
8759     case TARGET_NR_msync:
8760         return get_errno(msync(g2h(arg1), arg2, arg3));
8761 #endif
8762 #ifdef TARGET_NR_mlock
8763     case TARGET_NR_mlock:
8764         return get_errno(mlock(g2h(arg1), arg2));
8765 #endif
8766 #ifdef TARGET_NR_munlock
8767     case TARGET_NR_munlock:
8768         return get_errno(munlock(g2h(arg1), arg2));
8769 #endif
8770 #ifdef TARGET_NR_mlockall
8771     case TARGET_NR_mlockall:
8772         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8773 #endif
8774 #ifdef TARGET_NR_munlockall
8775     case TARGET_NR_munlockall:
8776         return get_errno(munlockall());
8777 #endif
8778 #ifdef TARGET_NR_truncate
8779     case TARGET_NR_truncate:
8780         if (!(p = lock_user_string(arg1)))
8781             return -TARGET_EFAULT;
8782         ret = get_errno(truncate(p, arg2));
8783         unlock_user(p, arg1, 0);
8784         return ret;
8785 #endif
8786 #ifdef TARGET_NR_ftruncate
8787     case TARGET_NR_ftruncate:
8788         return get_errno(ftruncate(arg1, arg2));
8789 #endif
8790     case TARGET_NR_fchmod:
8791         return get_errno(fchmod(arg1, arg2));
8792 #if defined(TARGET_NR_fchmodat)
8793     case TARGET_NR_fchmodat:
8794         if (!(p = lock_user_string(arg2)))
8795             return -TARGET_EFAULT;
8796         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8797         unlock_user(p, arg2, 0);
8798         return ret;
8799 #endif
8800     case TARGET_NR_getpriority:
8801         /* Note that negative values are valid for getpriority, so we must
8802            differentiate based on errno settings.  */
8803         errno = 0;
8804         ret = getpriority(arg1, arg2);
8805         if (ret == -1 && errno != 0) {
8806             return -host_to_target_errno(errno);
8807         }
8808 #ifdef TARGET_ALPHA
8809         /* Return value is the unbiased priority.  Signal no error.  */
8810         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8811 #else
8812         /* Return value is a biased priority to avoid negative numbers.  */
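             /* e.g. a host priority of -20 is returned as 40, and 19 as 1 */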
8813         ret = 20 - ret;
8814 #endif
8815         return ret;
8816     case TARGET_NR_setpriority:
8817         return get_errno(setpriority(arg1, arg2, arg3));
8818 #ifdef TARGET_NR_statfs
8819     case TARGET_NR_statfs:
8820         if (!(p = lock_user_string(arg1))) {
8821             return -TARGET_EFAULT;
8822         }
8823         ret = get_errno(statfs(path(p), &stfs));
8824         unlock_user(p, arg1, 0);
8825     convert_statfs:
8826         if (!is_error(ret)) {
8827             struct target_statfs *target_stfs;
8828 
8829             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8830                 return -TARGET_EFAULT;
8831             __put_user(stfs.f_type, &target_stfs->f_type);
8832             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8833             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8834             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8835             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8836             __put_user(stfs.f_files, &target_stfs->f_files);
8837             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8838             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8839             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8840             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8841             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8842 #ifdef _STATFS_F_FLAGS
8843             __put_user(stfs.f_flags, &target_stfs->f_flags);
8844 #else
8845             __put_user(0, &target_stfs->f_flags);
8846 #endif
8847             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8848             unlock_user_struct(target_stfs, arg2, 1);
8849         }
8850         return ret;
8851 #endif
8852 #ifdef TARGET_NR_fstatfs
8853     case TARGET_NR_fstatfs:
8854         ret = get_errno(fstatfs(arg1, &stfs));
8855         goto convert_statfs;
8856 #endif
8857 #ifdef TARGET_NR_statfs64
8858     case TARGET_NR_statfs64:
8859         if (!(p = lock_user_string(arg1))) {
8860             return -TARGET_EFAULT;
8861         }
8862         ret = get_errno(statfs(path(p), &stfs));
8863         unlock_user(p, arg1, 0);
8864     convert_statfs64:
8865         if (!is_error(ret)) {
8866             struct target_statfs64 *target_stfs;
8867 
8868             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8869                 return -TARGET_EFAULT;
8870             __put_user(stfs.f_type, &target_stfs->f_type);
8871             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8872             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8873             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8874             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8875             __put_user(stfs.f_files, &target_stfs->f_files);
8876             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8877             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8878             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8879             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8880             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8881             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8882             unlock_user_struct(target_stfs, arg3, 1);
8883         }
8884         return ret;
8885     case TARGET_NR_fstatfs64:
8886         ret = get_errno(fstatfs(arg1, &stfs));
8887         goto convert_statfs64;
8888 #endif
8889 #ifdef TARGET_NR_socketcall
8890     case TARGET_NR_socketcall:
8891         return do_socketcall(arg1, arg2);
8892 #endif
8893 #ifdef TARGET_NR_accept
8894     case TARGET_NR_accept:
8895         return do_accept4(arg1, arg2, arg3, 0);
8896 #endif
8897 #ifdef TARGET_NR_accept4
8898     case TARGET_NR_accept4:
8899         return do_accept4(arg1, arg2, arg3, arg4);
8900 #endif
8901 #ifdef TARGET_NR_bind
8902     case TARGET_NR_bind:
8903         return do_bind(arg1, arg2, arg3);
8904 #endif
8905 #ifdef TARGET_NR_connect
8906     case TARGET_NR_connect:
8907         return do_connect(arg1, arg2, arg3);
8908 #endif
8909 #ifdef TARGET_NR_getpeername
8910     case TARGET_NR_getpeername:
8911         return do_getpeername(arg1, arg2, arg3);
8912 #endif
8913 #ifdef TARGET_NR_getsockname
8914     case TARGET_NR_getsockname:
8915         return do_getsockname(arg1, arg2, arg3);
8916 #endif
8917 #ifdef TARGET_NR_getsockopt
8918     case TARGET_NR_getsockopt:
8919         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8920 #endif
8921 #ifdef TARGET_NR_listen
8922     case TARGET_NR_listen:
8923         return get_errno(listen(arg1, arg2));
8924 #endif
8925 #ifdef TARGET_NR_recv
8926     case TARGET_NR_recv:
8927         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8928 #endif
8929 #ifdef TARGET_NR_recvfrom
8930     case TARGET_NR_recvfrom:
8931         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8932 #endif
8933 #ifdef TARGET_NR_recvmsg
8934     case TARGET_NR_recvmsg:
8935         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8936 #endif
8937 #ifdef TARGET_NR_send
8938     case TARGET_NR_send:
8939         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8940 #endif
8941 #ifdef TARGET_NR_sendmsg
8942     case TARGET_NR_sendmsg:
8943         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8944 #endif
8945 #ifdef TARGET_NR_sendmmsg
8946     case TARGET_NR_sendmmsg:
8947         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8948     case TARGET_NR_recvmmsg:
8949         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8950 #endif
8951 #ifdef TARGET_NR_sendto
8952     case TARGET_NR_sendto:
8953         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8954 #endif
8955 #ifdef TARGET_NR_shutdown
8956     case TARGET_NR_shutdown:
8957         return get_errno(shutdown(arg1, arg2));
8958 #endif
8959 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8960     case TARGET_NR_getrandom:
8961         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8962         if (!p) {
8963             return -TARGET_EFAULT;
8964         }
8965         ret = get_errno(getrandom(p, arg2, arg3));
8966         unlock_user(p, arg1, ret);
8967         return ret;
8968 #endif
8969 #ifdef TARGET_NR_socket
8970     case TARGET_NR_socket:
8971         return do_socket(arg1, arg2, arg3);
8972 #endif
8973 #ifdef TARGET_NR_socketpair
8974     case TARGET_NR_socketpair:
8975         return do_socketpair(arg1, arg2, arg3, arg4);
8976 #endif
8977 #ifdef TARGET_NR_setsockopt
8978     case TARGET_NR_setsockopt:
8979         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8980 #endif
8981 #if defined(TARGET_NR_syslog)
8982     case TARGET_NR_syslog:
8983         {
8984             int len = arg2;
8985 
8986             switch (arg1) {
8987             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8988             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8989             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8990             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8991             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8992             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8993             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8994             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8995                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8996             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8997             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8998             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8999                 {
9000                     if (len < 0) {
9001                         return -TARGET_EINVAL;
9002                     }
9003                     if (len == 0) {
9004                         return 0;
9005                     }
9006                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9007                     if (!p) {
9008                         return -TARGET_EFAULT;
9009                     }
9010                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9011                     unlock_user(p, arg2, arg3);
9012                 }
9013                 return ret;
9014             default:
9015                 return -TARGET_EINVAL;
9016             }
9017         }
9018         break;
9019 #endif
9020     case TARGET_NR_setitimer:
9021         {
9022             struct itimerval value, ovalue, *pvalue;
9023 
9024             if (arg2) {
9025                 pvalue = &value;
9026                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9027                     || copy_from_user_timeval(&pvalue->it_value,
9028                                               arg2 + sizeof(struct target_timeval)))
9029                     return -TARGET_EFAULT;
9030             } else {
9031                 pvalue = NULL;
9032             }
9033             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9034             if (!is_error(ret) && arg3) {
9035                 if (copy_to_user_timeval(arg3,
9036                                          &ovalue.it_interval)
9037                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9038                                             &ovalue.it_value))
9039                     return -TARGET_EFAULT;
9040             }
9041         }
9042         return ret;
9043     case TARGET_NR_getitimer:
9044         {
9045             struct itimerval value;
9046 
9047             ret = get_errno(getitimer(arg1, &value));
9048             if (!is_error(ret) && arg2) {
9049                 if (copy_to_user_timeval(arg2,
9050                                          &value.it_interval)
9051                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9052                                             &value.it_value))
9053                     return -TARGET_EFAULT;
9054             }
9055         }
9056         return ret;
9057 #ifdef TARGET_NR_stat
9058     case TARGET_NR_stat:
9059         if (!(p = lock_user_string(arg1))) {
9060             return -TARGET_EFAULT;
9061         }
9062         ret = get_errno(stat(path(p), &st));
9063         unlock_user(p, arg1, 0);
9064         goto do_stat;
9065 #endif
9066 #ifdef TARGET_NR_lstat
9067     case TARGET_NR_lstat:
9068         if (!(p = lock_user_string(arg1))) {
9069             return -TARGET_EFAULT;
9070         }
9071         ret = get_errno(lstat(path(p), &st));
9072         unlock_user(p, arg1, 0);
9073         goto do_stat;
9074 #endif
9075 #ifdef TARGET_NR_fstat
9076     case TARGET_NR_fstat:
9077         {
9078             ret = get_errno(fstat(arg1, &st));
9079 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9080         do_stat:
9081 #endif
9082             if (!is_error(ret)) {
9083                 struct target_stat *target_st;
9084 
9085                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9086                     return -TARGET_EFAULT;
9087                 memset(target_st, 0, sizeof(*target_st));
9088                 __put_user(st.st_dev, &target_st->st_dev);
9089                 __put_user(st.st_ino, &target_st->st_ino);
9090                 __put_user(st.st_mode, &target_st->st_mode);
9091                 __put_user(st.st_uid, &target_st->st_uid);
9092                 __put_user(st.st_gid, &target_st->st_gid);
9093                 __put_user(st.st_nlink, &target_st->st_nlink);
9094                 __put_user(st.st_rdev, &target_st->st_rdev);
9095                 __put_user(st.st_size, &target_st->st_size);
9096                 __put_user(st.st_blksize, &target_st->st_blksize);
9097                 __put_user(st.st_blocks, &target_st->st_blocks);
9098                 __put_user(st.st_atime, &target_st->target_st_atime);
9099                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9100                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9101 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9102     defined(TARGET_STAT_HAVE_NSEC)
9103                 __put_user(st.st_atim.tv_nsec,
9104                            &target_st->target_st_atime_nsec);
9105                 __put_user(st.st_mtim.tv_nsec,
9106                            &target_st->target_st_mtime_nsec);
9107                 __put_user(st.st_ctim.tv_nsec,
9108                            &target_st->target_st_ctime_nsec);
9109 #endif
9110                 unlock_user_struct(target_st, arg2, 1);
9111             }
9112         }
9113         return ret;
9114 #endif
9115     case TARGET_NR_vhangup:
9116         return get_errno(vhangup());
9117 #ifdef TARGET_NR_syscall
9118     case TARGET_NR_syscall:
9119         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9120                           arg6, arg7, arg8, 0);
9121 #endif
9122     case TARGET_NR_wait4:
9123         {
9124             int status;
9125             abi_long status_ptr = arg2;
9126             struct rusage rusage, *rusage_ptr;
9127             abi_ulong target_rusage = arg4;
9128             abi_long rusage_err;
9129             if (target_rusage)
9130                 rusage_ptr = &rusage;
9131             else
9132                 rusage_ptr = NULL;
9133             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9134             if (!is_error(ret)) {
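                     /*
                      * With WNOHANG the call may return 0 with no child
                      * reaped; only write the status word back when a pid
                      * was actually returned.
                      */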
9135                 if (status_ptr && ret) {
9136                     status = host_to_target_waitstatus(status);
9137                     if (put_user_s32(status, status_ptr))
9138                         return -TARGET_EFAULT;
9139                 }
9140                 if (target_rusage) {
9141                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9142                     if (rusage_err) {
9143                         ret = rusage_err;
9144                     }
9145                 }
9146             }
9147         }
9148         return ret;
9149 #ifdef TARGET_NR_swapoff
9150     case TARGET_NR_swapoff:
9151         if (!(p = lock_user_string(arg1)))
9152             return -TARGET_EFAULT;
9153         ret = get_errno(swapoff(p));
9154         unlock_user(p, arg1, 0);
9155         return ret;
9156 #endif
9157     case TARGET_NR_sysinfo:
9158         {
9159             struct target_sysinfo *target_value;
9160             struct sysinfo value;
9161             ret = get_errno(sysinfo(&value));
9162             if (!is_error(ret) && arg1)
9163             {
9164                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9165                     return -TARGET_EFAULT;
9166                 __put_user(value.uptime, &target_value->uptime);
9167                 __put_user(value.loads[0], &target_value->loads[0]);
9168                 __put_user(value.loads[1], &target_value->loads[1]);
9169                 __put_user(value.loads[2], &target_value->loads[2]);
9170                 __put_user(value.totalram, &target_value->totalram);
9171                 __put_user(value.freeram, &target_value->freeram);
9172                 __put_user(value.sharedram, &target_value->sharedram);
9173                 __put_user(value.bufferram, &target_value->bufferram);
9174                 __put_user(value.totalswap, &target_value->totalswap);
9175                 __put_user(value.freeswap, &target_value->freeswap);
9176                 __put_user(value.procs, &target_value->procs);
9177                 __put_user(value.totalhigh, &target_value->totalhigh);
9178                 __put_user(value.freehigh, &target_value->freehigh);
9179                 __put_user(value.mem_unit, &target_value->mem_unit);
9180                 unlock_user_struct(target_value, arg1, 1);
9181             }
9182         }
9183         return ret;
9184 #ifdef TARGET_NR_ipc
9185     case TARGET_NR_ipc:
9186         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9187 #endif
9188 #ifdef TARGET_NR_semget
9189     case TARGET_NR_semget:
9190         return get_errno(semget(arg1, arg2, arg3));
9191 #endif
9192 #ifdef TARGET_NR_semop
9193     case TARGET_NR_semop:
9194         return do_semop(arg1, arg2, arg3);
9195 #endif
9196 #ifdef TARGET_NR_semctl
9197     case TARGET_NR_semctl:
9198         return do_semctl(arg1, arg2, arg3, arg4);
9199 #endif
9200 #ifdef TARGET_NR_msgctl
9201     case TARGET_NR_msgctl:
9202         return do_msgctl(arg1, arg2, arg3);
9203 #endif
9204 #ifdef TARGET_NR_msgget
9205     case TARGET_NR_msgget:
9206         return get_errno(msgget(arg1, arg2));
9207 #endif
9208 #ifdef TARGET_NR_msgrcv
9209     case TARGET_NR_msgrcv:
9210         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9211 #endif
9212 #ifdef TARGET_NR_msgsnd
9213     case TARGET_NR_msgsnd:
9214         return do_msgsnd(arg1, arg2, arg3, arg4);
9215 #endif
9216 #ifdef TARGET_NR_shmget
9217     case TARGET_NR_shmget:
9218         return get_errno(shmget(arg1, arg2, arg3));
9219 #endif
9220 #ifdef TARGET_NR_shmctl
9221     case TARGET_NR_shmctl:
9222         return do_shmctl(arg1, arg2, arg3);
9223 #endif
9224 #ifdef TARGET_NR_shmat
9225     case TARGET_NR_shmat:
9226         return do_shmat(cpu_env, arg1, arg2, arg3);
9227 #endif
9228 #ifdef TARGET_NR_shmdt
9229     case TARGET_NR_shmdt:
9230         return do_shmdt(arg1);
9231 #endif
9232     case TARGET_NR_fsync:
9233         return get_errno(fsync(arg1));
9234     case TARGET_NR_clone:
9235         /* Linux manages to have three different orderings for its
9236          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9237          * match the kernel's CONFIG_CLONE_* settings.
9238          * Microblaze is further special in that it uses a sixth
9239          * implicit argument to clone for the TLS pointer.
9240          */
9241 #if defined(TARGET_MICROBLAZE)
9242         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9243 #elif defined(TARGET_CLONE_BACKWARDS)
9244         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9245 #elif defined(TARGET_CLONE_BACKWARDS2)
9246         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9247 #else
9248         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9249 #endif
9250         return ret;
9251 #ifdef __NR_exit_group
9252         /* new thread calls */
9253     case TARGET_NR_exit_group:
9254         preexit_cleanup(cpu_env, arg1);
9255         return get_errno(exit_group(arg1));
9256 #endif
9257     case TARGET_NR_setdomainname:
9258         if (!(p = lock_user_string(arg1)))
9259             return -TARGET_EFAULT;
9260         ret = get_errno(setdomainname(p, arg2));
9261         unlock_user(p, arg1, 0);
9262         return ret;
9263     case TARGET_NR_uname:
9264         /* no need to transcode because we use the linux syscall */
9265         {
9266             struct new_utsname * buf;
9267 
9268             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9269                 return -TARGET_EFAULT;
9270             ret = get_errno(sys_uname(buf));
9271             if (!is_error(ret)) {
9272                 /* Overwrite the native machine name with whatever is being
9273                    emulated. */
9274                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9275                           sizeof(buf->machine));
9276                 /* Allow the user to override the reported release.  */
9277                 if (qemu_uname_release && *qemu_uname_release) {
9278                     g_strlcpy(buf->release, qemu_uname_release,
9279                               sizeof(buf->release));
9280                 }
9281             }
9282             unlock_user_struct(buf, arg1, 1);
9283         }
9284         return ret;
9285 #ifdef TARGET_I386
9286     case TARGET_NR_modify_ldt:
9287         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9288 #if !defined(TARGET_X86_64)
9289     case TARGET_NR_vm86:
9290         return do_vm86(cpu_env, arg1, arg2);
9291 #endif
9292 #endif
9293     case TARGET_NR_adjtimex:
9294         {
9295             struct timex host_buf;
9296 
9297             if (target_to_host_timex(&host_buf, arg1) != 0) {
9298                 return -TARGET_EFAULT;
9299             }
9300             ret = get_errno(adjtimex(&host_buf));
9301             if (!is_error(ret)) {
9302                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9303                     return -TARGET_EFAULT;
9304                 }
9305             }
9306         }
9307         return ret;
9308 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9309     case TARGET_NR_clock_adjtime:
9310         {
9311             struct timex htx, *phtx = &htx;
9312 
9313             if (target_to_host_timex(phtx, arg2) != 0) {
9314                 return -TARGET_EFAULT;
9315             }
9316             ret = get_errno(clock_adjtime(arg1, phtx));
9317             if (!is_error(ret) && phtx) {
9318                 if (host_to_target_timex(arg2, phtx) != 0) {
9319                     return -TARGET_EFAULT;
9320                 }
9321             }
9322         }
9323         return ret;
9324 #endif
9325     case TARGET_NR_getpgid:
9326         return get_errno(getpgid(arg1));
9327     case TARGET_NR_fchdir:
9328         return get_errno(fchdir(arg1));
9329     case TARGET_NR_personality:
9330         return get_errno(personality(arg1));
9331 #ifdef TARGET_NR__llseek /* Not on alpha */
9332     case TARGET_NR__llseek:
9333         {
9334             int64_t res;
9335 #if !defined(__NR_llseek)
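                 /*
                  * Hosts that lack _llseek are 64-bit, so a plain lseek on
                  * the combined offset is sufficient.
                  */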
9336             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9337             if (res == -1) {
9338                 ret = get_errno(res);
9339             } else {
9340                 ret = 0;
9341             }
9342 #else
9343             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9344 #endif
9345             if ((ret == 0) && put_user_s64(res, arg4)) {
9346                 return -TARGET_EFAULT;
9347             }
9348         }
9349         return ret;
9350 #endif
9351 #ifdef TARGET_NR_getdents
9352     case TARGET_NR_getdents:
9353 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9354 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9355         {
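                 /*
                  * The host dirent has 64-bit d_ino/d_off here, so read into
                  * a bounce buffer and repack each record into the guest's
                  * smaller target_dirent layout, byte-swapping as we go.
                  */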
9356             struct target_dirent *target_dirp;
9357             struct linux_dirent *dirp;
9358             abi_long count = arg3;
9359 
9360             dirp = g_try_malloc(count);
9361             if (!dirp) {
9362                 return -TARGET_ENOMEM;
9363             }
9364 
9365             ret = get_errno(sys_getdents(arg1, dirp, count));
9366             if (!is_error(ret)) {
9367                 struct linux_dirent *de;
9368                 struct target_dirent *tde;
9369                 int len = ret;
9370                 int reclen, treclen;
9371                 int count1, tnamelen;
9372 
9373                 count1 = 0;
9374                 de = dirp;
9375                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9376                     return -TARGET_EFAULT;
9377                 tde = target_dirp;
9378                 while (len > 0) {
9379                     reclen = de->d_reclen;
9380                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9381                     assert(tnamelen >= 0);
9382                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9383                     assert(count1 + treclen <= count);
9384                     tde->d_reclen = tswap16(treclen);
9385                     tde->d_ino = tswapal(de->d_ino);
9386                     tde->d_off = tswapal(de->d_off);
9387                     memcpy(tde->d_name, de->d_name, tnamelen);
9388                     de = (struct linux_dirent *)((char *)de + reclen);
9389                     len -= reclen;
9390                     tde = (struct target_dirent *)((char *)tde + treclen);
9391                     count1 += treclen;
9392                 }
9393                 ret = count1;
9394                 unlock_user(target_dirp, arg2, ret);
9395             }
9396             g_free(dirp);
9397         }
9398 #else
9399         {
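                 /*
                  * Host and target dirent layouts match here, so just
                  * byte-swap the records in place.
                  */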
9400             struct linux_dirent *dirp;
9401             abi_long count = arg3;
9402 
9403             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9404                 return -TARGET_EFAULT;
9405             ret = get_errno(sys_getdents(arg1, dirp, count));
9406             if (!is_error(ret)) {
9407                 struct linux_dirent *de;
9408                 int len = ret;
9409                 int reclen;
9410                 de = dirp;
9411                 while (len > 0) {
9412                     reclen = de->d_reclen;
9413                     if (reclen > len)
9414                         break;
9415                     de->d_reclen = tswap16(reclen);
9416                     tswapls(&de->d_ino);
9417                     tswapls(&de->d_off);
9418                     de = (struct linux_dirent *)((char *)de + reclen);
9419                     len -= reclen;
9420                 }
9421             }
9422             unlock_user(dirp, arg2, ret);
9423         }
9424 #endif
9425 #else
9426         /* Implement getdents in terms of getdents64 */
9427         {
9428             struct linux_dirent64 *dirp;
9429             abi_long count = arg3;
9430 
9431             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9432             if (!dirp) {
9433                 return -TARGET_EFAULT;
9434             }
9435             ret = get_errno(sys_getdents64(arg1, dirp, count));
9436             if (!is_error(ret)) {
9437                 /* Convert the dirent64 structs to target dirent.  We do this
9438                  * in-place, since we can guarantee that a target_dirent is no
9439                  * larger than a dirent64; however this means we have to be
9440                  * careful to read everything before writing in the new format.
9441                  */
9442                 struct linux_dirent64 *de;
9443                 struct target_dirent *tde;
9444                 int len = ret;
9445                 int tlen = 0;
9446 
9447                 de = dirp;
9448                 tde = (struct target_dirent *)dirp;
9449                 while (len > 0) {
9450                     int namelen, treclen;
9451                     int reclen = de->d_reclen;
9452                     uint64_t ino = de->d_ino;
9453                     int64_t off = de->d_off;
9454                     uint8_t type = de->d_type;
9455 
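                         /*
                          * Each target record holds the name, its NUL
                          * terminator and the trailing d_type byte, rounded
                          * up to abi_long alignment.
                          */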
9456                     namelen = strlen(de->d_name);
9457                     treclen = offsetof(struct target_dirent, d_name)
9458                         + namelen + 2;
9459                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9460 
9461                     memmove(tde->d_name, de->d_name, namelen + 1);
9462                     tde->d_ino = tswapal(ino);
9463                     tde->d_off = tswapal(off);
9464                     tde->d_reclen = tswap16(treclen);
9465                     /* The target_dirent type is in what was formerly a padding
9466                      * byte at the end of the structure:
9467                      */
9468                     *(((char *)tde) + treclen - 1) = type;
9469 
9470                     de = (struct linux_dirent64 *)((char *)de + reclen);
9471                     tde = (struct target_dirent *)((char *)tde + treclen);
9472                     len -= reclen;
9473                     tlen += treclen;
9474                 }
9475                 ret = tlen;
9476             }
9477             unlock_user(dirp, arg2, ret);
9478         }
9479 #endif
9480         return ret;
9481 #endif /* TARGET_NR_getdents */
9482 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9483     case TARGET_NR_getdents64:
9484         {
9485             struct linux_dirent64 *dirp;
9486             abi_long count = arg3;
9487             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9488                 return -TARGET_EFAULT;
9489             ret = get_errno(sys_getdents64(arg1, dirp, count));
9490             if (!is_error(ret)) {
9491                 struct linux_dirent64 *de;
9492                 int len = ret;
9493                 int reclen;
9494                 de = dirp;
9495                 while (len > 0) {
9496                     reclen = de->d_reclen;
9497                     if (reclen > len)
9498                         break;
9499                     de->d_reclen = tswap16(reclen);
9500                     tswap64s((uint64_t *)&de->d_ino);
9501                     tswap64s((uint64_t *)&de->d_off);
9502                     de = (struct linux_dirent64 *)((char *)de + reclen);
9503                     len -= reclen;
9504                 }
9505             }
9506             unlock_user(dirp, arg2, ret);
9507         }
9508         return ret;
9509 #endif /* TARGET_NR_getdents64 */
9510 #if defined(TARGET_NR__newselect)
9511     case TARGET_NR__newselect:
9512         return do_select(arg1, arg2, arg3, arg4, arg5);
9513 #endif
9514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9515 # ifdef TARGET_NR_poll
9516     case TARGET_NR_poll:
9517 # endif
9518 # ifdef TARGET_NR_ppoll
9519     case TARGET_NR_ppoll:
9520 # endif
9521         {
9522             struct target_pollfd *target_pfd;
9523             unsigned int nfds = arg2;
9524             struct pollfd *pfd;
9525             unsigned int i;
9526 
9527             pfd = NULL;
9528             target_pfd = NULL;
9529             if (nfds) {
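                     /* Bound nfds so the size calculations below cannot overflow. */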
9530                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9531                     return -TARGET_EINVAL;
9532                 }
9533 
9534                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9535                                        sizeof(struct target_pollfd) * nfds, 1);
9536                 if (!target_pfd) {
9537                     return -TARGET_EFAULT;
9538                 }
9539 
9540                 pfd = alloca(sizeof(struct pollfd) * nfds);
9541                 for (i = 0; i < nfds; i++) {
9542                     pfd[i].fd = tswap32(target_pfd[i].fd);
9543                     pfd[i].events = tswap16(target_pfd[i].events);
9544                 }
9545             }
9546 
9547             switch (num) {
9548 # ifdef TARGET_NR_ppoll
9549             case TARGET_NR_ppoll:
9550             {
9551                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9552                 target_sigset_t *target_set;
9553                 sigset_t _set, *set = &_set;
9554 
9555                 if (arg3) {
9556                     if (target_to_host_timespec(timeout_ts, arg3)) {
9557                         unlock_user(target_pfd, arg1, 0);
9558                         return -TARGET_EFAULT;
9559                     }
9560                 } else {
9561                     timeout_ts = NULL;
9562                 }
9563 
9564                 if (arg4) {
9565                     if (arg5 != sizeof(target_sigset_t)) {
9566                         unlock_user(target_pfd, arg1, 0);
9567                         return -TARGET_EINVAL;
9568                     }
9569 
9570                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9571                     if (!target_set) {
9572                         unlock_user(target_pfd, arg1, 0);
9573                         return -TARGET_EFAULT;
9574                     }
9575                     target_to_host_sigset(set, target_set);
9576                 } else {
9577                     set = NULL;
9578                 }
9579 
9580                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9581                                            set, SIGSET_T_SIZE));
9582 
9583                 if (!is_error(ret) && arg3) {
9584                     host_to_target_timespec(arg3, timeout_ts);
9585                 }
9586                 if (arg4) {
9587                     unlock_user(target_set, arg4, 0);
9588                 }
9589                 break;
9590             }
9591 # endif
9592 # ifdef TARGET_NR_poll
9593             case TARGET_NR_poll:
9594             {
9595                 struct timespec ts, *pts;
9596 
9597                 if (arg3 >= 0) {
9598                     /* Convert ms to secs, ns */
9599                     ts.tv_sec = arg3 / 1000;
9600                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9601                     pts = &ts;
9602                 } else {
9603                     /* A negative poll() timeout means "infinite" */
9604                     pts = NULL;
9605                 }
9606                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9607                 break;
9608             }
9609 # endif
9610             default:
9611                 g_assert_not_reached();
9612             }
9613 
9614             if (!is_error(ret)) {
9615                 for(i = 0; i < nfds; i++) {
9616                     target_pfd[i].revents = tswap16(pfd[i].revents);
9617                 }
9618             }
9619             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9620         }
9621         return ret;
9622 #endif
9623     case TARGET_NR_flock:
9624         /* NOTE: the flock constant seems to be the same for every
9625            Linux platform */
9626         return get_errno(safe_flock(arg1, arg2));
9627     case TARGET_NR_readv:
9628         {
9629             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9630             if (vec != NULL) {
9631                 ret = get_errno(safe_readv(arg1, vec, arg3));
9632                 unlock_iovec(vec, arg2, arg3, 1);
9633             } else {
9634                 ret = -host_to_target_errno(errno);
9635             }
9636         }
9637         return ret;
9638     case TARGET_NR_writev:
9639         {
9640             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9641             if (vec != NULL) {
9642                 ret = get_errno(safe_writev(arg1, vec, arg3));
9643                 unlock_iovec(vec, arg2, arg3, 0);
9644             } else {
9645                 ret = -host_to_target_errno(errno);
9646             }
9647         }
9648         return ret;
9649 #if defined(TARGET_NR_preadv)
9650     case TARGET_NR_preadv:
9651         {
9652             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9653             if (vec != NULL) {
9654                 unsigned long low, high;
9655 
9656                 target_to_host_low_high(arg4, arg5, &low, &high);
9657                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9658                 unlock_iovec(vec, arg2, arg3, 1);
9659             } else {
9660                 ret = -host_to_target_errno(errno);
9661            }
9662         }
9663         return ret;
9664 #endif
9665 #if defined(TARGET_NR_pwritev)
9666     case TARGET_NR_pwritev:
9667         {
9668             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9669             if (vec != NULL) {
9670                 unsigned long low, high;
9671 
9672                 target_to_host_low_high(arg4, arg5, &low, &high);
9673                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9674                 unlock_iovec(vec, arg2, arg3, 0);
9675             } else {
9676                 ret = -host_to_target_errno(errno);
9677            }
9678         }
9679         return ret;
9680 #endif
9681     case TARGET_NR_getsid:
9682         return get_errno(getsid(arg1));
9683 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9684     case TARGET_NR_fdatasync:
9685         return get_errno(fdatasync(arg1));
9686 #endif
9687 #ifdef TARGET_NR__sysctl
9688     case TARGET_NR__sysctl:
9689         /* We don't implement this, but ENOTDIR is always a safe
9690            return value. */
9691         return -TARGET_ENOTDIR;
9692 #endif
9693     case TARGET_NR_sched_getaffinity:
9694         {
9695             unsigned int mask_size;
9696             unsigned long *mask;
9697 
9698             /*
9699              * sched_getaffinity needs multiples of ulong, so need to take
9700              * care of mismatches between target ulong and host ulong sizes.
9701              */
9702             if (arg2 & (sizeof(abi_ulong) - 1)) {
9703                 return -TARGET_EINVAL;
9704             }
9705             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9706 
9707             mask = alloca(mask_size);
9708             memset(mask, 0, mask_size);
9709             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9710 
9711             if (!is_error(ret)) {
9712                 if (ret > arg2) {
9713                     /* More data returned than the caller's buffer will fit.
9714                      * This only happens if sizeof(abi_long) < sizeof(long)
9715                      * and the caller passed us a buffer holding an odd number
9716                      * of abi_longs. If the host kernel is actually using the
9717                      * extra 4 bytes then fail EINVAL; otherwise we can just
9718                      * ignore them and only copy the interesting part.
9719                      */
9720                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9721                     if (numcpus > arg2 * 8) {
9722                         return -TARGET_EINVAL;
9723                     }
9724                     ret = arg2;
9725                 }
9726 
9727                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9728                     return -TARGET_EFAULT;
9729                 }
9730             }
9731         }
9732         return ret;
9733     case TARGET_NR_sched_setaffinity:
9734         {
9735             unsigned int mask_size;
9736             unsigned long *mask;
9737 
9738             /*
9739              * sched_setaffinity needs multiples of ulong, so need to take
9740              * care of mismatches between target ulong and host ulong sizes.
9741              */
9742             if (arg2 & (sizeof(abi_ulong) - 1)) {
9743                 return -TARGET_EINVAL;
9744             }
9745             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9746             mask = alloca(mask_size);
9747 
9748             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9749             if (ret) {
9750                 return ret;
9751             }
9752 
9753             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9754         }
9755     case TARGET_NR_getcpu:
9756         {
9757             unsigned cpu, node;
9758             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9759                                        arg2 ? &node : NULL,
9760                                        NULL));
9761             if (is_error(ret)) {
9762                 return ret;
9763             }
9764             if (arg1 && put_user_u32(cpu, arg1)) {
9765                 return -TARGET_EFAULT;
9766             }
9767             if (arg2 && put_user_u32(node, arg2)) {
9768                 return -TARGET_EFAULT;
9769             }
9770         }
9771         return ret;
9772     case TARGET_NR_sched_setparam:
9773         {
9774             struct sched_param *target_schp;
9775             struct sched_param schp;
9776 
9777             if (arg2 == 0) {
9778                 return -TARGET_EINVAL;
9779             }
9780             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9781                 return -TARGET_EFAULT;
9782             schp.sched_priority = tswap32(target_schp->sched_priority);
9783             unlock_user_struct(target_schp, arg2, 0);
9784             return get_errno(sched_setparam(arg1, &schp));
9785         }
9786     case TARGET_NR_sched_getparam:
9787         {
9788             struct sched_param *target_schp;
9789             struct sched_param schp;
9790 
9791             if (arg2 == 0) {
9792                 return -TARGET_EINVAL;
9793             }
9794             ret = get_errno(sched_getparam(arg1, &schp));
9795             if (!is_error(ret)) {
9796                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9797                     return -TARGET_EFAULT;
9798                 target_schp->sched_priority = tswap32(schp.sched_priority);
9799                 unlock_user_struct(target_schp, arg2, 1);
9800             }
9801         }
9802         return ret;
9803     case TARGET_NR_sched_setscheduler:
9804         {
9805             struct sched_param *target_schp;
9806             struct sched_param schp;
9807             if (arg3 == 0) {
9808                 return -TARGET_EINVAL;
9809             }
9810             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9811                 return -TARGET_EFAULT;
9812             schp.sched_priority = tswap32(target_schp->sched_priority);
9813             unlock_user_struct(target_schp, arg3, 0);
9814             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9815         }
9816     case TARGET_NR_sched_getscheduler:
9817         return get_errno(sched_getscheduler(arg1));
9818     case TARGET_NR_sched_yield:
9819         return get_errno(sched_yield());
9820     case TARGET_NR_sched_get_priority_max:
9821         return get_errno(sched_get_priority_max(arg1));
9822     case TARGET_NR_sched_get_priority_min:
9823         return get_errno(sched_get_priority_min(arg1));
9824     case TARGET_NR_sched_rr_get_interval:
9825         {
9826             struct timespec ts;
9827             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9828             if (!is_error(ret)) {
9829                 ret = host_to_target_timespec(arg2, &ts);
9830             }
9831         }
9832         return ret;
9833     case TARGET_NR_nanosleep:
9834         {
9835             struct timespec req, rem;
9836             target_to_host_timespec(&req, arg1);
9837             ret = get_errno(safe_nanosleep(&req, &rem));
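                 /* The remaining time only matters if the sleep was cut short. */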
9838             if (is_error(ret) && arg2) {
9839                 host_to_target_timespec(arg2, &rem);
9840             }
9841         }
9842         return ret;
9843     case TARGET_NR_prctl:
9844         switch (arg1) {
9845         case PR_GET_PDEATHSIG:
9846         {
9847             int deathsig;
9848             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9849             if (!is_error(ret) && arg2
9850                 && put_user_ual(deathsig, arg2)) {
9851                 return -TARGET_EFAULT;
9852             }
9853             return ret;
9854         }
9855 #ifdef PR_GET_NAME
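             /* The thread name buffer is TASK_COMM_LEN (16) bytes, including the NUL. */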
9856         case PR_GET_NAME:
9857         {
9858             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9859             if (!name) {
9860                 return -TARGET_EFAULT;
9861             }
9862             ret = get_errno(prctl(arg1, (unsigned long)name,
9863                                   arg3, arg4, arg5));
9864             unlock_user(name, arg2, 16);
9865             return ret;
9866         }
9867         case PR_SET_NAME:
9868         {
9869             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9870             if (!name) {
9871                 return -TARGET_EFAULT;
9872             }
9873             ret = get_errno(prctl(arg1, (unsigned long)name,
9874                                   arg3, arg4, arg5));
9875             unlock_user(name, arg2, 0);
9876             return ret;
9877         }
9878 #endif
9879 #ifdef TARGET_MIPS
9880         case TARGET_PR_GET_FP_MODE:
9881         {
9882             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9883             ret = 0;
9884             if (env->CP0_Status & (1 << CP0St_FR)) {
9885                 ret |= TARGET_PR_FP_MODE_FR;
9886             }
9887             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9888                 ret |= TARGET_PR_FP_MODE_FRE;
9889             }
9890             return ret;
9891         }
9892         case TARGET_PR_SET_FP_MODE:
9893         {
9894             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9895             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9896             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9897             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9898             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9899 
9900             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9901                                             TARGET_PR_FP_MODE_FRE;
9902 
9903             /* If nothing to change, return right away, successfully.  */
9904             if (old_fr == new_fr && old_fre == new_fre) {
9905                 return 0;
9906             }
9907             /* Check the value is valid */
9908             if (arg2 & ~known_bits) {
9909                 return -TARGET_EOPNOTSUPP;
9910             }
9911             /* Setting FRE without FR is not supported.  */
9912             if (new_fre && !new_fr) {
9913                 return -TARGET_EOPNOTSUPP;
9914             }
9915             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9916                 /* FR1 is not supported */
9917                 return -TARGET_EOPNOTSUPP;
9918             }
9919             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9920                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9921                 /* cannot set FR=0 */
9922                 return -TARGET_EOPNOTSUPP;
9923             }
9924             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9925                 /* Cannot set FRE=1 */
9926                 return -TARGET_EOPNOTSUPP;
9927             }
9928 
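                 /*
                  * Re-pack the FPU registers for the new FR mode: with FR=0
                  * the odd-numbered singles alias the high halves of the
                  * even-numbered doubles.
                  */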
9929             int i;
9930             fpr_t *fpr = env->active_fpu.fpr;
9931             for (i = 0; i < 32 ; i += 2) {
9932                 if (!old_fr && new_fr) {
9933                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9934                 } else if (old_fr && !new_fr) {
9935                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9936                 }
9937             }
9938 
9939             if (new_fr) {
9940                 env->CP0_Status |= (1 << CP0St_FR);
9941                 env->hflags |= MIPS_HFLAG_F64;
9942             } else {
9943                 env->CP0_Status &= ~(1 << CP0St_FR);
9944                 env->hflags &= ~MIPS_HFLAG_F64;
9945             }
9946             if (new_fre) {
9947                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9948                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9949                     env->hflags |= MIPS_HFLAG_FRE;
9950                 }
9951             } else {
9952                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9953                 env->hflags &= ~MIPS_HFLAG_FRE;
9954             }
9955 
9956             return 0;
9957         }
9958 #endif /* MIPS */
9959 #ifdef TARGET_AARCH64
9960         case TARGET_PR_SVE_SET_VL:
9961             /*
9962              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9963              * PR_SVE_VL_INHERIT.  Note the kernel definition
9964              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9965              * even though the current architectural maximum is VQ=16.
9966              */
9967             ret = -TARGET_EINVAL;
9968             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9969                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9970                 CPUARMState *env = cpu_env;
9971                 ARMCPU *cpu = env_archcpu(env);
9972                 uint32_t vq, old_vq;
9973 
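                     /*
                      * arg2 is the requested vector length in bytes; convert
                      * it to quadwords, clamp it to this CPU's maximum, and
                      * drop the high part of the SVE state if it shrinks.
                      */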
9974                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9975                 vq = MAX(arg2 / 16, 1);
9976                 vq = MIN(vq, cpu->sve_max_vq);
9977 
9978                 if (vq < old_vq) {
9979                     aarch64_sve_narrow_vq(env, vq);
9980                 }
9981                 env->vfp.zcr_el[1] = vq - 1;
9982                 ret = vq * 16;
9983             }
9984             return ret;
9985         case TARGET_PR_SVE_GET_VL:
9986             ret = -TARGET_EINVAL;
9987             {
9988                 ARMCPU *cpu = env_archcpu(cpu_env);
9989                 if (cpu_isar_feature(aa64_sve, cpu)) {
9990                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9991                 }
9992             }
9993             return ret;
9994         case TARGET_PR_PAC_RESET_KEYS:
9995             {
9996                 CPUARMState *env = cpu_env;
9997                 ARMCPU *cpu = env_archcpu(env);
9998 
9999                 if (arg3 || arg4 || arg5) {
10000                     return -TARGET_EINVAL;
10001                 }
10002                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10003                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10004                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10005                                TARGET_PR_PAC_APGAKEY);
10006                     int ret = 0;
10007                     Error *err = NULL;
10008 
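                          /* A key-selection argument of zero means "reset every key". */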
10009                     if (arg2 == 0) {
10010                         arg2 = all;
10011                     } else if (arg2 & ~all) {
10012                         return -TARGET_EINVAL;
10013                     }
10014                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10015                         ret |= qemu_guest_getrandom(&env->keys.apia,
10016                                                     sizeof(ARMPACKey), &err);
10017                     }
10018                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10019                         ret |= qemu_guest_getrandom(&env->keys.apib,
10020                                                     sizeof(ARMPACKey), &err);
10021                     }
10022                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10023                         ret |= qemu_guest_getrandom(&env->keys.apda,
10024                                                     sizeof(ARMPACKey), &err);
10025                     }
10026                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10027                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10028                                                     sizeof(ARMPACKey), &err);
10029                     }
10030                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10031                         ret |= qemu_guest_getrandom(&env->keys.apga,
10032                                                     sizeof(ARMPACKey), &err);
10033                     }
10034                     if (ret != 0) {
10035                         /*
10036                          * Some unknown failure in the crypto.  The best
10037                          * we can do is log it and fail the syscall.
10038                          * The real syscall cannot fail this way.
10039                          */
10040                         qemu_log_mask(LOG_UNIMP,
10041                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10042                                       error_get_pretty(err));
10043                         error_free(err);
10044                         return -TARGET_EIO;
10045                     }
10046                     return 0;
10047                 }
10048             }
10049             return -TARGET_EINVAL;
10050 #endif /* AARCH64 */
10051         case PR_GET_SECCOMP:
10052         case PR_SET_SECCOMP:
10053             /* Disable seccomp to prevent the target disabling syscalls we
10054              * need. */
10055             return -TARGET_EINVAL;
10056         default:
10057             /* Most prctl options have no pointer arguments */
10058             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10059         }
10060         break;
10061 #ifdef TARGET_NR_arch_prctl
10062     case TARGET_NR_arch_prctl:
10063 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10064         return do_arch_prctl(cpu_env, arg1, arg2);
10065 #else
10066 #error unreachable
10067 #endif
10068 #endif
10069 #ifdef TARGET_NR_pread64
10070     case TARGET_NR_pread64:
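              /*
               * Some ABIs pass 64-bit arguments in aligned register pairs,
               * which inserts a padding word before the offset; shift the
               * remaining arguments down to compensate.
               */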
10071         if (regpairs_aligned(cpu_env, num)) {
10072             arg4 = arg5;
10073             arg5 = arg6;
10074         }
10075         if (arg2 == 0 && arg3 == 0) {
10076             /* Special-case NULL buffer and zero length, which should succeed */
10077             p = 0;
10078         } else {
10079             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10080             if (!p) {
10081                 return -TARGET_EFAULT;
10082             }
10083         }
10084         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10085         unlock_user(p, arg2, ret);
10086         return ret;
10087     case TARGET_NR_pwrite64:
10088         if (regpairs_aligned(cpu_env, num)) {
10089             arg4 = arg5;
10090             arg5 = arg6;
10091         }
10092         if (arg2 == 0 && arg3 == 0) {
10093             /* Special-case NULL buffer and zero length, which should succeed */
10094             p = 0;
10095         } else {
10096             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10097             if (!p) {
10098                 return -TARGET_EFAULT;
10099             }
10100         }
10101         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10102         unlock_user(p, arg2, 0);
10103         return ret;
10104 #endif
10105     case TARGET_NR_getcwd:
10106         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10107             return -TARGET_EFAULT;
10108         ret = get_errno(sys_getcwd1(p, arg2));
10109         unlock_user(p, arg1, ret);
10110         return ret;
10111     case TARGET_NR_capget:
10112     case TARGET_NR_capset:
10113     {
10114         struct target_user_cap_header *target_header;
10115         struct target_user_cap_data *target_data = NULL;
10116         struct __user_cap_header_struct header;
10117         struct __user_cap_data_struct data[2];
10118         struct __user_cap_data_struct *dataptr = NULL;
10119         int i, target_datalen;
10120         int data_items = 1;
10121 
10122         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10123             return -TARGET_EFAULT;
10124         }
10125         header.version = tswap32(target_header->version);
10126         header.pid = tswap32(target_header->pid);
10127 
10128         if (header.version != _LINUX_CAPABILITY_VERSION) {
10129             /* Version 2 and up takes pointer to two user_data structs */
10130             data_items = 2;
10131         }
10132 
10133         target_datalen = sizeof(*target_data) * data_items;
10134 
10135         if (arg2) {
10136             if (num == TARGET_NR_capget) {
10137                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10138             } else {
10139                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10140             }
10141             if (!target_data) {
10142                 unlock_user_struct(target_header, arg1, 0);
10143                 return -TARGET_EFAULT;
10144             }
10145 
10146             if (num == TARGET_NR_capset) {
10147                 for (i = 0; i < data_items; i++) {
10148                     data[i].effective = tswap32(target_data[i].effective);
10149                     data[i].permitted = tswap32(target_data[i].permitted);
10150                     data[i].inheritable = tswap32(target_data[i].inheritable);
10151                 }
10152             }
10153 
10154             dataptr = data;
10155         }
10156 
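              /*
               * A NULL data pointer is legal here: the kernel uses it to
               * probe for the preferred capability version.
               */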
10157         if (num == TARGET_NR_capget) {
10158             ret = get_errno(capget(&header, dataptr));
10159         } else {
10160             ret = get_errno(capset(&header, dataptr));
10161         }
10162 
10163         /* The kernel always updates version for both capget and capset */
10164         target_header->version = tswap32(header.version);
10165         unlock_user_struct(target_header, arg1, 1);
10166 
10167         if (arg2) {
10168             if (num == TARGET_NR_capget) {
10169                 for (i = 0; i < data_items; i++) {
10170                     target_data[i].effective = tswap32(data[i].effective);
10171                     target_data[i].permitted = tswap32(data[i].permitted);
10172                     target_data[i].inheritable = tswap32(data[i].inheritable);
10173                 }
10174                 unlock_user(target_data, arg2, target_datalen);
10175             } else {
10176                 unlock_user(target_data, arg2, 0);
10177             }
10178         }
10179         return ret;
10180     }
10181     case TARGET_NR_sigaltstack:
10182         return do_sigaltstack(arg1, arg2,
10183                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10184 
10185 #ifdef CONFIG_SENDFILE
10186 #ifdef TARGET_NR_sendfile
10187     case TARGET_NR_sendfile:
10188     {
10189         off_t *offp = NULL;
10190         off_t off;
10191         if (arg3) {
10192             ret = get_user_sal(off, arg3);
10193             if (is_error(ret)) {
10194                 return ret;
10195             }
10196             offp = &off;
10197         }
10198         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10199         if (!is_error(ret) && arg3) {
10200             abi_long ret2 = put_user_sal(off, arg3);
10201             if (is_error(ret2)) {
10202                 ret = ret2;
10203             }
10204         }
10205         return ret;
10206     }
10207 #endif
10208 #ifdef TARGET_NR_sendfile64
10209     case TARGET_NR_sendfile64:
10210     {
10211         off_t *offp = NULL;
10212         off_t off;
10213         if (arg3) {
10214             ret = get_user_s64(off, arg3);
10215             if (is_error(ret)) {
10216                 return ret;
10217             }
10218             offp = &off;
10219         }
10220         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10221         if (!is_error(ret) && arg3) {
10222             abi_long ret2 = put_user_s64(off, arg3);
10223             if (is_error(ret2)) {
10224                 ret = ret2;
10225             }
10226         }
10227         return ret;
10228     }
10229 #endif
10230 #endif
10231 #ifdef TARGET_NR_vfork
10232     case TARGET_NR_vfork:
10233         return get_errno(do_fork(cpu_env,
10234                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10235                          0, 0, 0, 0));
10236 #endif
10237 #ifdef TARGET_NR_ugetrlimit
10238     case TARGET_NR_ugetrlimit:
10239     {
10240         struct rlimit rlim;
10241         int resource = target_to_host_resource(arg1);
10242         ret = get_errno(getrlimit(resource, &rlim));
10243         if (!is_error(ret)) {
10244             struct target_rlimit *target_rlim;
10245             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10246                 return -TARGET_EFAULT;
10247             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10248             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10249             unlock_user_struct(target_rlim, arg2, 1);
10250         }
10251         return ret;
10252     }
10253 #endif
10254 #ifdef TARGET_NR_truncate64
10255     case TARGET_NR_truncate64:
10256         if (!(p = lock_user_string(arg1)))
10257             return -TARGET_EFAULT;
10258         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10259         unlock_user(p, arg1, 0);
10260         return ret;
10261 #endif
10262 #ifdef TARGET_NR_ftruncate64
10263     case TARGET_NR_ftruncate64:
10264         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10265 #endif
10266 #ifdef TARGET_NR_stat64
10267     case TARGET_NR_stat64:
10268         if (!(p = lock_user_string(arg1))) {
10269             return -TARGET_EFAULT;
10270         }
10271         ret = get_errno(stat(path(p), &st));
10272         unlock_user(p, arg1, 0);
10273         if (!is_error(ret))
10274             ret = host_to_target_stat64(cpu_env, arg2, &st);
10275         return ret;
10276 #endif
10277 #ifdef TARGET_NR_lstat64
10278     case TARGET_NR_lstat64:
10279         if (!(p = lock_user_string(arg1))) {
10280             return -TARGET_EFAULT;
10281         }
10282         ret = get_errno(lstat(path(p), &st));
10283         unlock_user(p, arg1, 0);
10284         if (!is_error(ret))
10285             ret = host_to_target_stat64(cpu_env, arg2, &st);
10286         return ret;
10287 #endif
10288 #ifdef TARGET_NR_fstat64
10289     case TARGET_NR_fstat64:
10290         ret = get_errno(fstat(arg1, &st));
10291         if (!is_error(ret))
10292             ret = host_to_target_stat64(cpu_env, arg2, &st);
10293         return ret;
10294 #endif
10295 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10296 #ifdef TARGET_NR_fstatat64
10297     case TARGET_NR_fstatat64:
10298 #endif
10299 #ifdef TARGET_NR_newfstatat
10300     case TARGET_NR_newfstatat:
10301 #endif
10302         if (!(p = lock_user_string(arg2))) {
10303             return -TARGET_EFAULT;
10304         }
10305         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10306         unlock_user(p, arg2, 0);
10307         if (!is_error(ret))
10308             ret = host_to_target_stat64(cpu_env, arg3, &st);
10309         return ret;
10310 #endif
10311 #if defined(TARGET_NR_statx)
10312     case TARGET_NR_statx:
10313         {
10314             struct target_statx *target_stx;
10315             int dirfd = arg1;
10316             int flags = arg3;
10317 
10318             p = lock_user_string(arg2);
10319             if (p == NULL) {
10320                 return -TARGET_EFAULT;
10321             }
10322 #if defined(__NR_statx)
10323             {
10324                 /*
10325                  * It is assumed that struct statx is architecture independent.
10326                  */
10327                 struct target_statx host_stx;
10328                 int mask = arg4;
10329 
10330                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10331                 if (!is_error(ret)) {
10332                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10333                         unlock_user(p, arg2, 0);
10334                         return -TARGET_EFAULT;
10335                     }
10336                 }
10337 
10338                 if (ret != -TARGET_ENOSYS) {
10339                     unlock_user(p, arg2, 0);
10340                     return ret;
10341                 }
10342             }
10343 #endif
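                  /*
                   * Either the host has no statx syscall or it returned
                   * ENOSYS: emulate the call with fstatat and fill in the
                   * fields that a struct stat can provide.
                   */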
10344             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10345             unlock_user(p, arg2, 0);
10346 
10347             if (!is_error(ret)) {
10348                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10349                     return -TARGET_EFAULT;
10350                 }
10351                 memset(target_stx, 0, sizeof(*target_stx));
10352                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10353                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10354                 __put_user(st.st_ino, &target_stx->stx_ino);
10355                 __put_user(st.st_mode, &target_stx->stx_mode);
10356                 __put_user(st.st_uid, &target_stx->stx_uid);
10357                 __put_user(st.st_gid, &target_stx->stx_gid);
10358                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10359                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10360                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10361                 __put_user(st.st_size, &target_stx->stx_size);
10362                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10363                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10364                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10365                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10366                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10367                 unlock_user_struct(target_stx, arg5, 1);
10368             }
10369         }
10370         return ret;
10371 #endif
10372 #ifdef TARGET_NR_lchown
10373     case TARGET_NR_lchown:
10374         if (!(p = lock_user_string(arg1)))
10375             return -TARGET_EFAULT;
10376         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10377         unlock_user(p, arg1, 0);
10378         return ret;
10379 #endif
10380 #ifdef TARGET_NR_getuid
10381     case TARGET_NR_getuid:
10382         return get_errno(high2lowuid(getuid()));
10383 #endif
10384 #ifdef TARGET_NR_getgid
10385     case TARGET_NR_getgid:
10386         return get_errno(high2lowgid(getgid()));
10387 #endif
10388 #ifdef TARGET_NR_geteuid
10389     case TARGET_NR_geteuid:
10390         return get_errno(high2lowuid(geteuid()));
10391 #endif
10392 #ifdef TARGET_NR_getegid
10393     case TARGET_NR_getegid:
10394         return get_errno(high2lowgid(getegid()));
10395 #endif
10396     case TARGET_NR_setreuid:
10397         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10398     case TARGET_NR_setregid:
10399         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10400     case TARGET_NR_getgroups:
10401         {
10402             int gidsetsize = arg1;
10403             target_id *target_grouplist;
10404             gid_t *grouplist;
10405             int i;
10406 
10407             grouplist = alloca(gidsetsize * sizeof(gid_t));
10408             ret = get_errno(getgroups(gidsetsize, grouplist));
10409             if (gidsetsize == 0)
10410                 return ret;
10411             if (!is_error(ret)) {
10412                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10413                 if (!target_grouplist)
10414                     return -TARGET_EFAULT;
10415                 for(i = 0;i < ret; i++)
10416                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10417                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10418             }
10419         }
10420         return ret;
10421     case TARGET_NR_setgroups:
10422         {
10423             int gidsetsize = arg1;
10424             target_id *target_grouplist;
10425             gid_t *grouplist = NULL;
10426             int i;
10427             if (gidsetsize) {
10428                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10429                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10430                 if (!target_grouplist) {
10431                     return -TARGET_EFAULT;
10432                 }
10433                 for (i = 0; i < gidsetsize; i++) {
10434                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10435                 }
10436                 unlock_user(target_grouplist, arg2, 0);
10437             }
10438             return get_errno(setgroups(gidsetsize, grouplist));
10439         }
10440     case TARGET_NR_fchown:
10441         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10442 #if defined(TARGET_NR_fchownat)
10443     case TARGET_NR_fchownat:
10444         if (!(p = lock_user_string(arg2)))
10445             return -TARGET_EFAULT;
10446         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10447                                  low2highgid(arg4), arg5));
10448         unlock_user(p, arg2, 0);
10449         return ret;
10450 #endif
10451 #ifdef TARGET_NR_setresuid
10452     case TARGET_NR_setresuid:
10453         return get_errno(sys_setresuid(low2highuid(arg1),
10454                                        low2highuid(arg2),
10455                                        low2highuid(arg3)));
10456 #endif
10457 #ifdef TARGET_NR_getresuid
10458     case TARGET_NR_getresuid:
10459         {
10460             uid_t ruid, euid, suid;
10461             ret = get_errno(getresuid(&ruid, &euid, &suid));
10462             if (!is_error(ret)) {
10463                 if (put_user_id(high2lowuid(ruid), arg1)
10464                     || put_user_id(high2lowuid(euid), arg2)
10465                     || put_user_id(high2lowuid(suid), arg3))
10466                     return -TARGET_EFAULT;
10467             }
10468         }
10469         return ret;
10470 #endif
10471 #ifdef TARGET_NR_getresgid
10472     case TARGET_NR_setresgid:
10473         return get_errno(sys_setresgid(low2highgid(arg1),
10474                                        low2highgid(arg2),
10475                                        low2highgid(arg3)));
10476 #endif
10477 #ifdef TARGET_NR_getresgid
10478     case TARGET_NR_getresgid:
10479         {
10480             gid_t rgid, egid, sgid;
10481             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10482             if (!is_error(ret)) {
10483                 if (put_user_id(high2lowgid(rgid), arg1)
10484                     || put_user_id(high2lowgid(egid), arg2)
10485                     || put_user_id(high2lowgid(sgid), arg3))
10486                     return -TARGET_EFAULT;
10487             }
10488         }
10489         return ret;
10490 #endif
10491 #ifdef TARGET_NR_chown
10492     case TARGET_NR_chown:
10493         if (!(p = lock_user_string(arg1)))
10494             return -TARGET_EFAULT;
10495         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10496         unlock_user(p, arg1, 0);
10497         return ret;
10498 #endif
10499     case TARGET_NR_setuid:
10500         return get_errno(sys_setuid(low2highuid(arg1)));
10501     case TARGET_NR_setgid:
10502         return get_errno(sys_setgid(low2highgid(arg1)));
10503     case TARGET_NR_setfsuid:
10504         return get_errno(setfsuid(arg1));
10505     case TARGET_NR_setfsgid:
10506         return get_errno(setfsgid(arg1));
10507 
10508 #ifdef TARGET_NR_lchown32
10509     case TARGET_NR_lchown32:
10510         if (!(p = lock_user_string(arg1)))
10511             return -TARGET_EFAULT;
10512         ret = get_errno(lchown(p, arg2, arg3));
10513         unlock_user(p, arg1, 0);
10514         return ret;
10515 #endif
10516 #ifdef TARGET_NR_getuid32
10517     case TARGET_NR_getuid32:
10518         return get_errno(getuid());
10519 #endif
10520 
10521 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10522    /* Alpha specific */
10523     case TARGET_NR_getxuid:
10524          {
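                  /* getxuid returns the real uid in v0 and the effective uid in a4. */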
10525             uid_t euid;
10526             euid = geteuid();
10527             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10528          }
10529         return get_errno(getuid());
10530 #endif
10531 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10532    /* Alpha specific */
10533     case TARGET_NR_getxgid:
10534          {
10535             gid_t egid;
10536             egid = getegid();
10537             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10538          }
10539         return get_errno(getgid());
10540 #endif
10541 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10542     /* Alpha specific */
10543     case TARGET_NR_osf_getsysinfo:
10544         ret = -TARGET_EOPNOTSUPP;
10545         switch (arg1) {
10546           case TARGET_GSI_IEEE_FP_CONTROL:
10547             {
10548                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10549                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10550 
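                      /*
                       * The live IEEE status bits are kept in the hardware
                       * FPCR; fold them into the saved software control word
                       * before returning it.
                       */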
10551                 swcr &= ~SWCR_STATUS_MASK;
10552                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10553 
10554                 if (put_user_u64(swcr, arg2))
10555                     return -TARGET_EFAULT;
10556                 ret = 0;
10557             }
10558             break;
10559 
10560           /* case GSI_IEEE_STATE_AT_SIGNAL:
10561              -- Not implemented in linux kernel.
10562              case GSI_UACPROC:
10563              -- Retrieves current unaligned access state; not much used.
10564              case GSI_PROC_TYPE:
10565              -- Retrieves implver information; surely not used.
10566              case GSI_GET_HWRPB:
10567              -- Grabs a copy of the HWRPB; surely not used.
10568           */
10569         }
10570         return ret;
10571 #endif
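    /*
     * For illustration: as the GSI_IEEE_FP_CONTROL code above shows, the
     * accrued IEEE status bits live in the hardware FPCR starting at bit 35,
     * so reporting them back in the software completion word is a
     * shift-and-mask:
     *
     *     swcr_status = (fpcr >> 35) & SWCR_STATUS_MASK;
     *
     * i.e. a status flag at FPCR bit (35 + n) shows up as bit n of the SWCR
     * status field.  The setsysinfo case below relies on the same mapping in
     * the other direction via alpha_ieee_swcr_to_fpcr().
     */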
10572 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10573     /* Alpha specific */
10574     case TARGET_NR_osf_setsysinfo:
10575         ret = -TARGET_EOPNOTSUPP;
10576         switch (arg1) {
10577           case TARGET_SSI_IEEE_FP_CONTROL:
10578             {
10579                 uint64_t swcr, fpcr;
10580 
10581                 if (get_user_u64(swcr, arg2)) {
10582                     return -TARGET_EFAULT;
10583                 }
10584 
10585                 /*
10586                  * The kernel calls swcr_update_status to update the
10587                  * status bits from the fpcr at every point that it
10588                  * could be queried.  Therefore, we store the status
10589                  * bits only in FPCR.
10590                  */
10591                 ((CPUAlphaState *)cpu_env)->swcr
10592                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10593 
10594                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10595                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10596                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10597                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10598                 ret = 0;
10599             }
10600             break;
10601 
10602           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10603             {
10604                 uint64_t exc, fpcr, fex;
10605 
10606                 if (get_user_u64(exc, arg2)) {
10607                     return -TARGET_EFAULT;
10608                 }
10609                 exc &= SWCR_STATUS_MASK;
10610                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10611 
10612                 /* Old exceptions are not signaled.  */
10613                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10614                 fex = exc & ~fex;
10615                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10616                 fex &= ((CPUArchState *)cpu_env)->swcr;
10617 
10618                 /* Update the hardware fpcr.  */
10619                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10620                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10621 
10622                 if (fex) {
10623                     int si_code = TARGET_FPE_FLTUNK;
10624                     target_siginfo_t info;
10625 
10626                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10627                         si_code = TARGET_FPE_FLTUND;
10628                     }
10629                     if (fex & SWCR_TRAP_ENABLE_INE) {
10630                         si_code = TARGET_FPE_FLTRES;
10631                     }
10632                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10633                         si_code = TARGET_FPE_FLTUND;
10634                     }
10635                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10636                         si_code = TARGET_FPE_FLTOVF;
10637                     }
10638                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10639                         si_code = TARGET_FPE_FLTDIV;
10640                     }
10641                     if (fex & SWCR_TRAP_ENABLE_INV) {
10642                         si_code = TARGET_FPE_FLTINV;
10643                     }
10644 
10645                     info.si_signo = SIGFPE;
10646                     info.si_errno = 0;
10647                     info.si_code = si_code;
10648                     info._sifields._sigfault._addr
10649                         = ((CPUArchState *)cpu_env)->pc;
10650                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10651                                  QEMU_SI_FAULT, &info);
10652                 }
10653                 ret = 0;
10654             }
10655             break;
10656 
10657           /* case SSI_NVPAIRS:
10658              -- Used with SSIN_UACPROC to enable unaligned accesses.
10659              case SSI_IEEE_STATE_AT_SIGNAL:
10660              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10661              -- Not implemented in linux kernel
10662           */
10663         }
10664         return ret;
10665 #endif
10666 #ifdef TARGET_NR_osf_sigprocmask
10667     /* Alpha specific.  */
10668     case TARGET_NR_osf_sigprocmask:
10669         {
10670             abi_ulong mask;
10671             int how;
10672             sigset_t set, oldset;
10673 
10674             switch (arg1) {
10675             case TARGET_SIG_BLOCK:
10676                 how = SIG_BLOCK;
10677                 break;
10678             case TARGET_SIG_UNBLOCK:
10679                 how = SIG_UNBLOCK;
10680                 break;
10681             case TARGET_SIG_SETMASK:
10682                 how = SIG_SETMASK;
10683                 break;
10684             default:
10685                 return -TARGET_EINVAL;
10686             }
10687             mask = arg2;
10688             target_to_host_old_sigset(&set, &mask);
10689             ret = do_sigprocmask(how, &set, &oldset);
10690             if (!ret) {
10691                 host_to_target_old_sigset(&mask, &oldset);
10692                 ret = mask;
10693             }
10694         }
10695         return ret;
10696 #endif
10697 
10698 #ifdef TARGET_NR_getgid32
10699     case TARGET_NR_getgid32:
10700         return get_errno(getgid());
10701 #endif
10702 #ifdef TARGET_NR_geteuid32
10703     case TARGET_NR_geteuid32:
10704         return get_errno(geteuid());
10705 #endif
10706 #ifdef TARGET_NR_getegid32
10707     case TARGET_NR_getegid32:
10708         return get_errno(getegid());
10709 #endif
10710 #ifdef TARGET_NR_setreuid32
10711     case TARGET_NR_setreuid32:
10712         return get_errno(setreuid(arg1, arg2));
10713 #endif
10714 #ifdef TARGET_NR_setregid32
10715     case TARGET_NR_setregid32:
10716         return get_errno(setregid(arg1, arg2));
10717 #endif
10718 #ifdef TARGET_NR_getgroups32
10719     case TARGET_NR_getgroups32:
10720         {
10721             int gidsetsize = arg1;
10722             uint32_t *target_grouplist;
10723             gid_t *grouplist;
10724             int i;
10725 
10726             grouplist = alloca(gidsetsize * sizeof(gid_t));
10727             ret = get_errno(getgroups(gidsetsize, grouplist));
10728             if (gidsetsize == 0)
10729                 return ret;
10730             if (!is_error(ret)) {
10731                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10732                 if (!target_grouplist) {
10733                     return -TARGET_EFAULT;
10734                 }
10735                 for(i = 0;i < ret; i++)
10736                 for (i = 0; i < ret; i++)
10737                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10738             }
10739         }
10740         return ret;
10741 #endif
10742 #ifdef TARGET_NR_setgroups32
10743     case TARGET_NR_setgroups32:
10744         {
10745             int gidsetsize = arg1;
10746             uint32_t *target_grouplist;
10747             gid_t *grouplist;
10748             int i;
10749 
10750             grouplist = alloca(gidsetsize * sizeof(gid_t));
10751             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10752             if (!target_grouplist) {
10753                 return -TARGET_EFAULT;
10754             }
10755             for(i = 0;i < gidsetsize; i++)
10756             for (i = 0; i < gidsetsize; i++)
10757             unlock_user(target_grouplist, arg2, 0);
10758             return get_errno(setgroups(gidsetsize, grouplist));
10759         }
10760 #endif
10761 #ifdef TARGET_NR_fchown32
10762     case TARGET_NR_fchown32:
10763         return get_errno(fchown(arg1, arg2, arg3));
10764 #endif
10765 #ifdef TARGET_NR_setresuid32
10766     case TARGET_NR_setresuid32:
10767         return get_errno(sys_setresuid(arg1, arg2, arg3));
10768 #endif
10769 #ifdef TARGET_NR_getresuid32
10770     case TARGET_NR_getresuid32:
10771         {
10772             uid_t ruid, euid, suid;
10773             ret = get_errno(getresuid(&ruid, &euid, &suid));
10774             if (!is_error(ret)) {
10775                 if (put_user_u32(ruid, arg1)
10776                     || put_user_u32(euid, arg2)
10777                     || put_user_u32(suid, arg3))
10778                     return -TARGET_EFAULT;
10779             }
10780         }
10781         return ret;
10782 #endif
10783 #ifdef TARGET_NR_setresgid32
10784     case TARGET_NR_setresgid32:
10785         return get_errno(sys_setresgid(arg1, arg2, arg3));
10786 #endif
10787 #ifdef TARGET_NR_getresgid32
10788     case TARGET_NR_getresgid32:
10789         {
10790             gid_t rgid, egid, sgid;
10791             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10792             if (!is_error(ret)) {
10793                 if (put_user_u32(rgid, arg1)
10794                     || put_user_u32(egid, arg2)
10795                     || put_user_u32(sgid, arg3))
10796                     return -TARGET_EFAULT;
10797             }
10798         }
10799         return ret;
10800 #endif
10801 #ifdef TARGET_NR_chown32
10802     case TARGET_NR_chown32:
10803         if (!(p = lock_user_string(arg1)))
10804             return -TARGET_EFAULT;
10805         ret = get_errno(chown(p, arg2, arg3));
10806         unlock_user(p, arg1, 0);
10807         return ret;
10808 #endif
10809 #ifdef TARGET_NR_setuid32
10810     case TARGET_NR_setuid32:
10811         return get_errno(sys_setuid(arg1));
10812 #endif
10813 #ifdef TARGET_NR_setgid32
10814     case TARGET_NR_setgid32:
10815         return get_errno(sys_setgid(arg1));
10816 #endif
10817 #ifdef TARGET_NR_setfsuid32
10818     case TARGET_NR_setfsuid32:
10819         return get_errno(setfsuid(arg1));
10820 #endif
10821 #ifdef TARGET_NR_setfsgid32
10822     case TARGET_NR_setfsgid32:
10823         return get_errno(setfsgid(arg1));
10824 #endif
10825 #ifdef TARGET_NR_mincore
10826     case TARGET_NR_mincore:
10827         {
10828             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10829             if (!a) {
10830                 return -TARGET_ENOMEM;
10831             }
10832             p = lock_user(VERIFY_WRITE, arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE), 0);
10833             if (!p) {
10834                 ret = -TARGET_EFAULT;
10835             } else {
10836                 ret = get_errno(mincore(a, arg2, p));
10837                 unlock_user(p, arg3, ret);
10838             }
10839             unlock_user(a, arg1, 0);
10840         }
10841         return ret;
10842 #endif
10843 #ifdef TARGET_NR_arm_fadvise64_64
10844     case TARGET_NR_arm_fadvise64_64:
10845         /* arm_fadvise64_64 looks like fadvise64_64 but
10846          * with different argument order: fd, advice, offset, len
10847          * rather than the usual fd, offset, len, advice.
10848          * Note that offset and len are both 64-bit so appear as
10849          * pairs of 32-bit registers.
10850          */
10851         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10852                             target_offset64(arg5, arg6), arg2);
10853         return -host_to_target_errno(ret);
10854 #endif
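    /*
     * For illustration: on a 32-bit guest ABI a 64-bit offset arrives as two
     * abi_ulong halves, and target_offset64() is assumed to recombine them in
     * guest byte order, roughly:
     *
     *     uint64_t offset64(uint32_t word0, uint32_t word1)
     *     {
     *         return big_endian_guest ? ((uint64_t)word0 << 32) | word1
     *                                 : ((uint64_t)word1 << 32) | word0;
     *     }
     *
     * which is why the case above feeds (arg3, arg4) and (arg5, arg6) into it
     * for offset and len.
     */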
10855 
10856 #if TARGET_ABI_BITS == 32
10857 
10858 #ifdef TARGET_NR_fadvise64_64
10859     case TARGET_NR_fadvise64_64:
10860 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10861         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10862         ret = arg2;
10863         arg2 = arg3;
10864         arg3 = arg4;
10865         arg4 = arg5;
10866         arg5 = arg6;
10867         arg6 = ret;
10868 #else
10869         /* 6 args: fd, offset (high, low), len (high, low), advice */
10870         if (regpairs_aligned(cpu_env, num)) {
10871             /* offset is in (3,4), len in (5,6) and advice in 7 */
10872             arg2 = arg3;
10873             arg3 = arg4;
10874             arg4 = arg5;
10875             arg5 = arg6;
10876             arg6 = arg7;
10877         }
10878 #endif
10879         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10880                             target_offset64(arg4, arg5), arg6);
10881         return -host_to_target_errno(ret);
10882 #endif
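    /*
     * For illustration: regpairs_aligned() is assumed to report whether the
     * target ABI requires 64-bit syscall arguments to start in an
     * even-numbered register pair.  When it does, the 32-bit fd in arg1
     * forces a padding slot, so for fadvise64_64(fd, offset, len, advice)
     * the guest registers look roughly like
     *
     *     arg1 = fd, arg2 = pad, arg3/arg4 = offset,
     *     arg5/arg6 = len, arg7 = advice
     *
     * which is exactly the shuffle performed above before calling
     * posix_fadvise().  The fadvise64 and readahead cases below do the same.
     */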
10883 
10884 #ifdef TARGET_NR_fadvise64
10885     case TARGET_NR_fadvise64:
10886         /* 5 args: fd, offset (high, low), len, advice */
10887         if (regpairs_aligned(cpu_env, num)) {
10888             /* offset is in (3,4), len in 5 and advice in 6 */
10889             arg2 = arg3;
10890             arg3 = arg4;
10891             arg4 = arg5;
10892             arg5 = arg6;
10893         }
10894         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10895         return -host_to_target_errno(ret);
10896 #endif
10897 
10898 #else /* not a 32-bit ABI */
10899 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10900 #ifdef TARGET_NR_fadvise64_64
10901     case TARGET_NR_fadvise64_64:
10902 #endif
10903 #ifdef TARGET_NR_fadvise64
10904     case TARGET_NR_fadvise64:
10905 #endif
10906 #ifdef TARGET_S390X
10907         switch (arg4) {
10908         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10909         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10910         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10911         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10912         default: break;
10913         }
10914 #endif
10915         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10916 #endif
10917 #endif /* end of 64-bit ABI fadvise handling */
10918 
10919 #ifdef TARGET_NR_madvise
10920     case TARGET_NR_madvise:
10921         /* A straight passthrough may not be safe because qemu sometimes
10922            turns private file-backed mappings into anonymous mappings.
10923            This will break MADV_DONTNEED.
10924            This is a hint, so ignoring and returning success is ok.  */
10925         return 0;
10926 #endif
10927 #if TARGET_ABI_BITS == 32
10928     case TARGET_NR_fcntl64:
10929     {
10930         int cmd;
10931         struct flock64 fl;
10932         from_flock64_fn *copyfrom = copy_from_user_flock64;
10933         to_flock64_fn *copyto = copy_to_user_flock64;
10934 
10935 #ifdef TARGET_ARM
10936         if (!((CPUARMState *)cpu_env)->eabi) {
10937             copyfrom = copy_from_user_oabi_flock64;
10938             copyto = copy_to_user_oabi_flock64;
10939         }
10940 #endif
10941 
10942         cmd = target_to_host_fcntl_cmd(arg2);
10943         if (cmd == -TARGET_EINVAL) {
10944             return cmd;
10945         }
10946 
10947         switch (arg2) {
10948         case TARGET_F_GETLK64:
10949             ret = copyfrom(&fl, arg3);
10950             if (ret) {
10951                 break;
10952             }
10953             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10954             if (ret == 0) {
10955                 ret = copyto(arg3, &fl);
10956             }
10957             break;
10958 
10959         case TARGET_F_SETLK64:
10960         case TARGET_F_SETLKW64:
10961             ret = copyfrom(&fl, arg3);
10962             if (ret) {
10963                 break;
10964             }
10965             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10966             break;
10967         default:
10968             ret = do_fcntl(arg1, arg2, arg3);
10969             break;
10970         }
10971         return ret;
10972     }
10973 #endif
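    /*
     * For illustration: the ARM branch above swaps in the *_oabi_flock64
     * helpers because the old ARM OABI is assumed to lay out struct flock64
     * without the padding that EABI inserts to 8-byte-align l_start, roughly:
     *
     *     OABI: short l_type; short l_whence; long long l_start; ... (packed)
     *     EABI: short l_type; short l_whence; (4 bytes pad) long long l_start; ...
     *
     * so the two guest layouts need separate marshalling even though the host
     * sees the same struct flock64 either way.
     */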
10974 #ifdef TARGET_NR_cacheflush
10975     case TARGET_NR_cacheflush:
10976         /* self-modifying code is handled automatically, so nothing needed */
10977         return 0;
10978 #endif
10979 #ifdef TARGET_NR_getpagesize
10980     case TARGET_NR_getpagesize:
10981         return TARGET_PAGE_SIZE;
10982 #endif
10983     case TARGET_NR_gettid:
10984         return get_errno(sys_gettid());
10985 #ifdef TARGET_NR_readahead
10986     case TARGET_NR_readahead:
10987 #if TARGET_ABI_BITS == 32
10988         if (regpairs_aligned(cpu_env, num)) {
10989             arg2 = arg3;
10990             arg3 = arg4;
10991             arg4 = arg5;
10992         }
10993         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10994 #else
10995         ret = get_errno(readahead(arg1, arg2, arg3));
10996 #endif
10997         return ret;
10998 #endif
10999 #ifdef CONFIG_ATTR
11000 #ifdef TARGET_NR_setxattr
11001     case TARGET_NR_listxattr:
11002     case TARGET_NR_llistxattr:
11003     {
11004         void *p, *b = 0;
11005         if (arg2) {
11006             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11007             if (!b) {
11008                 return -TARGET_EFAULT;
11009             }
11010         }
11011         p = lock_user_string(arg1);
11012         if (p) {
11013             if (num == TARGET_NR_listxattr) {
11014                 ret = get_errno(listxattr(p, b, arg3));
11015             } else {
11016                 ret = get_errno(llistxattr(p, b, arg3));
11017             }
11018         } else {
11019             ret = -TARGET_EFAULT;
11020         }
11021         unlock_user(p, arg1, 0);
11022         unlock_user(b, arg2, arg3);
11023         return ret;
11024     }
11025     case TARGET_NR_flistxattr:
11026     {
11027         void *b = 0;
11028         if (arg2) {
11029             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11030             if (!b) {
11031                 return -TARGET_EFAULT;
11032             }
11033         }
11034         ret = get_errno(flistxattr(arg1, b, arg3));
11035         unlock_user(b, arg2, arg3);
11036         return ret;
11037     }
11038     case TARGET_NR_setxattr:
11039     case TARGET_NR_lsetxattr:
11040         {
11041             void *p, *n, *v = 0;
11042             if (arg3) {
11043                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11044                 if (!v) {
11045                     return -TARGET_EFAULT;
11046                 }
11047             }
11048             p = lock_user_string(arg1);
11049             n = lock_user_string(arg2);
11050             if (p && n) {
11051                 if (num == TARGET_NR_setxattr) {
11052                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11053                 } else {
11054                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11055                 }
11056             } else {
11057                 ret = -TARGET_EFAULT;
11058             }
11059             unlock_user(p, arg1, 0);
11060             unlock_user(n, arg2, 0);
11061             unlock_user(v, arg3, 0);
11062         }
11063         return ret;
11064     case TARGET_NR_fsetxattr:
11065         {
11066             void *n, *v = 0;
11067             if (arg3) {
11068                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11069                 if (!v) {
11070                     return -TARGET_EFAULT;
11071                 }
11072             }
11073             n = lock_user_string(arg2);
11074             if (n) {
11075                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11076             } else {
11077                 ret = -TARGET_EFAULT;
11078             }
11079             unlock_user(n, arg2, 0);
11080             unlock_user(v, arg3, 0);
11081         }
11082         return ret;
11083     case TARGET_NR_getxattr:
11084     case TARGET_NR_lgetxattr:
11085         {
11086             void *p, *n, *v = 0;
11087             if (arg3) {
11088                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11089                 if (!v) {
11090                     return -TARGET_EFAULT;
11091                 }
11092             }
11093             p = lock_user_string(arg1);
11094             n = lock_user_string(arg2);
11095             if (p && n) {
11096                 if (num == TARGET_NR_getxattr) {
11097                     ret = get_errno(getxattr(p, n, v, arg4));
11098                 } else {
11099                     ret = get_errno(lgetxattr(p, n, v, arg4));
11100                 }
11101             } else {
11102                 ret = -TARGET_EFAULT;
11103             }
11104             unlock_user(p, arg1, 0);
11105             unlock_user(n, arg2, 0);
11106             unlock_user(v, arg3, arg4);
11107         }
11108         return ret;
11109     case TARGET_NR_fgetxattr:
11110         {
11111             void *n, *v = 0;
11112             if (arg3) {
11113                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11114                 if (!v) {
11115                     return -TARGET_EFAULT;
11116                 }
11117             }
11118             n = lock_user_string(arg2);
11119             if (n) {
11120                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11121             } else {
11122                 ret = -TARGET_EFAULT;
11123             }
11124             unlock_user(n, arg2, 0);
11125             unlock_user(v, arg3, arg4);
11126         }
11127         return ret;
11128     case TARGET_NR_removexattr:
11129     case TARGET_NR_lremovexattr:
11130         {
11131             void *p, *n;
11132             p = lock_user_string(arg1);
11133             n = lock_user_string(arg2);
11134             if (p && n) {
11135                 if (num == TARGET_NR_removexattr) {
11136                     ret = get_errno(removexattr(p, n));
11137                 } else {
11138                     ret = get_errno(lremovexattr(p, n));
11139                 }
11140             } else {
11141                 ret = -TARGET_EFAULT;
11142             }
11143             unlock_user(p, arg1, 0);
11144             unlock_user(n, arg2, 0);
11145         }
11146         return ret;
11147     case TARGET_NR_fremovexattr:
11148         {
11149             void *n;
11150             n = lock_user_string(arg2);
11151             if (n) {
11152                 ret = get_errno(fremovexattr(arg1, n));
11153             } else {
11154                 ret = -TARGET_EFAULT;
11155             }
11156             unlock_user(n, arg2, 0);
11157         }
11158         return ret;
11159 #endif
11160 #endif /* CONFIG_ATTR */
11161 #ifdef TARGET_NR_set_thread_area
11162     case TARGET_NR_set_thread_area:
11163 #if defined(TARGET_MIPS)
11164       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11165       return 0;
11166 #elif defined(TARGET_CRIS)
11167       if (arg1 & 0xff)
11168           ret = -TARGET_EINVAL;
11169       else {
11170           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11171           ret = 0;
11172       }
11173       return ret;
11174 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11175       return do_set_thread_area(cpu_env, arg1);
11176 #elif defined(TARGET_M68K)
11177       {
11178           TaskState *ts = cpu->opaque;
11179           ts->tp_value = arg1;
11180           return 0;
11181       }
11182 #else
11183       return -TARGET_ENOSYS;
11184 #endif
11185 #endif
11186 #ifdef TARGET_NR_get_thread_area
11187     case TARGET_NR_get_thread_area:
11188 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11189         return do_get_thread_area(cpu_env, arg1);
11190 #elif defined(TARGET_M68K)
11191         {
11192             TaskState *ts = cpu->opaque;
11193             return ts->tp_value;
11194         }
11195 #else
11196         return -TARGET_ENOSYS;
11197 #endif
11198 #endif
11199 #ifdef TARGET_NR_getdomainname
11200     case TARGET_NR_getdomainname:
11201         return -TARGET_ENOSYS;
11202 #endif
11203 
11204 #ifdef TARGET_NR_clock_settime
11205     case TARGET_NR_clock_settime:
11206     {
11207         struct timespec ts;
11208 
11209         ret = target_to_host_timespec(&ts, arg2);
11210         if (!is_error(ret)) {
11211             ret = get_errno(clock_settime(arg1, &ts));
11212         }
11213         return ret;
11214     }
11215 #endif
11216 #ifdef TARGET_NR_clock_gettime
11217     case TARGET_NR_clock_gettime:
11218     {
11219         struct timespec ts;
11220         ret = get_errno(clock_gettime(arg1, &ts));
11221         if (!is_error(ret)) {
11222             ret = host_to_target_timespec(arg2, &ts);
11223         }
11224         return ret;
11225     }
11226 #endif
11227 #ifdef TARGET_NR_clock_getres
11228     case TARGET_NR_clock_getres:
11229     {
11230         struct timespec ts;
11231         ret = get_errno(clock_getres(arg1, &ts));
11232         if (!is_error(ret)) {
11233             host_to_target_timespec(arg2, &ts);
11234         }
11235         return ret;
11236     }
11237 #endif
11238 #ifdef TARGET_NR_clock_nanosleep
11239     case TARGET_NR_clock_nanosleep:
11240     {
11241         struct timespec ts;
11242         target_to_host_timespec(&ts, arg3);
11243         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11244                                              &ts, arg4 ? &ts : NULL));
11245         if (arg4)
11246             host_to_target_timespec(arg4, &ts);
11247 
11248 #if defined(TARGET_PPC)
11249         /* clock_nanosleep is odd in that it returns positive errno values.
11250          * On PPC, CR0 bit 3 should be set in such a situation. */
11251         if (ret && ret != -TARGET_ERESTARTSYS) {
11252             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11253         }
11254 #endif
11255         return ret;
11256     }
11257 #endif
11258 
11259 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11260     case TARGET_NR_set_tid_address:
11261         return get_errno(set_tid_address((int *)g2h(arg1)));
11262 #endif
11263 
11264     case TARGET_NR_tkill:
11265         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11266 
11267     case TARGET_NR_tgkill:
11268         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11269                          target_to_host_signal(arg3)));
11270 
11271 #ifdef TARGET_NR_set_robust_list
11272     case TARGET_NR_set_robust_list:
11273     case TARGET_NR_get_robust_list:
11274         /* The ABI for supporting robust futexes has userspace pass
11275          * the kernel a pointer to a linked list which is updated by
11276          * userspace after the syscall; the list is walked by the kernel
11277          * when the thread exits. Since the linked list in QEMU guest
11278          * memory isn't a valid linked list for the host and we have
11279          * no way to reliably intercept the thread-death event, we can't
11280          * support these. Silently return ENOSYS so that guest userspace
11281          * falls back to a non-robust futex implementation (which should
11282          * be OK except in the corner case of the guest crashing while
11283          * holding a mutex that is shared with another process via
11284          * shared memory).
11285          */
11286         return -TARGET_ENOSYS;
11287 #endif
11288 
11289 #if defined(TARGET_NR_utimensat)
11290     case TARGET_NR_utimensat:
11291         {
11292             struct timespec *tsp, ts[2];
11293             if (!arg3) {
11294                 tsp = NULL;
11295             } else {
11296                 target_to_host_timespec(ts, arg3);
11297                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11298                 tsp = ts;
11299             }
11300             if (!arg2)
11301                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11302             else {
11303                 if (!(p = lock_user_string(arg2))) {
11304                     return -TARGET_EFAULT;
11305                 }
11306                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11307                 unlock_user(p, arg2, 0);
11308             }
11309         }
11310         return ret;
11311 #endif
11312     case TARGET_NR_futex:
11313         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11314 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11315     case TARGET_NR_inotify_init:
11316         ret = get_errno(sys_inotify_init());
11317         if (ret >= 0) {
11318             fd_trans_register(ret, &target_inotify_trans);
11319         }
11320         return ret;
11321 #endif
11322 #ifdef CONFIG_INOTIFY1
11323 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11324     case TARGET_NR_inotify_init1:
11325         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11326                                           fcntl_flags_tbl)));
11327         if (ret >= 0) {
11328             fd_trans_register(ret, &target_inotify_trans);
11329         }
11330         return ret;
11331 #endif
11332 #endif
11333 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11334     case TARGET_NR_inotify_add_watch:
11335         p = lock_user_string(arg2);
11336         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11337         unlock_user(p, arg2, 0);
11338         return ret;
11339 #endif
11340 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11341     case TARGET_NR_inotify_rm_watch:
11342         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11343 #endif
11344 
11345 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11346     case TARGET_NR_mq_open:
11347         {
11348             struct mq_attr posix_mq_attr;
11349             struct mq_attr *pposix_mq_attr;
11350             int host_flags;
11351 
11352             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11353             pposix_mq_attr = NULL;
11354             if (arg4) {
11355                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11356                     return -TARGET_EFAULT;
11357                 }
11358                 pposix_mq_attr = &posix_mq_attr;
11359             }
11360             p = lock_user_string(arg1 - 1);
11361             if (!p) {
11362                 return -TARGET_EFAULT;
11363             }
11364             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11365             unlock_user(p, arg1, 0);
11366         }
11367         return ret;
11368 
11369     case TARGET_NR_mq_unlink:
11370         p = lock_user_string(arg1 - 1);
11371         if (!p) {
11372             return -TARGET_EFAULT;
11373         }
11374         ret = get_errno(mq_unlink(p));
11375         unlock_user(p, arg1, 0);
11376         return ret;
11377 
11378     case TARGET_NR_mq_timedsend:
11379         {
11380             struct timespec ts;
11381 
11382             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11383             if (arg5 != 0) {
11384                 target_to_host_timespec(&ts, arg5);
11385                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11386                 host_to_target_timespec(arg5, &ts);
11387             } else {
11388                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11389             }
11390             unlock_user(p, arg2, arg3);
11391         }
11392         return ret;
11393 
11394     case TARGET_NR_mq_timedreceive:
11395         {
11396             struct timespec ts;
11397             unsigned int prio;
11398 
11399             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11400             if (arg5 != 0) {
11401                 target_to_host_timespec(&ts, arg5);
11402                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11403                                                      &prio, &ts));
11404                 host_to_target_timespec(arg5, &ts);
11405             } else {
11406                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11407                                                      &prio, NULL));
11408             }
11409             unlock_user(p, arg2, arg3);
11410             if (arg4 != 0)
11411                 put_user_u32(prio, arg4);
11412         }
11413         return ret;
11414 
11415     /* Not implemented for now... */
11416 /*     case TARGET_NR_mq_notify: */
11417 /*         break; */
11418 
11419     case TARGET_NR_mq_getsetattr:
11420         {
11421             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11422             ret = 0;
11423             if (arg2 != 0) {
11424                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11425                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11426                                            &posix_mq_attr_out));
11427             } else if (arg3 != 0) {
11428                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11429             }
11430             if (ret == 0 && arg3 != 0) {
11431                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11432             }
11433         }
11434         return ret;
11435 #endif
11436 
11437 #ifdef CONFIG_SPLICE
11438 #ifdef TARGET_NR_tee
11439     case TARGET_NR_tee:
11440         {
11441             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11442         }
11443         return ret;
11444 #endif
11445 #ifdef TARGET_NR_splice
11446     case TARGET_NR_splice:
11447         {
11448             loff_t loff_in, loff_out;
11449             loff_t *ploff_in = NULL, *ploff_out = NULL;
11450             if (arg2) {
11451                 if (get_user_u64(loff_in, arg2)) {
11452                     return -TARGET_EFAULT;
11453                 }
11454                 ploff_in = &loff_in;
11455             }
11456             if (arg4) {
11457                 if (get_user_u64(loff_out, arg4)) {
11458                     return -TARGET_EFAULT;
11459                 }
11460                 ploff_out = &loff_out;
11461             }
11462             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11463             if (arg2) {
11464                 if (put_user_u64(loff_in, arg2)) {
11465                     return -TARGET_EFAULT;
11466                 }
11467             }
11468             if (arg4) {
11469                 if (put_user_u64(loff_out, arg4)) {
11470                     return -TARGET_EFAULT;
11471                 }
11472             }
11473         }
11474         return ret;
11475 #endif
11476 #ifdef TARGET_NR_vmsplice
11477     case TARGET_NR_vmsplice:
11478         {
11479             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11480             if (vec != NULL) {
11481                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11482                 unlock_iovec(vec, arg2, arg3, 0);
11483             } else {
11484                 ret = -host_to_target_errno(errno);
11485             }
11486         }
11487         return ret;
11488 #endif
11489 #endif /* CONFIG_SPLICE */
11490 #ifdef CONFIG_EVENTFD
11491 #if defined(TARGET_NR_eventfd)
11492     case TARGET_NR_eventfd:
11493         ret = get_errno(eventfd(arg1, 0));
11494         if (ret >= 0) {
11495             fd_trans_register(ret, &target_eventfd_trans);
11496         }
11497         return ret;
11498 #endif
11499 #if defined(TARGET_NR_eventfd2)
11500     case TARGET_NR_eventfd2:
11501     {
11502         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11503         if (arg2 & TARGET_O_NONBLOCK) {
11504             host_flags |= O_NONBLOCK;
11505         }
11506         if (arg2 & TARGET_O_CLOEXEC) {
11507             host_flags |= O_CLOEXEC;
11508         }
11509         ret = get_errno(eventfd(arg1, host_flags));
11510         if (ret >= 0) {
11511             fd_trans_register(ret, &target_eventfd_trans);
11512         }
11513         return ret;
11514     }
11515 #endif
11516 #endif /* CONFIG_EVENTFD  */
11517 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11518     case TARGET_NR_fallocate:
11519 #if TARGET_ABI_BITS == 32
11520         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11521                                   target_offset64(arg5, arg6)));
11522 #else
11523         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11524 #endif
11525         return ret;
11526 #endif
11527 #if defined(CONFIG_SYNC_FILE_RANGE)
11528 #if defined(TARGET_NR_sync_file_range)
11529     case TARGET_NR_sync_file_range:
11530 #if TARGET_ABI_BITS == 32
11531 #if defined(TARGET_MIPS)
11532         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11533                                         target_offset64(arg5, arg6), arg7));
11534 #else
11535         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11536                                         target_offset64(arg4, arg5), arg6));
11537 #endif /* !TARGET_MIPS */
11538 #else
11539         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11540 #endif
11541         return ret;
11542 #endif
11543 #if defined(TARGET_NR_sync_file_range2)
11544     case TARGET_NR_sync_file_range2:
11545         /* This is like sync_file_range but the arguments are reordered */
11546 #if TARGET_ABI_BITS == 32
11547         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11548                                         target_offset64(arg5, arg6), arg2));
11549 #else
11550         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11551 #endif
11552         return ret;
11553 #endif
11554 #endif
11555 #if defined(TARGET_NR_signalfd4)
11556     case TARGET_NR_signalfd4:
11557         return do_signalfd4(arg1, arg2, arg4);
11558 #endif
11559 #if defined(TARGET_NR_signalfd)
11560     case TARGET_NR_signalfd:
11561         return do_signalfd4(arg1, arg2, 0);
11562 #endif
11563 #if defined(CONFIG_EPOLL)
11564 #if defined(TARGET_NR_epoll_create)
11565     case TARGET_NR_epoll_create:
11566         return get_errno(epoll_create(arg1));
11567 #endif
11568 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11569     case TARGET_NR_epoll_create1:
11570         return get_errno(epoll_create1(arg1));
11571 #endif
11572 #if defined(TARGET_NR_epoll_ctl)
11573     case TARGET_NR_epoll_ctl:
11574     {
11575         struct epoll_event ep;
11576         struct epoll_event *epp = 0;
11577         if (arg4) {
11578             struct target_epoll_event *target_ep;
11579             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11580                 return -TARGET_EFAULT;
11581             }
11582             ep.events = tswap32(target_ep->events);
11583             /* The epoll_data_t union is just opaque data to the kernel,
11584              * so we transfer all 64 bits across and need not worry what
11585              * actual data type it is.
11586              */
11587             ep.data.u64 = tswap64(target_ep->data.u64);
11588             unlock_user_struct(target_ep, arg4, 0);
11589             epp = &ep;
11590         }
11591         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11592     }
11593 #endif
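    /*
     * For illustration: because the kernel treats epoll_data_t as opaque,
     * byte-swapping the whole 64-bit image with tswap64() round-trips
     * whichever union member the guest stored -- fd, u32, u64 or ptr --
     * without needing to know which one it was.  The epoll_wait/epoll_pwait
     * path below uses the same trick when copying events back out.
     */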
11594 
11595 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11596 #if defined(TARGET_NR_epoll_wait)
11597     case TARGET_NR_epoll_wait:
11598 #endif
11599 #if defined(TARGET_NR_epoll_pwait)
11600     case TARGET_NR_epoll_pwait:
11601 #endif
11602     {
11603         struct target_epoll_event *target_ep;
11604         struct epoll_event *ep;
11605         int epfd = arg1;
11606         int maxevents = arg3;
11607         int timeout = arg4;
11608 
11609         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11610             return -TARGET_EINVAL;
11611         }
11612 
11613         target_ep = lock_user(VERIFY_WRITE, arg2,
11614                               maxevents * sizeof(struct target_epoll_event), 1);
11615         if (!target_ep) {
11616             return -TARGET_EFAULT;
11617         }
11618 
11619         ep = g_try_new(struct epoll_event, maxevents);
11620         if (!ep) {
11621             unlock_user(target_ep, arg2, 0);
11622             return -TARGET_ENOMEM;
11623         }
11624 
11625         switch (num) {
11626 #if defined(TARGET_NR_epoll_pwait)
11627         case TARGET_NR_epoll_pwait:
11628         {
11629             target_sigset_t *target_set;
11630             sigset_t _set, *set = &_set;
11631 
11632             if (arg5) {
11633                 if (arg6 != sizeof(target_sigset_t)) {
11634                     ret = -TARGET_EINVAL;
11635                     break;
11636                 }
11637 
11638                 target_set = lock_user(VERIFY_READ, arg5,
11639                                        sizeof(target_sigset_t), 1);
11640                 if (!target_set) {
11641                     ret = -TARGET_EFAULT;
11642                     break;
11643                 }
11644                 target_to_host_sigset(set, target_set);
11645                 unlock_user(target_set, arg5, 0);
11646             } else {
11647                 set = NULL;
11648             }
11649 
11650             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11651                                              set, SIGSET_T_SIZE));
11652             break;
11653         }
11654 #endif
11655 #if defined(TARGET_NR_epoll_wait)
11656         case TARGET_NR_epoll_wait:
11657             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11658                                              NULL, 0));
11659             break;
11660 #endif
11661         default:
11662             ret = -TARGET_ENOSYS;
11663         }
11664         if (!is_error(ret)) {
11665             int i;
11666             for (i = 0; i < ret; i++) {
11667                 target_ep[i].events = tswap32(ep[i].events);
11668                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11669             }
11670             unlock_user(target_ep, arg2,
11671                         ret * sizeof(struct target_epoll_event));
11672         } else {
11673             unlock_user(target_ep, arg2, 0);
11674         }
11675         g_free(ep);
11676         return ret;
11677     }
11678 #endif
11679 #endif
11680 #ifdef TARGET_NR_prlimit64
11681     case TARGET_NR_prlimit64:
11682     {
11683         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11684         struct target_rlimit64 *target_rnew, *target_rold;
11685         struct host_rlimit64 rnew, rold, *rnewp = 0;
11686         int resource = target_to_host_resource(arg2);
11687         if (arg3) {
11688             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11689                 return -TARGET_EFAULT;
11690             }
11691             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11692             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11693             unlock_user_struct(target_rnew, arg3, 0);
11694             rnewp = &rnew;
11695         }
11696 
11697         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11698         if (!is_error(ret) && arg4) {
11699             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11700                 return -TARGET_EFAULT;
11701             }
11702             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11703             target_rold->rlim_max = tswap64(rold.rlim_max);
11704             unlock_user_struct(target_rold, arg4, 1);
11705         }
11706         return ret;
11707     }
11708 #endif
11709 #ifdef TARGET_NR_gethostname
11710     case TARGET_NR_gethostname:
11711     {
11712         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11713         if (name) {
11714             ret = get_errno(gethostname(name, arg2));
11715             unlock_user(name, arg1, arg2);
11716         } else {
11717             ret = -TARGET_EFAULT;
11718         }
11719         return ret;
11720     }
11721 #endif
11722 #ifdef TARGET_NR_atomic_cmpxchg_32
11723     case TARGET_NR_atomic_cmpxchg_32:
11724     {
11725         /* should use start_exclusive from main.c */
11726         abi_ulong mem_value;
11727         if (get_user_u32(mem_value, arg6)) {
11728             target_siginfo_t info;
11729             info.si_signo = SIGSEGV;
11730             info.si_errno = 0;
11731             info.si_code = TARGET_SEGV_MAPERR;
11732             info._sifields._sigfault._addr = arg6;
11733             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11734                          QEMU_SI_FAULT, &info);
11735             ret = 0xdeadbeef;
11736 
11737         }
11738         if (mem_value == arg2)
11739             put_user_u32(arg1, arg6);
11740         return mem_value;
11741     }
11742 #endif
11743 #ifdef TARGET_NR_atomic_barrier
11744     case TARGET_NR_atomic_barrier:
11745         /* Like the kernel implementation and the
11746            qemu arm barrier, this is a no-op.  */
11747         return 0;
11748 #endif
11749 
11750 #ifdef TARGET_NR_timer_create
11751     case TARGET_NR_timer_create:
11752     {
11753         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11754 
11755         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11756 
11757         int clkid = arg1;
11758         int timer_index = next_free_host_timer();
11759 
11760         if (timer_index < 0) {
11761             ret = -TARGET_EAGAIN;
11762         } else {
11763             timer_t *phtimer = g_posix_timers  + timer_index;
11764 
11765             if (arg2) {
11766                 phost_sevp = &host_sevp;
11767                 ret = target_to_host_sigevent(phost_sevp, arg2);
11768                 if (ret != 0) {
11769                     return ret;
11770                 }
11771             }
11772 
11773             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11774             if (ret) {
11775                 phtimer = NULL;
11776             } else {
11777                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11778                     return -TARGET_EFAULT;
11779                 }
11780             }
11781         }
11782         return ret;
11783     }
11784 #endif
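    /*
     * For illustration: the guest-visible timer id written above is the host
     * timer's slot index tagged with TIMER_MAGIC.  The timer_settime,
     * timer_gettime, timer_getoverrun and timer_delete cases below recover
     * the slot with get_timer_id(), which is assumed to validate and decode
     * it roughly as:
     *
     *     if ((id & TIMER_MAGIC_MASK) != TIMER_MAGIC) return -TARGET_EINVAL;
     *     id &= 0xffff;
     *     if (id >= ARRAY_SIZE(g_posix_timers)) return -TARGET_EINVAL;
     *
     * so a negative result propagates straight back to the guest.
     */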
11785 
11786 #ifdef TARGET_NR_timer_settime
11787     case TARGET_NR_timer_settime:
11788     {
11789         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11790          * struct itimerspec * old_value */
11791         target_timer_t timerid = get_timer_id(arg1);
11792 
11793         if (timerid < 0) {
11794             ret = timerid;
11795         } else if (arg3 == 0) {
11796             ret = -TARGET_EINVAL;
11797         } else {
11798             timer_t htimer = g_posix_timers[timerid];
11799             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11800 
11801             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11802                 return -TARGET_EFAULT;
11803             }
11804             ret = get_errno(
11805                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11806             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11807                 return -TARGET_EFAULT;
11808             }
11809         }
11810         return ret;
11811     }
11812 #endif
11813 
11814 #ifdef TARGET_NR_timer_gettime
11815     case TARGET_NR_timer_gettime:
11816     {
11817         /* args: timer_t timerid, struct itimerspec *curr_value */
11818         target_timer_t timerid = get_timer_id(arg1);
11819 
11820         if (timerid < 0) {
11821             ret = timerid;
11822         } else if (!arg2) {
11823             ret = -TARGET_EFAULT;
11824         } else {
11825             timer_t htimer = g_posix_timers[timerid];
11826             struct itimerspec hspec;
11827             ret = get_errno(timer_gettime(htimer, &hspec));
11828 
11829             if (host_to_target_itimerspec(arg2, &hspec)) {
11830                 ret = -TARGET_EFAULT;
11831             }
11832         }
11833         return ret;
11834     }
11835 #endif
11836 
11837 #ifdef TARGET_NR_timer_getoverrun
11838     case TARGET_NR_timer_getoverrun:
11839     {
11840         /* args: timer_t timerid */
11841         target_timer_t timerid = get_timer_id(arg1);
11842 
11843         if (timerid < 0) {
11844             ret = timerid;
11845         } else {
11846             timer_t htimer = g_posix_timers[timerid];
11847             ret = get_errno(timer_getoverrun(htimer));
11848         }
11849         fd_trans_unregister(ret);
11850         return ret;
11851     }
11852 #endif
11853 
11854 #ifdef TARGET_NR_timer_delete
11855     case TARGET_NR_timer_delete:
11856     {
11857         /* args: timer_t timerid */
11858         target_timer_t timerid = get_timer_id(arg1);
11859 
11860         if (timerid < 0) {
11861             ret = timerid;
11862         } else {
11863             timer_t htimer = g_posix_timers[timerid];
11864             ret = get_errno(timer_delete(htimer));
11865             g_posix_timers[timerid] = 0;
11866         }
11867         return ret;
11868     }
11869 #endif
11870 
11871 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11872     case TARGET_NR_timerfd_create:
11873         return get_errno(timerfd_create(arg1,
11874                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11875 #endif
11876 
11877 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11878     case TARGET_NR_timerfd_gettime:
11879         {
11880             struct itimerspec its_curr;
11881 
11882             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11883 
11884             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11885                 return -TARGET_EFAULT;
11886             }
11887         }
11888         return ret;
11889 #endif
11890 
11891 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11892     case TARGET_NR_timerfd_settime:
11893         {
11894             struct itimerspec its_new, its_old, *p_new;
11895 
11896             if (arg3) {
11897                 if (target_to_host_itimerspec(&its_new, arg3)) {
11898                     return -TARGET_EFAULT;
11899                 }
11900                 p_new = &its_new;
11901             } else {
11902                 p_new = NULL;
11903             }
11904 
11905             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11906 
11907             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11908                 return -TARGET_EFAULT;
11909             }
11910         }
11911         return ret;
11912 #endif
11913 
11914 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11915     case TARGET_NR_ioprio_get:
11916         return get_errno(ioprio_get(arg1, arg2));
11917 #endif
11918 
11919 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11920     case TARGET_NR_ioprio_set:
11921         return get_errno(ioprio_set(arg1, arg2, arg3));
11922 #endif
11923 
11924 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11925     case TARGET_NR_setns:
11926         return get_errno(setns(arg1, arg2));
11927 #endif
11928 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11929     case TARGET_NR_unshare:
11930         return get_errno(unshare(arg1));
11931 #endif
11932 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11933     case TARGET_NR_kcmp:
11934         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11935 #endif
11936 #ifdef TARGET_NR_swapcontext
11937     case TARGET_NR_swapcontext:
11938         /* PowerPC specific.  */
11939         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11940 #endif
11941 
11942     default:
11943         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11944         return -TARGET_ENOSYS;
11945     }
11946     return ret;
11947 }
11948 
11949 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11950                     abi_long arg2, abi_long arg3, abi_long arg4,
11951                     abi_long arg5, abi_long arg6, abi_long arg7,
11952                     abi_long arg8)
11953 {
11954     CPUState *cpu = env_cpu(cpu_env);
11955     abi_long ret;
11956 
11957 #ifdef DEBUG_ERESTARTSYS
11958     /* Debug-only code for exercising the syscall-restart code paths
11959      * in the per-architecture cpu main loops: restart every syscall
11960      * the guest makes once before letting it through.
11961      */
11962     {
11963         static bool flag;
11964         flag = !flag;
11965         if (flag) {
11966             return -TARGET_ERESTARTSYS;
11967         }
11968     }
11969 #endif
11970 
11971     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11972                              arg5, arg6, arg7, arg8);
11973 
11974     if (unlikely(do_strace)) {
11975         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11976         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11977                           arg5, arg6, arg7, arg8);
11978         print_syscall_ret(num, ret);
11979     } else {
11980         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11981                           arg5, arg6, arg7, arg8);
11982     }
11983 
11984     trace_guest_user_syscall_ret(cpu, num, ret);
11985     return ret;
11986 }
11987