xref: /openbmc/qemu/linux-user/syscall.c (revision 0fd61a2d)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #include <linux/fd.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 #include "qemu/guest-random.h"
115 #include "user/syscall-trace.h"
116 #include "qapi/error.h"
117 #include "fd-trans.h"
118 
119 #ifndef CLONE_IO
120 #define CLONE_IO                0x80000000      /* Clone io context */
121 #endif
122 
123 /* We can't directly call the host clone syscall, because this will
124  * badly confuse libc (breaking mutexes, for example). So we must
125  * divide clone flags into:
126  *  * flag combinations that look like pthread_create()
127  *  * flag combinations that look like fork()
128  *  * flags we can implement within QEMU itself
129  *  * flags we can't support and will return an error for
130  */
131 /* For thread creation, all these flags must be present; for
132  * fork, none must be present.
133  */
134 #define CLONE_THREAD_FLAGS                              \
135     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
136      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
137 
138 /* These flags are ignored:
139  * CLONE_DETACHED is now ignored by the kernel;
140  * CLONE_IO is just an optimisation hint to the I/O scheduler
141  */
142 #define CLONE_IGNORED_FLAGS                     \
143     (CLONE_DETACHED | CLONE_IO)
144 
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS               \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
149 
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
152     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
153      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
154 
155 #define CLONE_INVALID_FORK_FLAGS                                        \
156     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
157 
158 #define CLONE_INVALID_THREAD_FLAGS                                      \
159     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
160        CLONE_IGNORED_FLAGS))
161 
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163  * have almost all been allocated. We cannot support any of
164  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166  * The checks against the invalid thread masks above will catch these.
167  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168  */
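/* For example, glibc's pthread_create() typically passes CLONE_VM | CLONE_FS |
 * CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 * CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID: all of CLONE_THREAD_FLAGS plus
 * only bits in CLONE_OPTIONAL_THREAD_FLAGS, so such a call is handled on the
 * pthread_create()-like path rather than the fork()-like path.
 */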
169 
170 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
171  * once. This exercises the codepaths for restart.
172  */
173 //#define DEBUG_ERESTARTSYS
174 
175 //#include <linux/msdos_fs.h>
176 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
177 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
178 
179 #undef _syscall0
180 #undef _syscall1
181 #undef _syscall2
182 #undef _syscall3
183 #undef _syscall4
184 #undef _syscall5
185 #undef _syscall6
186 
187 #define _syscall0(type,name)		\
188 static type name (void)			\
189 {					\
190 	return syscall(__NR_##name);	\
191 }
192 
193 #define _syscall1(type,name,type1,arg1)		\
194 static type name (type1 arg1)			\
195 {						\
196 	return syscall(__NR_##name, arg1);	\
197 }
198 
199 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
200 static type name (type1 arg1,type2 arg2)		\
201 {							\
202 	return syscall(__NR_##name, arg1, arg2);	\
203 }
204 
205 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
206 static type name (type1 arg1,type2 arg2,type3 arg3)		\
207 {								\
208 	return syscall(__NR_##name, arg1, arg2, arg3);		\
209 }
210 
211 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
213 {										\
214 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
215 }
216 
217 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
218 		  type5,arg5)							\
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
220 {										\
221 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
222 }
223 
224 
225 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
226 		  type5,arg5,type6,arg6)					\
227 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
228                   type6 arg6)							\
229 {										\
230 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
231 }
232 
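/* Each _syscallN() instantiation below expands to a small static wrapper that
 * invokes the host syscall directly via syscall(2), bypassing any libc wrapper.
 * For example, _syscall0(int, sys_gettid) expands to roughly:
 *     static int sys_gettid(void) { return syscall(__NR_sys_gettid); }
 * with __NR_sys_gettid #defined to __NR_gettid just before its use.
 */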
233 
234 #define __NR_sys_uname __NR_uname
235 #define __NR_sys_getcwd1 __NR_getcwd
236 #define __NR_sys_getdents __NR_getdents
237 #define __NR_sys_getdents64 __NR_getdents64
238 #define __NR_sys_getpriority __NR_getpriority
239 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
240 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
241 #define __NR_sys_syslog __NR_syslog
242 #define __NR_sys_futex __NR_futex
243 #define __NR_sys_inotify_init __NR_inotify_init
244 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
245 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
246 #define __NR_sys_statx __NR_statx
247 
248 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
249 #define __NR__llseek __NR_lseek
250 #endif
251 
252 /* Newer kernel ports have llseek() instead of _llseek() */
253 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
254 #define TARGET_NR__llseek TARGET_NR_llseek
255 #endif
256 
257 #define __NR_sys_gettid __NR_gettid
258 _syscall0(int, sys_gettid)
259 
260 /* For the 64-bit guest on 32-bit host case we must emulate
261  * getdents using getdents64, because otherwise the host
262  * might hand us back more dirent records than we can fit
263  * into the guest buffer after structure format conversion.
264  * Otherwise we emulate getdents with getdents if the host has it.
265  */
266 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
267 #define EMULATE_GETDENTS_WITH_GETDENTS
268 #endif
269 
270 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
271 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
272 #endif
273 #if (defined(TARGET_NR_getdents) && \
274       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
275     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
276 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
277 #endif
278 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
279 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
280           loff_t *, res, uint, wh);
281 #endif
282 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
283 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
284           siginfo_t *, uinfo)
285 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
286 #ifdef __NR_exit_group
287 _syscall1(int,exit_group,int,error_code)
288 #endif
289 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
290 _syscall1(int,set_tid_address,int *,tidptr)
291 #endif
292 #if defined(TARGET_NR_futex) && defined(__NR_futex)
293 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
294           const struct timespec *,timeout,int *,uaddr2,int,val3)
295 #endif
296 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
297 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
298           unsigned long *, user_mask_ptr);
299 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
300 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_getcpu __NR_getcpu
303 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
304 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
305           void *, arg);
306 _syscall2(int, capget, struct __user_cap_header_struct *, header,
307           struct __user_cap_data_struct *, data);
308 _syscall2(int, capset, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
311 _syscall2(int, ioprio_get, int, which, int, who)
312 #endif
313 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
314 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
315 #endif
316 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
317 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
318 #endif
319 
320 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
321 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
322           unsigned long, idx1, unsigned long, idx2)
323 #endif
324 
325 /*
326  * It is assumed that struct statx is architecture independent.
327  */
328 #if defined(TARGET_NR_statx) && defined(__NR_statx)
329 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
330           unsigned int, mask, struct target_statx *, statxbuf)
331 #endif
332 
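/* Each bitmask_transtbl entry is (target_mask, target_bits, host_mask,
 * host_bits). Roughly, the translation helpers test a flags word against the
 * mask on one side and, where it matches the corresponding bits, OR in the
 * bits from the other side; the all-zero entry terminates the table.
 */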
333 static bitmask_transtbl fcntl_flags_tbl[] = {
334   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
335   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
336   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
337   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
338   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
339   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
340   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
341   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
342   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
343   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
344   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
345   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
346   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
347 #if defined(O_DIRECT)
348   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
349 #endif
350 #if defined(O_NOATIME)
351   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
352 #endif
353 #if defined(O_CLOEXEC)
354   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
355 #endif
356 #if defined(O_PATH)
357   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
358 #endif
359 #if defined(O_TMPFILE)
360   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
361 #endif
362   /* Don't terminate the list prematurely on 64-bit host+guest.  */
363 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
364   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
365 #endif
366   { 0, 0, 0, 0 }
367 };
368 
369 static int sys_getcwd1(char *buf, size_t size)
370 {
371   if (getcwd(buf, size) == NULL) {
372       /* getcwd() sets errno */
373       return (-1);
374   }
375   return strlen(buf)+1;
376 }
377 
378 #ifdef TARGET_NR_utimensat
379 #if defined(__NR_utimensat)
380 #define __NR_sys_utimensat __NR_utimensat
381 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
382           const struct timespec *,tsp,int,flags)
383 #else
384 static int sys_utimensat(int dirfd, const char *pathname,
385                          const struct timespec times[2], int flags)
386 {
387     errno = ENOSYS;
388     return -1;
389 }
390 #endif
391 #endif /* TARGET_NR_utimensat */
392 
393 #ifdef TARGET_NR_renameat2
394 #if defined(__NR_renameat2)
395 #define __NR_sys_renameat2 __NR_renameat2
396 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
397           const char *, new, unsigned int, flags)
398 #else
399 static int sys_renameat2(int oldfd, const char *old,
400                          int newfd, const char *new, int flags)
401 {
402     if (flags == 0) {
403         return renameat(oldfd, old, newfd, new);
404     }
405     errno = ENOSYS;
406     return -1;
407 }
408 #endif
409 #endif /* TARGET_NR_renameat2 */
410 
411 #ifdef CONFIG_INOTIFY
412 #include <sys/inotify.h>
413 
414 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
415 static int sys_inotify_init(void)
416 {
417   return (inotify_init());
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
421 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
422 {
423   return (inotify_add_watch(fd, pathname, mask));
424 }
425 #endif
426 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
427 static int sys_inotify_rm_watch(int fd, int32_t wd)
428 {
429   return (inotify_rm_watch(fd, wd));
430 }
431 #endif
432 #ifdef CONFIG_INOTIFY1
433 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
434 static int sys_inotify_init1(int flags)
435 {
436   return (inotify_init1(flags));
437 }
438 #endif
439 #endif
440 #else
441 /* Userspace can usually survive runtime without inotify */
442 #undef TARGET_NR_inotify_init
443 #undef TARGET_NR_inotify_init1
444 #undef TARGET_NR_inotify_add_watch
445 #undef TARGET_NR_inotify_rm_watch
446 #endif /* CONFIG_INOTIFY  */
447 
448 #if defined(TARGET_NR_prlimit64)
449 #ifndef __NR_prlimit64
450 # define __NR_prlimit64 -1
451 #endif
452 #define __NR_sys_prlimit64 __NR_prlimit64
453 /* The glibc rlimit structure may not be the one used by the underlying syscall */
454 struct host_rlimit64 {
455     uint64_t rlim_cur;
456     uint64_t rlim_max;
457 };
458 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
459           const struct host_rlimit64 *, new_limit,
460           struct host_rlimit64 *, old_limit)
461 #endif
462 
463 
464 #if defined(TARGET_NR_timer_create)
465 /* Maximum of 32 active POSIX timers allowed at any one time. */
466 static timer_t g_posix_timers[32] = { 0, };
467 
468 static inline int next_free_host_timer(void)
469 {
470     int k;
471     /* FIXME: Does finding the next free slot require a lock? */
472     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
473         if (g_posix_timers[k] == 0) {
474             g_posix_timers[k] = (timer_t) 1;
475             return k;
476         }
477     }
478     return -1;
479 }
480 #endif
481 
482 /* ARM EABI and MIPS expect 64-bit types aligned even on pairs of registers */
483 #ifdef TARGET_ARM
484 static inline int regpairs_aligned(void *cpu_env, int num)
485 {
486     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
487 }
488 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
489 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
490 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
491 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
492  * register pairs, which translates to the same as ARM/MIPS, because we start
493  * with r3 as arg1 */
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #elif defined(TARGET_SH4)
496 /* SH4 doesn't align register pairs, except for p{read,write}64 */
497 static inline int regpairs_aligned(void *cpu_env, int num)
498 {
499     switch (num) {
500     case TARGET_NR_pread64:
501     case TARGET_NR_pwrite64:
502         return 1;
503 
504     default:
505         return 0;
506     }
507 }
508 #elif defined(TARGET_XTENSA)
509 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
510 #else
511 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
512 #endif
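/* When regpairs_aligned() returns 1, the syscall dispatch code later in this
 * file skips a padding register where the guest ABI requires it, so that a
 * 64-bit argument can be reassembled from an aligned (even/odd) register pair.
 */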
513 
514 #define ERRNO_TABLE_SIZE 1200
515 
516 /* target_to_host_errno_table[] is initialized from
517  * host_to_target_errno_table[] in syscall_init(). */
518 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
519 };
520 
521 /*
522  * This list is the union of errno values overridden in asm-<arch>/errno.h
523  * minus the errnos that are not actually generic to all archs.
524  */
525 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
526     [EAGAIN]		= TARGET_EAGAIN,
527     [EIDRM]		= TARGET_EIDRM,
528     [ECHRNG]		= TARGET_ECHRNG,
529     [EL2NSYNC]		= TARGET_EL2NSYNC,
530     [EL3HLT]		= TARGET_EL3HLT,
531     [EL3RST]		= TARGET_EL3RST,
532     [ELNRNG]		= TARGET_ELNRNG,
533     [EUNATCH]		= TARGET_EUNATCH,
534     [ENOCSI]		= TARGET_ENOCSI,
535     [EL2HLT]		= TARGET_EL2HLT,
536     [EDEADLK]		= TARGET_EDEADLK,
537     [ENOLCK]		= TARGET_ENOLCK,
538     [EBADE]		= TARGET_EBADE,
539     [EBADR]		= TARGET_EBADR,
540     [EXFULL]		= TARGET_EXFULL,
541     [ENOANO]		= TARGET_ENOANO,
542     [EBADRQC]		= TARGET_EBADRQC,
543     [EBADSLT]		= TARGET_EBADSLT,
544     [EBFONT]		= TARGET_EBFONT,
545     [ENOSTR]		= TARGET_ENOSTR,
546     [ENODATA]		= TARGET_ENODATA,
547     [ETIME]		= TARGET_ETIME,
548     [ENOSR]		= TARGET_ENOSR,
549     [ENONET]		= TARGET_ENONET,
550     [ENOPKG]		= TARGET_ENOPKG,
551     [EREMOTE]		= TARGET_EREMOTE,
552     [ENOLINK]		= TARGET_ENOLINK,
553     [EADV]		= TARGET_EADV,
554     [ESRMNT]		= TARGET_ESRMNT,
555     [ECOMM]		= TARGET_ECOMM,
556     [EPROTO]		= TARGET_EPROTO,
557     [EDOTDOT]		= TARGET_EDOTDOT,
558     [EMULTIHOP]		= TARGET_EMULTIHOP,
559     [EBADMSG]		= TARGET_EBADMSG,
560     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
561     [EOVERFLOW]		= TARGET_EOVERFLOW,
562     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
563     [EBADFD]		= TARGET_EBADFD,
564     [EREMCHG]		= TARGET_EREMCHG,
565     [ELIBACC]		= TARGET_ELIBACC,
566     [ELIBBAD]		= TARGET_ELIBBAD,
567     [ELIBSCN]		= TARGET_ELIBSCN,
568     [ELIBMAX]		= TARGET_ELIBMAX,
569     [ELIBEXEC]		= TARGET_ELIBEXEC,
570     [EILSEQ]		= TARGET_EILSEQ,
571     [ENOSYS]		= TARGET_ENOSYS,
572     [ELOOP]		= TARGET_ELOOP,
573     [ERESTART]		= TARGET_ERESTART,
574     [ESTRPIPE]		= TARGET_ESTRPIPE,
575     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
576     [EUSERS]		= TARGET_EUSERS,
577     [ENOTSOCK]		= TARGET_ENOTSOCK,
578     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
579     [EMSGSIZE]		= TARGET_EMSGSIZE,
580     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
581     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
582     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
583     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
584     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
585     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
586     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
587     [EADDRINUSE]	= TARGET_EADDRINUSE,
588     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
589     [ENETDOWN]		= TARGET_ENETDOWN,
590     [ENETUNREACH]	= TARGET_ENETUNREACH,
591     [ENETRESET]		= TARGET_ENETRESET,
592     [ECONNABORTED]	= TARGET_ECONNABORTED,
593     [ECONNRESET]	= TARGET_ECONNRESET,
594     [ENOBUFS]		= TARGET_ENOBUFS,
595     [EISCONN]		= TARGET_EISCONN,
596     [ENOTCONN]		= TARGET_ENOTCONN,
597     [EUCLEAN]		= TARGET_EUCLEAN,
598     [ENOTNAM]		= TARGET_ENOTNAM,
599     [ENAVAIL]		= TARGET_ENAVAIL,
600     [EISNAM]		= TARGET_EISNAM,
601     [EREMOTEIO]		= TARGET_EREMOTEIO,
602     [EDQUOT]            = TARGET_EDQUOT,
603     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
604     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
605     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
606     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
607     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
608     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
609     [EALREADY]		= TARGET_EALREADY,
610     [EINPROGRESS]	= TARGET_EINPROGRESS,
611     [ESTALE]		= TARGET_ESTALE,
612     [ECANCELED]		= TARGET_ECANCELED,
613     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
614     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
615 #ifdef ENOKEY
616     [ENOKEY]		= TARGET_ENOKEY,
617 #endif
618 #ifdef EKEYEXPIRED
619     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
620 #endif
621 #ifdef EKEYREVOKED
622     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
623 #endif
624 #ifdef EKEYREJECTED
625     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
626 #endif
627 #ifdef EOWNERDEAD
628     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
629 #endif
630 #ifdef ENOTRECOVERABLE
631     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
632 #endif
633 #ifdef ENOMSG
634     [ENOMSG]            = TARGET_ENOMSG,
635 #endif
636 #ifdef ERFKILL
637     [ERFKILL]           = TARGET_ERFKILL,
638 #endif
639 #ifdef EHWPOISON
640     [EHWPOISON]         = TARGET_EHWPOISON,
641 #endif
642 };
643 
644 static inline int host_to_target_errno(int err)
645 {
646     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
647         host_to_target_errno_table[err]) {
648         return host_to_target_errno_table[err];
649     }
650     return err;
651 }
652 
653 static inline int target_to_host_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         target_to_host_errno_table[err]) {
657         return target_to_host_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline abi_long get_errno(abi_long ret)
663 {
664     if (ret == -1)
665         return -host_to_target_errno(errno);
666     else
667         return ret;
668 }
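/* Convention used throughout this file: emulation helpers return either a
 * non-negative result or a negative *target* errno (e.g. -TARGET_EFAULT);
 * get_errno() converts a failed host call (-1 with the host errno set) into
 * that form.
 */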
669 
670 const char *target_strerror(int err)
671 {
672     if (err == TARGET_ERESTARTSYS) {
673         return "To be restarted";
674     }
675     if (err == TARGET_QEMU_ESIGRETURN) {
676         return "Successful exit from sigreturn";
677     }
678 
679     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
680         return NULL;
681     }
682     return strerror(target_to_host_errno(err));
683 }
684 
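/* safe_syscall() (defined elsewhere in the user-mode emulation code) enters
 * the host syscall in a way that lets a pending guest signal be detected and
 * reported as TARGET_ERESTARTSYS before the syscall has really started,
 * rather than being lost; the safe_syscallN() macros below only generate
 * typed wrappers around it for syscalls that may block.
 */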
685 #define safe_syscall0(type, name) \
686 static type safe_##name(void) \
687 { \
688     return safe_syscall(__NR_##name); \
689 }
690 
691 #define safe_syscall1(type, name, type1, arg1) \
692 static type safe_##name(type1 arg1) \
693 { \
694     return safe_syscall(__NR_##name, arg1); \
695 }
696 
697 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
698 static type safe_##name(type1 arg1, type2 arg2) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2); \
701 }
702 
703 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
705 { \
706     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
707 }
708 
709 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
710     type4, arg4) \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
714 }
715 
716 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
717     type4, arg4, type5, arg5) \
718 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
719     type5 arg5) \
720 { \
721     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
722 }
723 
724 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
725     type4, arg4, type5, arg5, type6, arg6) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
727     type5 arg5, type6 arg6) \
728 { \
729     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
730 }
731 
732 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
733 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
734 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
735               int, flags, mode_t, mode)
736 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
737               struct rusage *, rusage)
738 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
739               int, options, struct rusage *, rusage)
740 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
744               struct timespec *, tsp, const sigset_t *, sigmask,
745               size_t, sigsetsize)
746 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
747               int, maxevents, int, timeout, const sigset_t *, sigmask,
748               size_t, sigsetsize)
749 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
750               const struct timespec *,timeout,int *,uaddr2,int,val3)
751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
752 safe_syscall2(int, kill, pid_t, pid, int, sig)
753 safe_syscall2(int, tkill, int, tid, int, sig)
754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
758               unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
760               unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
762               socklen_t, addrlen)
763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
764               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
766               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
769 safe_syscall2(int, flock, int, fd, int, operation)
770 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
771               const struct timespec *, uts, size_t, sigsetsize)
772 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
773               int, flags)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #ifdef TARGET_NR_clock_nanosleep
777 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
778               const struct timespec *, req, struct timespec *, rem)
779 #endif
780 #ifdef __NR_ipc
781 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
782               void *, ptr, long, fifth)
783 #endif
784 #ifdef __NR_msgsnd
785 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
786               int, flags)
787 #endif
788 #ifdef __NR_msgrcv
789 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
790               long, msgtype, int, flags)
791 #endif
792 #ifdef __NR_semtimedop
793 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
794               unsigned, nsops, const struct timespec *, timeout)
795 #endif
796 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
797 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
798               size_t, len, unsigned, prio, const struct timespec *, timeout)
799 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
800               size_t, len, unsigned *, prio, const struct timespec *, timeout)
801 #endif
802 /* We do ioctl like this rather than via safe_syscall3 to preserve the
803  * "third argument might be integer or pointer or not present" behaviour of
804  * the libc function.
805  */
806 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
807 /* Similarly for fcntl. Note that callers must always:
808  *  - pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
809  *  - use the flock64 struct rather than the unsuffixed flock
810  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
811  */
812 #ifdef __NR_fcntl64
813 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
814 #else
815 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
816 #endif
817 
818 static inline int host_to_target_sock_type(int host_type)
819 {
820     int target_type;
821 
822     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
823     case SOCK_DGRAM:
824         target_type = TARGET_SOCK_DGRAM;
825         break;
826     case SOCK_STREAM:
827         target_type = TARGET_SOCK_STREAM;
828         break;
829     default:
830         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
831         break;
832     }
833 
834 #if defined(SOCK_CLOEXEC)
835     if (host_type & SOCK_CLOEXEC) {
836         target_type |= TARGET_SOCK_CLOEXEC;
837     }
838 #endif
839 
840 #if defined(SOCK_NONBLOCK)
841     if (host_type & SOCK_NONBLOCK) {
842         target_type |= TARGET_SOCK_NONBLOCK;
843     }
844 #endif
845 
846     return target_type;
847 }
848 
849 static abi_ulong target_brk;
850 static abi_ulong target_original_brk;
851 static abi_ulong brk_page;
852 
853 void target_set_brk(abi_ulong new_brk)
854 {
855     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
856     brk_page = HOST_PAGE_ALIGN(target_brk);
857 }
858 
859 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
860 #define DEBUGF_BRK(message, args...)
861 
862 /* do_brk() must return target values and target errnos. */
863 abi_long do_brk(abi_ulong new_brk)
864 {
865     abi_long mapped_addr;
866     abi_ulong new_alloc_size;
867 
868     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
869 
870     if (!new_brk) {
871         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
872         return target_brk;
873     }
874     if (new_brk < target_original_brk) {
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
876                    target_brk);
877         return target_brk;
878     }
879 
880     /* If the new brk is less than the highest page reserved to the
881      * target heap allocation, set it and we're almost done...  */
882     if (new_brk <= brk_page) {
883         /* Heap contents are initialized to zero, as for anonymous
884          * mapped pages.  */
885         if (new_brk > target_brk) {
886             memset(g2h(target_brk), 0, new_brk - target_brk);
887         }
888         target_brk = new_brk;
889         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
890         return target_brk;
891     }
892 
893     /* We need to allocate more memory after the brk... Note that
894      * we don't use MAP_FIXED because that will map over the top of
895      * any existing mapping (like the one with the host libc or qemu
896      * itself); instead we treat "mapped but at wrong address" as
897      * a failure and unmap again.
898      */
899     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
900     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
901                                         PROT_READ|PROT_WRITE,
902                                         MAP_ANON|MAP_PRIVATE, 0, 0));
903 
904     if (mapped_addr == brk_page) {
905         /* Heap contents are initialized to zero, as for anonymous
906          * mapped pages.  Technically the new pages are already
907          * initialized to zero since they *are* anonymous mapped
908          * pages, however we have to take care with the contents that
909          * come from the remaining part of the previous page: it may
910          * contain garbage data due to previous heap usage (grown
911          * then shrunk).  */
912         memset(g2h(target_brk), 0, brk_page - target_brk);
913 
914         target_brk = new_brk;
915         brk_page = HOST_PAGE_ALIGN(target_brk);
916         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
917             target_brk);
918         return target_brk;
919     } else if (mapped_addr != -1) {
920         /* Mapped but at wrong address, meaning there wasn't actually
921          * enough space for this brk.
922          */
923         target_munmap(mapped_addr, new_alloc_size);
924         mapped_addr = -1;
925         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
926     }
927     else {
928         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
929     }
930 
931 #if defined(TARGET_ALPHA)
932     /* We (partially) emulate OSF/1 on Alpha, which requires we
933        return a proper errno, not an unchanged brk value.  */
934     return -TARGET_ENOMEM;
935 #endif
936     /* For everything else, return the previous break. */
937     return target_brk;
938 }
939 
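/* A guest fd_set is an array of abi_ulong words in guest byte order, which
 * need not match the host fd_set representation (different word size and/or
 * endianness), so the bitmaps below are repacked bit by bit in each direction.
 */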
940 static inline abi_long copy_from_user_fdset(fd_set *fds,
941                                             abi_ulong target_fds_addr,
942                                             int n)
943 {
944     int i, nw, j, k;
945     abi_ulong b, *target_fds;
946 
947     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
948     if (!(target_fds = lock_user(VERIFY_READ,
949                                  target_fds_addr,
950                                  sizeof(abi_ulong) * nw,
951                                  1)))
952         return -TARGET_EFAULT;
953 
954     FD_ZERO(fds);
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         /* grab the abi_ulong */
958         __get_user(b, &target_fds[i]);
959         for (j = 0; j < TARGET_ABI_BITS; j++) {
960             /* check the bit inside the abi_ulong */
961             if ((b >> j) & 1)
962                 FD_SET(k, fds);
963             k++;
964         }
965     }
966 
967     unlock_user(target_fds, target_fds_addr, 0);
968 
969     return 0;
970 }
971 
972 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
973                                                  abi_ulong target_fds_addr,
974                                                  int n)
975 {
976     if (target_fds_addr) {
977         if (copy_from_user_fdset(fds, target_fds_addr, n))
978             return -TARGET_EFAULT;
979         *fds_ptr = fds;
980     } else {
981         *fds_ptr = NULL;
982     }
983     return 0;
984 }
985 
986 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
987                                           const fd_set *fds,
988                                           int n)
989 {
990     int i, nw, j, k;
991     abi_long v;
992     abi_ulong *target_fds;
993 
994     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
995     if (!(target_fds = lock_user(VERIFY_WRITE,
996                                  target_fds_addr,
997                                  sizeof(abi_ulong) * nw,
998                                  0)))
999         return -TARGET_EFAULT;
1000 
1001     k = 0;
1002     for (i = 0; i < nw; i++) {
1003         v = 0;
1004         for (j = 0; j < TARGET_ABI_BITS; j++) {
1005             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1006             k++;
1007         }
1008         __put_user(v, &target_fds[i]);
1009     }
1010 
1011     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1012 
1013     return 0;
1014 }
1015 
1016 #if defined(__alpha__)
1017 #define HOST_HZ 1024
1018 #else
1019 #define HOST_HZ 100
1020 #endif
1021 
1022 static inline abi_long host_to_target_clock_t(long ticks)
1023 {
1024 #if HOST_HZ == TARGET_HZ
1025     return ticks;
1026 #else
1027     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1028 #endif
1029 }
1030 
1031 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1032                                              const struct rusage *rusage)
1033 {
1034     struct target_rusage *target_rusage;
1035 
1036     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1037         return -TARGET_EFAULT;
1038     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1039     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1040     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1041     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1042     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1043     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1044     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1045     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1046     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1047     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1048     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1049     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1050     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1051     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1052     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1053     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1054     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1055     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1056     unlock_user_struct(target_rusage, target_addr, 1);
1057 
1058     return 0;
1059 }
1060 
1061 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1062 {
1063     abi_ulong target_rlim_swap;
1064     rlim_t result;
1065 
1066     target_rlim_swap = tswapal(target_rlim);
1067     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1068         return RLIM_INFINITY;
1069 
1070     result = target_rlim_swap;
1071     if (target_rlim_swap != (rlim_t)result)
1072         return RLIM_INFINITY;
1073 
1074     return result;
1075 }
1076 
1077 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1078 {
1079     abi_ulong target_rlim_swap;
1080     abi_ulong result;
1081 
1082     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1083         target_rlim_swap = TARGET_RLIM_INFINITY;
1084     else
1085         target_rlim_swap = rlim;
1086     result = tswapal(target_rlim_swap);
1087 
1088     return result;
1089 }
1090 
1091 static inline int target_to_host_resource(int code)
1092 {
1093     switch (code) {
1094     case TARGET_RLIMIT_AS:
1095         return RLIMIT_AS;
1096     case TARGET_RLIMIT_CORE:
1097         return RLIMIT_CORE;
1098     case TARGET_RLIMIT_CPU:
1099         return RLIMIT_CPU;
1100     case TARGET_RLIMIT_DATA:
1101         return RLIMIT_DATA;
1102     case TARGET_RLIMIT_FSIZE:
1103         return RLIMIT_FSIZE;
1104     case TARGET_RLIMIT_LOCKS:
1105         return RLIMIT_LOCKS;
1106     case TARGET_RLIMIT_MEMLOCK:
1107         return RLIMIT_MEMLOCK;
1108     case TARGET_RLIMIT_MSGQUEUE:
1109         return RLIMIT_MSGQUEUE;
1110     case TARGET_RLIMIT_NICE:
1111         return RLIMIT_NICE;
1112     case TARGET_RLIMIT_NOFILE:
1113         return RLIMIT_NOFILE;
1114     case TARGET_RLIMIT_NPROC:
1115         return RLIMIT_NPROC;
1116     case TARGET_RLIMIT_RSS:
1117         return RLIMIT_RSS;
1118     case TARGET_RLIMIT_RTPRIO:
1119         return RLIMIT_RTPRIO;
1120     case TARGET_RLIMIT_SIGPENDING:
1121         return RLIMIT_SIGPENDING;
1122     case TARGET_RLIMIT_STACK:
1123         return RLIMIT_STACK;
1124     default:
1125         return code;
1126     }
1127 }
1128 
1129 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1130                                               abi_ulong target_tv_addr)
1131 {
1132     struct target_timeval *target_tv;
1133 
1134     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1135         return -TARGET_EFAULT;
1136     }
1137 
1138     __get_user(tv->tv_sec, &target_tv->tv_sec);
1139     __get_user(tv->tv_usec, &target_tv->tv_usec);
1140 
1141     unlock_user_struct(target_tv, target_tv_addr, 0);
1142 
1143     return 0;
1144 }
1145 
1146 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1147                                             const struct timeval *tv)
1148 {
1149     struct target_timeval *target_tv;
1150 
1151     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1152         return -TARGET_EFAULT;
1153     }
1154 
1155     __put_user(tv->tv_sec, &target_tv->tv_sec);
1156     __put_user(tv->tv_usec, &target_tv->tv_usec);
1157 
1158     unlock_user_struct(target_tv, target_tv_addr, 1);
1159 
1160     return 0;
1161 }
1162 
1163 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1164                                              const struct timeval *tv)
1165 {
1166     struct target__kernel_sock_timeval *target_tv;
1167 
1168     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1169         return -TARGET_EFAULT;
1170     }
1171 
1172     __put_user(tv->tv_sec, &target_tv->tv_sec);
1173     __put_user(tv->tv_usec, &target_tv->tv_usec);
1174 
1175     unlock_user_struct(target_tv, target_tv_addr, 1);
1176 
1177     return 0;
1178 }
1179 
1180 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1181                                                abi_ulong target_addr)
1182 {
1183     struct target_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 0);
1191     return 0;
1192 }
1193 
1194 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1195                                                struct timespec *host_ts)
1196 {
1197     struct target_timespec *target_ts;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1203     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1204     unlock_user_struct(target_ts, target_addr, 1);
1205     return 0;
1206 }
1207 
1208 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1209                                                  struct timespec *host_ts)
1210 {
1211     struct target__kernel_timespec *target_ts;
1212 
1213     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1214         return -TARGET_EFAULT;
1215     }
1216     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1217     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1218     unlock_user_struct(target_ts, target_addr, 1);
1219     return 0;
1220 }
1221 
1222 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1223                                                abi_ulong target_tz_addr)
1224 {
1225     struct target_timezone *target_tz;
1226 
1227     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1228         return -TARGET_EFAULT;
1229     }
1230 
1231     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1232     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1233 
1234     unlock_user_struct(target_tz, target_tz_addr, 0);
1235 
1236     return 0;
1237 }
1238 
1239 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1240 #include <mqueue.h>
1241 
1242 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1243                                               abi_ulong target_mq_attr_addr)
1244 {
1245     struct target_mq_attr *target_mq_attr;
1246 
1247     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1248                           target_mq_attr_addr, 1))
1249         return -TARGET_EFAULT;
1250 
1251     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1252     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1253     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1254     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1255 
1256     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1257 
1258     return 0;
1259 }
1260 
1261 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1262                                             const struct mq_attr *attr)
1263 {
1264     struct target_mq_attr *target_mq_attr;
1265 
1266     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1267                           target_mq_attr_addr, 0))
1268         return -TARGET_EFAULT;
1269 
1270     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1271     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1272     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1273     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1274 
1275     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1276 
1277     return 0;
1278 }
1279 #endif
1280 
1281 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1282 /* do_select() must return target values and target errnos. */
1283 static abi_long do_select(int n,
1284                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1285                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1286 {
1287     fd_set rfds, wfds, efds;
1288     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1289     struct timeval tv;
1290     struct timespec ts, *ts_ptr;
1291     abi_long ret;
1292 
1293     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1302     if (ret) {
1303         return ret;
1304     }
1305 
1306     if (target_tv_addr) {
1307         if (copy_from_user_timeval(&tv, target_tv_addr))
1308             return -TARGET_EFAULT;
1309         ts.tv_sec = tv.tv_sec;
1310         ts.tv_nsec = tv.tv_usec * 1000;
1311         ts_ptr = &ts;
1312     } else {
1313         ts_ptr = NULL;
1314     }
1315 
1316     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1317                                   ts_ptr, NULL));
1318 
1319     if (!is_error(ret)) {
1320         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1321             return -TARGET_EFAULT;
1322         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1323             return -TARGET_EFAULT;
1324         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1325             return -TARGET_EFAULT;
1326 
1327         if (target_tv_addr) {
1328             tv.tv_sec = ts.tv_sec;
1329             tv.tv_usec = ts.tv_nsec / 1000;
1330             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1331                 return -TARGET_EFAULT;
1332             }
1333         }
1334     }
1335 
1336     return ret;
1337 }
1338 
1339 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1340 static abi_long do_old_select(abi_ulong arg1)
1341 {
1342     struct target_sel_arg_struct *sel;
1343     abi_ulong inp, outp, exp, tvp;
1344     long nsel;
1345 
1346     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1347         return -TARGET_EFAULT;
1348     }
1349 
1350     nsel = tswapal(sel->n);
1351     inp = tswapal(sel->inp);
1352     outp = tswapal(sel->outp);
1353     exp = tswapal(sel->exp);
1354     tvp = tswapal(sel->tvp);
1355 
1356     unlock_user_struct(sel, arg1, 0);
1357 
1358     return do_select(nsel, inp, outp, exp, tvp);
1359 }
1360 #endif
1361 #endif
1362 
1363 static abi_long do_pipe2(int host_pipe[], int flags)
1364 {
1365 #ifdef CONFIG_PIPE2
1366     return pipe2(host_pipe, flags);
1367 #else
1368     return -ENOSYS;
1369 #endif
1370 }
1371 
1372 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1373                         int flags, int is_pipe2)
1374 {
1375     int host_pipe[2];
1376     abi_long ret;
1377     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1378 
1379     if (is_error(ret))
1380         return get_errno(ret);
1381 
1382     /* Several targets have special calling conventions for the original
1383        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1384     if (!is_pipe2) {
1385 #if defined(TARGET_ALPHA)
1386         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1387         return host_pipe[0];
1388 #elif defined(TARGET_MIPS)
1389         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1390         return host_pipe[0];
1391 #elif defined(TARGET_SH4)
1392         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1393         return host_pipe[0];
1394 #elif defined(TARGET_SPARC)
1395         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1396         return host_pipe[0];
1397 #endif
1398     }
1399 
1400     if (put_user_s32(host_pipe[0], pipedes)
1401         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1402         return -TARGET_EFAULT;
1403     return get_errno(ret);
1404 }
1405 
1406 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1407                                               abi_ulong target_addr,
1408                                               socklen_t len)
1409 {
1410     struct target_ip_mreqn *target_smreqn;
1411 
1412     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1413     if (!target_smreqn)
1414         return -TARGET_EFAULT;
1415     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1416     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1417     if (len == sizeof(struct target_ip_mreqn))
1418         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1419     unlock_user(target_smreqn, target_addr, 0);
1420 
1421     return 0;
1422 }
1423 
1424 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1425                                                abi_ulong target_addr,
1426                                                socklen_t len)
1427 {
1428     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1429     sa_family_t sa_family;
1430     struct target_sockaddr *target_saddr;
1431 
1432     if (fd_trans_target_to_host_addr(fd)) {
1433         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1434     }
1435 
1436     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1437     if (!target_saddr)
1438         return -TARGET_EFAULT;
1439 
1440     sa_family = tswap16(target_saddr->sa_family);
1441 
1442     /* Oops. The caller might send an incomplete sun_path; sun_path
1443      * must be terminated by \0 (see the manual page), but
1444      * unfortunately it is quite common to specify sockaddr_un
1445      * length as "strlen(x->sun_path)" when it should be
1446      * "strlen(...) + 1". We'll fix that here if needed.
1447      * The Linux kernel has a similar feature.
1448      */
1449 
1450     if (sa_family == AF_UNIX) {
1451         if (len < unix_maxlen && len > 0) {
1452             char *cp = (char*)target_saddr;
1453 
1454             if ( cp[len-1] && !cp[len] )
1455                 len++;
1456         }
1457         if (len > unix_maxlen)
1458             len = unix_maxlen;
1459     }
1460 
1461     memcpy(addr, target_saddr, len);
1462     addr->sa_family = sa_family;
1463     if (sa_family == AF_NETLINK) {
1464         struct sockaddr_nl *nladdr;
1465 
1466         nladdr = (struct sockaddr_nl *)addr;
1467         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1468         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1469     } else if (sa_family == AF_PACKET) {
1470         struct target_sockaddr_ll *lladdr;
1471 
1472         lladdr = (struct target_sockaddr_ll *)addr;
1473         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1474         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1475     }
1476     unlock_user(target_saddr, target_addr, 0);
1477 
1478     return 0;
1479 }
1480 
1481 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1482                                                struct sockaddr *addr,
1483                                                socklen_t len)
1484 {
1485     struct target_sockaddr *target_saddr;
1486 
1487     if (len == 0) {
1488         return 0;
1489     }
1490     assert(addr);
1491 
1492     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1493     if (!target_saddr)
1494         return -TARGET_EFAULT;
1495     memcpy(target_saddr, addr, len);
1496     if (len >= offsetof(struct target_sockaddr, sa_family) +
1497         sizeof(target_saddr->sa_family)) {
1498         target_saddr->sa_family = tswap16(addr->sa_family);
1499     }
1500     if (addr->sa_family == AF_NETLINK &&
1501         len >= sizeof(struct target_sockaddr_nl)) {
1502         struct target_sockaddr_nl *target_nl =
1503                (struct target_sockaddr_nl *)target_saddr;
1504         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1505         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1506     } else if (addr->sa_family == AF_PACKET) {
1507         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1508         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1509         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1510     } else if (addr->sa_family == AF_INET6 &&
1511                len >= sizeof(struct target_sockaddr_in6)) {
1512         struct target_sockaddr_in6 *target_in6 =
1513                (struct target_sockaddr_in6 *)target_saddr;
1514         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1515     }
1516     unlock_user(target_saddr, target_addr, len);
1517 
1518     return 0;
1519 }
1520 
1521 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1522                                            struct target_msghdr *target_msgh)
1523 {
1524     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1525     abi_long msg_controllen;
1526     abi_ulong target_cmsg_addr;
1527     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1528     socklen_t space = 0;
1529 
1530     msg_controllen = tswapal(target_msgh->msg_controllen);
1531     if (msg_controllen < sizeof (struct target_cmsghdr))
1532         goto the_end;
1533     target_cmsg_addr = tswapal(target_msgh->msg_control);
1534     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1535     target_cmsg_start = target_cmsg;
1536     if (!target_cmsg)
1537         return -TARGET_EFAULT;
1538 
1539     while (cmsg && target_cmsg) {
1540         void *data = CMSG_DATA(cmsg);
1541         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1542 
1543         int len = tswapal(target_cmsg->cmsg_len)
1544             - sizeof(struct target_cmsghdr);
1545 
1546         space += CMSG_SPACE(len);
1547         if (space > msgh->msg_controllen) {
1548             space -= CMSG_SPACE(len);
1549             /* This is a QEMU bug, since we allocated the payload
1550              * area ourselves (unlike overflow in host-to-target
1551              * conversion, which is just the guest giving us a buffer
1552              * that's too small). It can't happen for the payload types
1553              * we currently support; if it becomes an issue in future
1554              * we would need to improve our allocation strategy to
1555              * something more intelligent than "twice the size of the
1556              * target buffer we're reading from".
1557              */
1558             gemu_log("Host cmsg overflow\n");
1559             break;
1560         }
1561 
1562         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1563             cmsg->cmsg_level = SOL_SOCKET;
1564         } else {
1565             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1566         }
1567         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1568         cmsg->cmsg_len = CMSG_LEN(len);
1569 
1570         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1571             int *fd = (int *)data;
1572             int *target_fd = (int *)target_data;
1573             int i, numfds = len / sizeof(int);
1574 
1575             for (i = 0; i < numfds; i++) {
1576                 __get_user(fd[i], target_fd + i);
1577             }
1578         } else if (cmsg->cmsg_level == SOL_SOCKET
1579                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1580             struct ucred *cred = (struct ucred *)data;
1581             struct target_ucred *target_cred =
1582                 (struct target_ucred *)target_data;
1583 
1584             __get_user(cred->pid, &target_cred->pid);
1585             __get_user(cred->uid, &target_cred->uid);
1586             __get_user(cred->gid, &target_cred->gid);
1587         } else {
1588             gemu_log("Unsupported ancillary data: %d/%d\n",
1589                                         cmsg->cmsg_level, cmsg->cmsg_type);
1590             memcpy(data, target_data, len);
1591         }
1592 
1593         cmsg = CMSG_NXTHDR(msgh, cmsg);
1594         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1595                                          target_cmsg_start);
1596     }
1597     unlock_user(target_cmsg, target_cmsg_addr, 0);
1598  the_end:
1599     msgh->msg_controllen = space;
1600     return 0;
1601 }
1602 
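/*
 * Convert host ancillary data back into the guest's control buffer.
 * Some payloads need a different size on the target (e.g. SO_TIMESTAMP's
 * struct timeval), and payloads may be truncated if the guest buffer is
 * too small, in which case MSG_CTRUNC is reported to the guest.
 */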
1603 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1604                                            struct msghdr *msgh)
1605 {
1606     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1607     abi_long msg_controllen;
1608     abi_ulong target_cmsg_addr;
1609     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1610     socklen_t space = 0;
1611 
1612     msg_controllen = tswapal(target_msgh->msg_controllen);
1613     if (msg_controllen < sizeof (struct target_cmsghdr))
1614         goto the_end;
1615     target_cmsg_addr = tswapal(target_msgh->msg_control);
1616     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1617     target_cmsg_start = target_cmsg;
1618     if (!target_cmsg)
1619         return -TARGET_EFAULT;
1620 
1621     while (cmsg && target_cmsg) {
1622         void *data = CMSG_DATA(cmsg);
1623         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1624 
1625         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1626         int tgt_len, tgt_space;
1627 
1628         /* We never copy a half-header but may copy half-data;
1629          * this is Linux's behaviour in put_cmsg(). Note that
1630          * truncation here is a guest problem (which we report
1631          * to the guest via the CTRUNC bit), unlike truncation
1632          * in target_to_host_cmsg, which is a QEMU bug.
1633          */
1634         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1635             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1636             break;
1637         }
1638 
1639         if (cmsg->cmsg_level == SOL_SOCKET) {
1640             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1641         } else {
1642             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1643         }
1644         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1645 
1646         /* Payload types which need a different size of payload on
1647          * the target must adjust tgt_len here.
1648          */
1649         tgt_len = len;
1650         switch (cmsg->cmsg_level) {
1651         case SOL_SOCKET:
1652             switch (cmsg->cmsg_type) {
1653             case SO_TIMESTAMP:
1654                 tgt_len = sizeof(struct target_timeval);
1655                 break;
1656             default:
1657                 break;
1658             }
1659             break;
1660         default:
1661             break;
1662         }
1663 
1664         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1665             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1666             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1667         }
1668 
1669         /* We must now copy-and-convert len bytes of payload
1670          * into tgt_len bytes of destination space. Bear in mind
1671          * that in both source and destination we may be dealing
1672          * with a truncated value!
1673          */
1674         switch (cmsg->cmsg_level) {
1675         case SOL_SOCKET:
1676             switch (cmsg->cmsg_type) {
1677             case SCM_RIGHTS:
1678             {
1679                 int *fd = (int *)data;
1680                 int *target_fd = (int *)target_data;
1681                 int i, numfds = tgt_len / sizeof(int);
1682 
1683                 for (i = 0; i < numfds; i++) {
1684                     __put_user(fd[i], target_fd + i);
1685                 }
1686                 break;
1687             }
1688             case SO_TIMESTAMP:
1689             {
1690                 struct timeval *tv = (struct timeval *)data;
1691                 struct target_timeval *target_tv =
1692                     (struct target_timeval *)target_data;
1693 
1694                 if (len != sizeof(struct timeval) ||
1695                     tgt_len != sizeof(struct target_timeval)) {
1696                     goto unimplemented;
1697                 }
1698 
1699                 /* copy struct timeval to target */
1700                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1701                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1702                 break;
1703             }
1704             case SCM_CREDENTIALS:
1705             {
1706                 struct ucred *cred = (struct ucred *)data;
1707                 struct target_ucred *target_cred =
1708                     (struct target_ucred *)target_data;
1709 
1710                 __put_user(cred->pid, &target_cred->pid);
1711                 __put_user(cred->uid, &target_cred->uid);
1712                 __put_user(cred->gid, &target_cred->gid);
1713                 break;
1714             }
1715             default:
1716                 goto unimplemented;
1717             }
1718             break;
1719 
1720         case SOL_IP:
1721             switch (cmsg->cmsg_type) {
1722             case IP_TTL:
1723             {
1724                 uint32_t *v = (uint32_t *)data;
1725                 uint32_t *t_int = (uint32_t *)target_data;
1726 
1727                 if (len != sizeof(uint32_t) ||
1728                     tgt_len != sizeof(uint32_t)) {
1729                     goto unimplemented;
1730                 }
1731                 __put_user(*v, t_int);
1732                 break;
1733             }
1734             case IP_RECVERR:
1735             {
1736                 struct errhdr_t {
1737                    struct sock_extended_err ee;
1738                    struct sockaddr_in offender;
1739                 };
1740                 struct errhdr_t *errh = (struct errhdr_t *)data;
1741                 struct errhdr_t *target_errh =
1742                     (struct errhdr_t *)target_data;
1743 
1744                 if (len != sizeof(struct errhdr_t) ||
1745                     tgt_len != sizeof(struct errhdr_t)) {
1746                     goto unimplemented;
1747                 }
1748                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1749                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1750                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1751                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1752                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1753                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1754                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1755                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1756                     (void *) &errh->offender, sizeof(errh->offender));
1757                 break;
1758             }
1759             default:
1760                 goto unimplemented;
1761             }
1762             break;
1763 
1764         case SOL_IPV6:
1765             switch (cmsg->cmsg_type) {
1766             case IPV6_HOPLIMIT:
1767             {
1768                 uint32_t *v = (uint32_t *)data;
1769                 uint32_t *t_int = (uint32_t *)target_data;
1770 
1771                 if (len != sizeof(uint32_t) ||
1772                     tgt_len != sizeof(uint32_t)) {
1773                     goto unimplemented;
1774                 }
1775                 __put_user(*v, t_int);
1776                 break;
1777             }
1778             case IPV6_RECVERR:
1779             {
1780                 struct errhdr6_t {
1781                    struct sock_extended_err ee;
1782                    struct sockaddr_in6 offender;
1783                 };
1784                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1785                 struct errhdr6_t *target_errh =
1786                     (struct errhdr6_t *)target_data;
1787 
1788                 if (len != sizeof(struct errhdr6_t) ||
1789                     tgt_len != sizeof(struct errhdr6_t)) {
1790                     goto unimplemented;
1791                 }
1792                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1793                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1794                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1795                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1796                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1797                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1798                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1799                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1800                     (void *) &errh->offender, sizeof(errh->offender));
1801                 break;
1802             }
1803             default:
1804                 goto unimplemented;
1805             }
1806             break;
1807 
1808         default:
1809         unimplemented:
1810             gemu_log("Unsupported ancillary data: %d/%d\n",
1811                                         cmsg->cmsg_level, cmsg->cmsg_type);
1812             memcpy(target_data, data, MIN(len, tgt_len));
1813             if (tgt_len > len) {
1814                 memset(target_data + len, 0, tgt_len - len);
1815             }
1816         }
1817 
1818         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1819         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1820         if (msg_controllen < tgt_space) {
1821             tgt_space = msg_controllen;
1822         }
1823         msg_controllen -= tgt_space;
1824         space += tgt_space;
1825         cmsg = CMSG_NXTHDR(msgh, cmsg);
1826         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1827                                          target_cmsg_start);
1828     }
1829     unlock_user(target_cmsg, target_cmsg_addr, space);
1830  the_end:
1831     target_msgh->msg_controllen = tswapal(space);
1832     return 0;
1833 }
1834 
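/*
 * Socket option values are passed by address, so each case below copies
 * the guest's buffer into a host-side copy (byte-swapping multi-byte
 * fields where necessary) before calling the host setsockopt().
 * SOL_SOCKET is dispatched via TARGET_SOL_SOCKET because the guest's
 * level and SO_* option numbers may differ from the host's.
 */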
1835 /* do_setsockopt() must return target values and target errnos. */
1836 static abi_long do_setsockopt(int sockfd, int level, int optname,
1837                               abi_ulong optval_addr, socklen_t optlen)
1838 {
1839     abi_long ret;
1840     int val;
1841     struct ip_mreqn *ip_mreq;
1842     struct ip_mreq_source *ip_mreq_source;
1843 
1844     switch(level) {
1845     case SOL_TCP:
1846         /* TCP options all take an 'int' value.  */
1847         if (optlen < sizeof(uint32_t))
1848             return -TARGET_EINVAL;
1849 
1850         if (get_user_u32(val, optval_addr))
1851             return -TARGET_EFAULT;
1852         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1853         break;
1854     case SOL_IP:
1855         switch(optname) {
1856         case IP_TOS:
1857         case IP_TTL:
1858         case IP_HDRINCL:
1859         case IP_ROUTER_ALERT:
1860         case IP_RECVOPTS:
1861         case IP_RETOPTS:
1862         case IP_PKTINFO:
1863         case IP_MTU_DISCOVER:
1864         case IP_RECVERR:
1865         case IP_RECVTTL:
1866         case IP_RECVTOS:
1867 #ifdef IP_FREEBIND
1868         case IP_FREEBIND:
1869 #endif
1870         case IP_MULTICAST_TTL:
1871         case IP_MULTICAST_LOOP:
1872             val = 0;
1873             if (optlen >= sizeof(uint32_t)) {
1874                 if (get_user_u32(val, optval_addr))
1875                     return -TARGET_EFAULT;
1876             } else if (optlen >= 1) {
1877                 if (get_user_u8(val, optval_addr))
1878                     return -TARGET_EFAULT;
1879             }
1880             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1881             break;
1882         case IP_ADD_MEMBERSHIP:
1883         case IP_DROP_MEMBERSHIP:
1884             if (optlen < sizeof (struct target_ip_mreq) ||
1885                 optlen > sizeof (struct target_ip_mreqn))
1886                 return -TARGET_EINVAL;
1887 
1888             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1889             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1890             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1891             break;
1892 
1893         case IP_BLOCK_SOURCE:
1894         case IP_UNBLOCK_SOURCE:
1895         case IP_ADD_SOURCE_MEMBERSHIP:
1896         case IP_DROP_SOURCE_MEMBERSHIP:
1897             if (optlen != sizeof (struct target_ip_mreq_source))
1898                 return -TARGET_EINVAL;
1899 
1900             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1901             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1902             unlock_user(ip_mreq_source, optval_addr, 0);
1903             break;
1904 
1905         default:
1906             goto unimplemented;
1907         }
1908         break;
1909     case SOL_IPV6:
1910         switch (optname) {
1911         case IPV6_MTU_DISCOVER:
1912         case IPV6_MTU:
1913         case IPV6_V6ONLY:
1914         case IPV6_RECVPKTINFO:
1915         case IPV6_UNICAST_HOPS:
1916         case IPV6_MULTICAST_HOPS:
1917         case IPV6_MULTICAST_LOOP:
1918         case IPV6_RECVERR:
1919         case IPV6_RECVHOPLIMIT:
1920         case IPV6_2292HOPLIMIT:
1921         case IPV6_CHECKSUM:
1922         case IPV6_ADDRFORM:
1923         case IPV6_2292PKTINFO:
1924         case IPV6_RECVTCLASS:
1925         case IPV6_RECVRTHDR:
1926         case IPV6_2292RTHDR:
1927         case IPV6_RECVHOPOPTS:
1928         case IPV6_2292HOPOPTS:
1929         case IPV6_RECVDSTOPTS:
1930         case IPV6_2292DSTOPTS:
1931         case IPV6_TCLASS:
1932 #ifdef IPV6_RECVPATHMTU
1933         case IPV6_RECVPATHMTU:
1934 #endif
1935 #ifdef IPV6_TRANSPARENT
1936         case IPV6_TRANSPARENT:
1937 #endif
1938 #ifdef IPV6_FREEBIND
1939         case IPV6_FREEBIND:
1940 #endif
1941 #ifdef IPV6_RECVORIGDSTADDR
1942         case IPV6_RECVORIGDSTADDR:
1943 #endif
1944             val = 0;
1945             if (optlen < sizeof(uint32_t)) {
1946                 return -TARGET_EINVAL;
1947             }
1948             if (get_user_u32(val, optval_addr)) {
1949                 return -TARGET_EFAULT;
1950             }
1951             ret = get_errno(setsockopt(sockfd, level, optname,
1952                                        &val, sizeof(val)));
1953             break;
1954         case IPV6_PKTINFO:
1955         {
1956             struct in6_pktinfo pki;
1957 
1958             if (optlen < sizeof(pki)) {
1959                 return -TARGET_EINVAL;
1960             }
1961 
1962             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1963                 return -TARGET_EFAULT;
1964             }
1965 
1966             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1967 
1968             ret = get_errno(setsockopt(sockfd, level, optname,
1969                                        &pki, sizeof(pki)));
1970             break;
1971         }
1972         case IPV6_ADD_MEMBERSHIP:
1973         case IPV6_DROP_MEMBERSHIP:
1974         {
1975             struct ipv6_mreq ipv6mreq;
1976 
1977             if (optlen < sizeof(ipv6mreq)) {
1978                 return -TARGET_EINVAL;
1979             }
1980 
1981             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1982                 return -TARGET_EFAULT;
1983             }
1984 
1985             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1986 
1987             ret = get_errno(setsockopt(sockfd, level, optname,
1988                                        &ipv6mreq, sizeof(ipv6mreq)));
1989             break;
1990         }
1991         default:
1992             goto unimplemented;
1993         }
1994         break;
1995     case SOL_ICMPV6:
1996         switch (optname) {
1997         case ICMPV6_FILTER:
1998         {
1999             struct icmp6_filter icmp6f;
2000 
2001             if (optlen > sizeof(icmp6f)) {
2002                 optlen = sizeof(icmp6f);
2003             }
2004 
2005             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2006                 return -TARGET_EFAULT;
2007             }
2008 
2009             for (val = 0; val < 8; val++) {
2010                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2011             }
2012 
2013             ret = get_errno(setsockopt(sockfd, level, optname,
2014                                        &icmp6f, optlen));
2015             break;
2016         }
2017         default:
2018             goto unimplemented;
2019         }
2020         break;
2021     case SOL_RAW:
2022         switch (optname) {
2023         case ICMP_FILTER:
2024         case IPV6_CHECKSUM:
2025             /* those take an u32 value */
2026             if (optlen < sizeof(uint32_t)) {
2027                 return -TARGET_EINVAL;
2028             }
2029 
2030             if (get_user_u32(val, optval_addr)) {
2031                 return -TARGET_EFAULT;
2032             }
2033             ret = get_errno(setsockopt(sockfd, level, optname,
2034                                        &val, sizeof(val)));
2035             break;
2036 
2037         default:
2038             goto unimplemented;
2039         }
2040         break;
2041 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2042     case SOL_ALG:
2043         switch (optname) {
2044         case ALG_SET_KEY:
2045         {
2046             char *alg_key = g_try_malloc(optlen);
2047 
2048             if (!alg_key) {
2049                 return -TARGET_ENOMEM;
2050             }
2051             if (copy_from_user(alg_key, optval_addr, optlen)) {
2052                 g_free(alg_key);
2053                 return -TARGET_EFAULT;
2054             }
2055             ret = get_errno(setsockopt(sockfd, level, optname,
2056                                        alg_key, optlen));
2057             g_free(alg_key);
2058             break;
2059         }
2060         case ALG_SET_AEAD_AUTHSIZE:
2061         {
2062             ret = get_errno(setsockopt(sockfd, level, optname,
2063                                        NULL, optlen));
2064             break;
2065         }
2066         default:
2067             goto unimplemented;
2068         }
2069         break;
2070 #endif
2071     case TARGET_SOL_SOCKET:
2072         switch (optname) {
2073         case TARGET_SO_RCVTIMEO:
2074         {
2075                 struct timeval tv;
2076 
2077                 optname = SO_RCVTIMEO;
2078 
2079 set_timeout:
2080                 if (optlen != sizeof(struct target_timeval)) {
2081                     return -TARGET_EINVAL;
2082                 }
2083 
2084                 if (copy_from_user_timeval(&tv, optval_addr)) {
2085                     return -TARGET_EFAULT;
2086                 }
2087 
2088                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2089                                 &tv, sizeof(tv)));
2090                 return ret;
2091         }
2092         case TARGET_SO_SNDTIMEO:
2093                 optname = SO_SNDTIMEO;
2094                 goto set_timeout;
2095         case TARGET_SO_ATTACH_FILTER:
2096         {
2097                 struct target_sock_fprog *tfprog;
2098                 struct target_sock_filter *tfilter;
2099                 struct sock_fprog fprog;
2100                 struct sock_filter *filter;
2101                 int i;
2102 
2103                 if (optlen != sizeof(*tfprog)) {
2104                     return -TARGET_EINVAL;
2105                 }
2106                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2107                     return -TARGET_EFAULT;
2108                 }
2109                 if (!lock_user_struct(VERIFY_READ, tfilter,
2110                                       tswapal(tfprog->filter), 0)) {
2111                     unlock_user_struct(tfprog, optval_addr, 1);
2112                     return -TARGET_EFAULT;
2113                 }
2114 
2115                 fprog.len = tswap16(tfprog->len);
2116                 filter = g_try_new(struct sock_filter, fprog.len);
2117                 if (filter == NULL) {
2118                     unlock_user_struct(tfilter, tfprog->filter, 1);
2119                     unlock_user_struct(tfprog, optval_addr, 1);
2120                     return -TARGET_ENOMEM;
2121                 }
2122                 for (i = 0; i < fprog.len; i++) {
2123                     filter[i].code = tswap16(tfilter[i].code);
2124                     filter[i].jt = tfilter[i].jt;
2125                     filter[i].jf = tfilter[i].jf;
2126                     filter[i].k = tswap32(tfilter[i].k);
2127                 }
2128                 fprog.filter = filter;
2129 
2130                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2131                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2132                 g_free(filter);
2133 
2134                 unlock_user_struct(tfilter, tfprog->filter, 1);
2135                 unlock_user_struct(tfprog, optval_addr, 1);
2136                 return ret;
2137         }
2138         case TARGET_SO_BINDTODEVICE:
2139         {
2140                 char *dev_ifname, *addr_ifname;
2141 
2142                 if (optlen > IFNAMSIZ - 1) {
2143                     optlen = IFNAMSIZ - 1;
2144                 }
2145                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2146                 if (!dev_ifname) {
2147                     return -TARGET_EFAULT;
2148                 }
2149                 optname = SO_BINDTODEVICE;
2150                 addr_ifname = alloca(IFNAMSIZ);
2151                 memcpy(addr_ifname, dev_ifname, optlen);
2152                 addr_ifname[optlen] = 0;
2153                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2154                                            addr_ifname, optlen));
2155                 unlock_user(dev_ifname, optval_addr, 0);
2156                 return ret;
2157         }
2158         case TARGET_SO_LINGER:
2159         {
2160                 struct linger lg;
2161                 struct target_linger *tlg;
2162 
2163                 if (optlen != sizeof(struct target_linger)) {
2164                     return -TARGET_EINVAL;
2165                 }
2166                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2167                     return -TARGET_EFAULT;
2168                 }
2169                 __get_user(lg.l_onoff, &tlg->l_onoff);
2170                 __get_user(lg.l_linger, &tlg->l_linger);
2171                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2172                                 &lg, sizeof(lg)));
2173                 unlock_user_struct(tlg, optval_addr, 0);
2174                 return ret;
2175         }
2176         /* Options with 'int' argument.  */
2177         case TARGET_SO_DEBUG:
2178                 optname = SO_DEBUG;
2179                 break;
2180         case TARGET_SO_REUSEADDR:
2181                 optname = SO_REUSEADDR;
2182                 break;
2183 #ifdef SO_REUSEPORT
2184         case TARGET_SO_REUSEPORT:
2185                 optname = SO_REUSEPORT;
2186                 break;
2187 #endif
2188         case TARGET_SO_TYPE:
2189                 optname = SO_TYPE;
2190                 break;
2191         case TARGET_SO_ERROR:
2192                 optname = SO_ERROR;
2193                 break;
2194         case TARGET_SO_DONTROUTE:
2195                 optname = SO_DONTROUTE;
2196                 break;
2197         case TARGET_SO_BROADCAST:
2198                 optname = SO_BROADCAST;
2199                 break;
2200         case TARGET_SO_SNDBUF:
2201                 optname = SO_SNDBUF;
2202                 break;
2203         case TARGET_SO_SNDBUFFORCE:
2204                 optname = SO_SNDBUFFORCE;
2205                 break;
2206         case TARGET_SO_RCVBUF:
2207                 optname = SO_RCVBUF;
2208                 break;
2209         case TARGET_SO_RCVBUFFORCE:
2210                 optname = SO_RCVBUFFORCE;
2211                 break;
2212         case TARGET_SO_KEEPALIVE:
2213                 optname = SO_KEEPALIVE;
2214                 break;
2215         case TARGET_SO_OOBINLINE:
2216                 optname = SO_OOBINLINE;
2217                 break;
2218         case TARGET_SO_NO_CHECK:
2219                 optname = SO_NO_CHECK;
2220                 break;
2221         case TARGET_SO_PRIORITY:
2222                 optname = SO_PRIORITY;
2223                 break;
2224 #ifdef SO_BSDCOMPAT
2225         case TARGET_SO_BSDCOMPAT:
2226                 optname = SO_BSDCOMPAT;
2227                 break;
2228 #endif
2229         case TARGET_SO_PASSCRED:
2230                 optname = SO_PASSCRED;
2231                 break;
2232         case TARGET_SO_PASSSEC:
2233                 optname = SO_PASSSEC;
2234                 break;
2235         case TARGET_SO_TIMESTAMP:
2236                 optname = SO_TIMESTAMP;
2237                 break;
2238         case TARGET_SO_RCVLOWAT:
2239                 optname = SO_RCVLOWAT;
2240                 break;
2241         default:
2242             goto unimplemented;
2243         }
2244         if (optlen < sizeof(uint32_t))
2245             return -TARGET_EINVAL;
2246 
2247         if (get_user_u32(val, optval_addr))
2248             return -TARGET_EFAULT;
2249         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2250         break;
2251     default:
2252     unimplemented:
2253         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2254         ret = -TARGET_ENOPROTOOPT;
2255     }
2256     return ret;
2257 }
2258 
2259 /* do_getsockopt() must return target values and target errnos. */
2260 static abi_long do_getsockopt(int sockfd, int level, int optname,
2261                               abi_ulong optval_addr, abi_ulong optlen)
2262 {
2263     abi_long ret;
2264     int len, val;
2265     socklen_t lv;
2266 
2267     switch(level) {
2268     case TARGET_SOL_SOCKET:
2269         level = SOL_SOCKET;
2270         switch (optname) {
2271         /* These don't just return a single integer */
2272         case TARGET_SO_RCVTIMEO:
2273         case TARGET_SO_SNDTIMEO:
2274         case TARGET_SO_PEERNAME:
2275             goto unimplemented;
2276         case TARGET_SO_PEERCRED: {
2277             struct ucred cr;
2278             socklen_t crlen;
2279             struct target_ucred *tcr;
2280 
2281             if (get_user_u32(len, optlen)) {
2282                 return -TARGET_EFAULT;
2283             }
2284             if (len < 0) {
2285                 return -TARGET_EINVAL;
2286             }
2287 
2288             crlen = sizeof(cr);
2289             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2290                                        &cr, &crlen));
2291             if (ret < 0) {
2292                 return ret;
2293             }
2294             if (len > crlen) {
2295                 len = crlen;
2296             }
2297             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2298                 return -TARGET_EFAULT;
2299             }
2300             __put_user(cr.pid, &tcr->pid);
2301             __put_user(cr.uid, &tcr->uid);
2302             __put_user(cr.gid, &tcr->gid);
2303             unlock_user_struct(tcr, optval_addr, 1);
2304             if (put_user_u32(len, optlen)) {
2305                 return -TARGET_EFAULT;
2306             }
2307             break;
2308         }
2309         case TARGET_SO_LINGER:
2310         {
2311             struct linger lg;
2312             socklen_t lglen;
2313             struct target_linger *tlg;
2314 
2315             if (get_user_u32(len, optlen)) {
2316                 return -TARGET_EFAULT;
2317             }
2318             if (len < 0) {
2319                 return -TARGET_EINVAL;
2320             }
2321 
2322             lglen = sizeof(lg);
2323             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2324                                        &lg, &lglen));
2325             if (ret < 0) {
2326                 return ret;
2327             }
2328             if (len > lglen) {
2329                 len = lglen;
2330             }
2331             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2332                 return -TARGET_EFAULT;
2333             }
2334             __put_user(lg.l_onoff, &tlg->l_onoff);
2335             __put_user(lg.l_linger, &tlg->l_linger);
2336             unlock_user_struct(tlg, optval_addr, 1);
2337             if (put_user_u32(len, optlen)) {
2338                 return -TARGET_EFAULT;
2339             }
2340             break;
2341         }
2342         /* Options with 'int' argument.  */
2343         case TARGET_SO_DEBUG:
2344             optname = SO_DEBUG;
2345             goto int_case;
2346         case TARGET_SO_REUSEADDR:
2347             optname = SO_REUSEADDR;
2348             goto int_case;
2349 #ifdef SO_REUSEPORT
2350         case TARGET_SO_REUSEPORT:
2351             optname = SO_REUSEPORT;
2352             goto int_case;
2353 #endif
2354         case TARGET_SO_TYPE:
2355             optname = SO_TYPE;
2356             goto int_case;
2357         case TARGET_SO_ERROR:
2358             optname = SO_ERROR;
2359             goto int_case;
2360         case TARGET_SO_DONTROUTE:
2361             optname = SO_DONTROUTE;
2362             goto int_case;
2363         case TARGET_SO_BROADCAST:
2364             optname = SO_BROADCAST;
2365             goto int_case;
2366         case TARGET_SO_SNDBUF:
2367             optname = SO_SNDBUF;
2368             goto int_case;
2369         case TARGET_SO_RCVBUF:
2370             optname = SO_RCVBUF;
2371             goto int_case;
2372         case TARGET_SO_KEEPALIVE:
2373             optname = SO_KEEPALIVE;
2374             goto int_case;
2375         case TARGET_SO_OOBINLINE:
2376             optname = SO_OOBINLINE;
2377             goto int_case;
2378         case TARGET_SO_NO_CHECK:
2379             optname = SO_NO_CHECK;
2380             goto int_case;
2381         case TARGET_SO_PRIORITY:
2382             optname = SO_PRIORITY;
2383             goto int_case;
2384 #ifdef SO_BSDCOMPAT
2385         case TARGET_SO_BSDCOMPAT:
2386             optname = SO_BSDCOMPAT;
2387             goto int_case;
2388 #endif
2389         case TARGET_SO_PASSCRED:
2390             optname = SO_PASSCRED;
2391             goto int_case;
2392         case TARGET_SO_TIMESTAMP:
2393             optname = SO_TIMESTAMP;
2394             goto int_case;
2395         case TARGET_SO_RCVLOWAT:
2396             optname = SO_RCVLOWAT;
2397             goto int_case;
2398         case TARGET_SO_ACCEPTCONN:
2399             optname = SO_ACCEPTCONN;
2400             goto int_case;
2401         default:
2402             goto int_case;
2403         }
2404         break;
2405     case SOL_TCP:
2406         /* TCP options all take an 'int' value.  */
2407     int_case:
2408         if (get_user_u32(len, optlen))
2409             return -TARGET_EFAULT;
2410         if (len < 0)
2411             return -TARGET_EINVAL;
2412         lv = sizeof(lv);
2413         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2414         if (ret < 0)
2415             return ret;
2416         if (optname == SO_TYPE) {
2417             val = host_to_target_sock_type(val);
2418         }
2419         if (len > lv)
2420             len = lv;
2421         if (len == 4) {
2422             if (put_user_u32(val, optval_addr))
2423                 return -TARGET_EFAULT;
2424         } else {
2425             if (put_user_u8(val, optval_addr))
2426                 return -TARGET_EFAULT;
2427         }
2428         if (put_user_u32(len, optlen))
2429             return -TARGET_EFAULT;
2430         break;
2431     case SOL_IP:
2432         switch(optname) {
2433         case IP_TOS:
2434         case IP_TTL:
2435         case IP_HDRINCL:
2436         case IP_ROUTER_ALERT:
2437         case IP_RECVOPTS:
2438         case IP_RETOPTS:
2439         case IP_PKTINFO:
2440         case IP_MTU_DISCOVER:
2441         case IP_RECVERR:
2442         case IP_RECVTOS:
2443 #ifdef IP_FREEBIND
2444         case IP_FREEBIND:
2445 #endif
2446         case IP_MULTICAST_TTL:
2447         case IP_MULTICAST_LOOP:
2448             if (get_user_u32(len, optlen))
2449                 return -TARGET_EFAULT;
2450             if (len < 0)
2451                 return -TARGET_EINVAL;
2452             lv = sizeof(lv);
2453             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2454             if (ret < 0)
2455                 return ret;
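            /*
             * Like the kernel, hand back a single byte when the guest
             * supplied a buffer smaller than an int and the value fits
             * in one byte; otherwise return a full int.
             */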
2456             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2457                 len = 1;
2458                 if (put_user_u32(len, optlen)
2459                     || put_user_u8(val, optval_addr))
2460                     return -TARGET_EFAULT;
2461             } else {
2462                 if (len > sizeof(int))
2463                     len = sizeof(int);
2464                 if (put_user_u32(len, optlen)
2465                     || put_user_u32(val, optval_addr))
2466                     return -TARGET_EFAULT;
2467             }
2468             break;
2469         default:
2470             ret = -TARGET_ENOPROTOOPT;
2471             break;
2472         }
2473         break;
2474     case SOL_IPV6:
2475         switch (optname) {
2476         case IPV6_MTU_DISCOVER:
2477         case IPV6_MTU:
2478         case IPV6_V6ONLY:
2479         case IPV6_RECVPKTINFO:
2480         case IPV6_UNICAST_HOPS:
2481         case IPV6_MULTICAST_HOPS:
2482         case IPV6_MULTICAST_LOOP:
2483         case IPV6_RECVERR:
2484         case IPV6_RECVHOPLIMIT:
2485         case IPV6_2292HOPLIMIT:
2486         case IPV6_CHECKSUM:
2487         case IPV6_ADDRFORM:
2488         case IPV6_2292PKTINFO:
2489         case IPV6_RECVTCLASS:
2490         case IPV6_RECVRTHDR:
2491         case IPV6_2292RTHDR:
2492         case IPV6_RECVHOPOPTS:
2493         case IPV6_2292HOPOPTS:
2494         case IPV6_RECVDSTOPTS:
2495         case IPV6_2292DSTOPTS:
2496         case IPV6_TCLASS:
2497 #ifdef IPV6_RECVPATHMTU
2498         case IPV6_RECVPATHMTU:
2499 #endif
2500 #ifdef IPV6_TRANSPARENT
2501         case IPV6_TRANSPARENT:
2502 #endif
2503 #ifdef IPV6_FREEBIND
2504         case IPV6_FREEBIND:
2505 #endif
2506 #ifdef IPV6_RECVORIGDSTADDR
2507         case IPV6_RECVORIGDSTADDR:
2508 #endif
2509             if (get_user_u32(len, optlen))
2510                 return -TARGET_EFAULT;
2511             if (len < 0)
2512                 return -TARGET_EINVAL;
2513             lv = sizeof(lv);
2514             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2515             if (ret < 0)
2516                 return ret;
2517             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2518                 len = 1;
2519                 if (put_user_u32(len, optlen)
2520                     || put_user_u8(val, optval_addr))
2521                     return -TARGET_EFAULT;
2522             } else {
2523                 if (len > sizeof(int))
2524                     len = sizeof(int);
2525                 if (put_user_u32(len, optlen)
2526                     || put_user_u32(val, optval_addr))
2527                     return -TARGET_EFAULT;
2528             }
2529             break;
2530         default:
2531             ret = -TARGET_ENOPROTOOPT;
2532             break;
2533         }
2534         break;
2535     default:
2536     unimplemented:
2537         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2538                  level, optname);
2539         ret = -TARGET_EOPNOTSUPP;
2540         break;
2541     }
2542     return ret;
2543 }
2544 
2545 /* Convert target low/high pair representing file offset into the host
2546  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2547  * as the kernel doesn't handle them either.
2548  */
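/*
 * For example, a 32-bit guest passing tlow = 0x89abcdef and
 * thigh = 0x01234567 yields off = 0x0123456789abcdef; on a 64-bit host
 * this stores *hlow = 0x0123456789abcdef and *hhigh = 0.  The shifts
 * are split in two so that we never shift by a full word width, which
 * would be undefined behaviour in C.
 */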
2549 static void target_to_host_low_high(abi_ulong tlow,
2550                                     abi_ulong thigh,
2551                                     unsigned long *hlow,
2552                                     unsigned long *hhigh)
2553 {
2554     uint64_t off = tlow |
2555         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2556         TARGET_LONG_BITS / 2;
2557 
2558     *hlow = off;
2559     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2560 }
2561 
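/*
 * Lock a guest iovec array into host memory and build the matching host
 * iovec.  Returns NULL with errno set on failure.  After the first
 * entry, buffers with bad guest addresses are given zero length so that
 * the syscall performs a partial transfer instead of failing outright.
 */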
2562 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2563                                 abi_ulong count, int copy)
2564 {
2565     struct target_iovec *target_vec;
2566     struct iovec *vec;
2567     abi_ulong total_len, max_len;
2568     int i;
2569     int err = 0;
2570     bool bad_address = false;
2571 
2572     if (count == 0) {
2573         errno = 0;
2574         return NULL;
2575     }
2576     if (count > IOV_MAX) {
2577         errno = EINVAL;
2578         return NULL;
2579     }
2580 
2581     vec = g_try_new0(struct iovec, count);
2582     if (vec == NULL) {
2583         errno = ENOMEM;
2584         return NULL;
2585     }
2586 
2587     target_vec = lock_user(VERIFY_READ, target_addr,
2588                            count * sizeof(struct target_iovec), 1);
2589     if (target_vec == NULL) {
2590         err = EFAULT;
2591         goto fail2;
2592     }
2593 
2594     /* ??? If host page size > target page size, this will result in a
2595        value larger than what we can actually support.  */
2596     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2597     total_len = 0;
2598 
2599     for (i = 0; i < count; i++) {
2600         abi_ulong base = tswapal(target_vec[i].iov_base);
2601         abi_long len = tswapal(target_vec[i].iov_len);
2602 
2603         if (len < 0) {
2604             err = EINVAL;
2605             goto fail;
2606         } else if (len == 0) {
2607             /* Zero length pointer is ignored.  */
2608             vec[i].iov_base = 0;
2609         } else {
2610             vec[i].iov_base = lock_user(type, base, len, copy);
2611             /* If the first buffer pointer is bad, this is a fault.  But
2612              * subsequent bad buffers will result in a partial write; this
2613              * is realized by filling the vector with null pointers and
2614              * zero lengths. */
2615             if (!vec[i].iov_base) {
2616                 if (i == 0) {
2617                     err = EFAULT;
2618                     goto fail;
2619                 } else {
2620                     bad_address = true;
2621                 }
2622             }
2623             if (bad_address) {
2624                 len = 0;
2625             }
2626             if (len > max_len - total_len) {
2627                 len = max_len - total_len;
2628             }
2629         }
2630         vec[i].iov_len = len;
2631         total_len += len;
2632     }
2633 
2634     unlock_user(target_vec, target_addr, 0);
2635     return vec;
2636 
2637  fail:
2638     while (--i >= 0) {
2639         if (tswapal(target_vec[i].iov_len) > 0) {
2640             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2641         }
2642     }
2643     unlock_user(target_vec, target_addr, 0);
2644  fail2:
2645     g_free(vec);
2646     errno = err;
2647     return NULL;
2648 }
2649 
2650 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2651                          abi_ulong count, int copy)
2652 {
2653     struct target_iovec *target_vec;
2654     int i;
2655 
2656     target_vec = lock_user(VERIFY_READ, target_addr,
2657                            count * sizeof(struct target_iovec), 1);
2658     if (target_vec) {
2659         for (i = 0; i < count; i++) {
2660             abi_ulong base = tswapal(target_vec[i].iov_base);
2661             abi_long len = tswapal(target_vec[i].iov_len);
2662             if (len < 0) {
2663                 break;
2664             }
2665             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2666         }
2667         unlock_user(target_vec, target_addr, 0);
2668     }
2669 
2670     g_free(vec);
2671 }
2672 
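/*
 * Translate the guest's socket type and its embedded SOCK_CLOEXEC /
 * SOCK_NONBLOCK flags into host values.  Flags the host cannot express
 * at socket() time are either rejected here or emulated afterwards by
 * sock_flags_fixup().
 */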
2673 static inline int target_to_host_sock_type(int *type)
2674 {
2675     int host_type = 0;
2676     int target_type = *type;
2677 
2678     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2679     case TARGET_SOCK_DGRAM:
2680         host_type = SOCK_DGRAM;
2681         break;
2682     case TARGET_SOCK_STREAM:
2683         host_type = SOCK_STREAM;
2684         break;
2685     default:
2686         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2687         break;
2688     }
2689     if (target_type & TARGET_SOCK_CLOEXEC) {
2690 #if defined(SOCK_CLOEXEC)
2691         host_type |= SOCK_CLOEXEC;
2692 #else
2693         return -TARGET_EINVAL;
2694 #endif
2695     }
2696     if (target_type & TARGET_SOCK_NONBLOCK) {
2697 #if defined(SOCK_NONBLOCK)
2698         host_type |= SOCK_NONBLOCK;
2699 #elif !defined(O_NONBLOCK)
2700         return -TARGET_EINVAL;
2701 #endif
2702     }
2703     *type = host_type;
2704     return 0;
2705 }
2706 
2707 /* Try to emulate socket type flags after socket creation.  */
2708 static int sock_flags_fixup(int fd, int target_type)
2709 {
2710 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2711     if (target_type & TARGET_SOCK_NONBLOCK) {
2712         int flags = fcntl(fd, F_GETFL);
2713         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2714             close(fd);
2715             return -TARGET_EINVAL;
2716         }
2717     }
2718 #endif
2719     return fd;
2720 }
2721 
2722 /* do_socket() must return target values and target errnos. */
2723 static abi_long do_socket(int domain, int type, int protocol)
2724 {
2725     int target_type = type;
2726     int ret;
2727 
2728     ret = target_to_host_sock_type(&type);
2729     if (ret) {
2730         return ret;
2731     }
2732 
2733     if (domain == PF_NETLINK && !(
2734 #ifdef CONFIG_RTNETLINK
2735          protocol == NETLINK_ROUTE ||
2736 #endif
2737          protocol == NETLINK_KOBJECT_UEVENT ||
2738          protocol == NETLINK_AUDIT)) {
2739         return -EPFNOSUPPORT;
2740     }
2741 
2742     if (domain == AF_PACKET ||
2743         (domain == AF_INET && type == SOCK_PACKET)) {
2744         protocol = tswap16(protocol);
2745     }
2746 
2747     ret = get_errno(socket(domain, type, protocol));
2748     if (ret >= 0) {
2749         ret = sock_flags_fixup(ret, target_type);
2750         if (type == SOCK_PACKET) {
2751             /* Handle an obsolete case: if the socket type is
2752              * SOCK_PACKET, bind by name.
2753              */
2754             fd_trans_register(ret, &target_packet_trans);
2755         } else if (domain == PF_NETLINK) {
2756             switch (protocol) {
2757 #ifdef CONFIG_RTNETLINK
2758             case NETLINK_ROUTE:
2759                 fd_trans_register(ret, &target_netlink_route_trans);
2760                 break;
2761 #endif
2762             case NETLINK_KOBJECT_UEVENT:
2763                 /* nothing to do: messages are strings */
2764                 break;
2765             case NETLINK_AUDIT:
2766                 fd_trans_register(ret, &target_netlink_audit_trans);
2767                 break;
2768             default:
2769                 g_assert_not_reached();
2770             }
2771         }
2772     }
2773     return ret;
2774 }
2775 
2776 /* do_bind() must return target values and target errnos. */
2777 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2778                         socklen_t addrlen)
2779 {
2780     void *addr;
2781     abi_long ret;
2782 
2783     if ((int)addrlen < 0) {
2784         return -TARGET_EINVAL;
2785     }
2786 
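    /* One extra byte so that target_to_host_sockaddr() can NUL-terminate
     * an AF_UNIX sun_path whose length did not include the terminator. */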
2787     addr = alloca(addrlen+1);
2788 
2789     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2790     if (ret)
2791         return ret;
2792 
2793     return get_errno(bind(sockfd, addr, addrlen));
2794 }
2795 
2796 /* do_connect() must return target values and target errnos. */
2797 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2798                            socklen_t addrlen)
2799 {
2800     void *addr;
2801     abi_long ret;
2802 
2803     if ((int)addrlen < 0) {
2804         return -TARGET_EINVAL;
2805     }
2806 
2807     addr = alloca(addrlen+1);
2808 
2809     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2810     if (ret)
2811         return ret;
2812 
2813     return get_errno(safe_connect(sockfd, addr, addrlen));
2814 }
2815 
2816 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2817 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2818                                       int flags, int send)
2819 {
2820     abi_long ret, len;
2821     struct msghdr msg;
2822     abi_ulong count;
2823     struct iovec *vec;
2824     abi_ulong target_vec;
2825 
2826     if (msgp->msg_name) {
2827         msg.msg_namelen = tswap32(msgp->msg_namelen);
2828         msg.msg_name = alloca(msg.msg_namelen+1);
2829         ret = target_to_host_sockaddr(fd, msg.msg_name,
2830                                       tswapal(msgp->msg_name),
2831                                       msg.msg_namelen);
2832         if (ret == -TARGET_EFAULT) {
2833             /* For connected sockets msg_name and msg_namelen must
2834              * be ignored, so returning EFAULT immediately is wrong.
2835              * Instead, pass a bad msg_name to the host kernel, and
2836              * let it decide whether to return EFAULT or not.
2837              */
2838             msg.msg_name = (void *)-1;
2839         } else if (ret) {
2840             goto out2;
2841         }
2842     } else {
2843         msg.msg_name = NULL;
2844         msg.msg_namelen = 0;
2845     }
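    /* Allocate twice the guest's control buffer: host cmsg headers and
     * alignment may be larger than the target's (see the overflow check
     * in target_to_host_cmsg()). */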
2846     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2847     msg.msg_control = alloca(msg.msg_controllen);
2848     memset(msg.msg_control, 0, msg.msg_controllen);
2849 
2850     msg.msg_flags = tswap32(msgp->msg_flags);
2851 
2852     count = tswapal(msgp->msg_iovlen);
2853     target_vec = tswapal(msgp->msg_iov);
2854 
2855     if (count > IOV_MAX) {
2856         /* sendmsg/recvmsg return a different errno for this condition than
2857          * readv/writev do, so we must catch it here before lock_iovec() does.
2858          */
2859         ret = -TARGET_EMSGSIZE;
2860         goto out2;
2861     }
2862 
2863     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2864                      target_vec, count, send);
2865     if (vec == NULL) {
2866         ret = -host_to_target_errno(errno);
2867         goto out2;
2868     }
2869     msg.msg_iovlen = count;
2870     msg.msg_iov = vec;
2871 
2872     if (send) {
2873         if (fd_trans_target_to_host_data(fd)) {
2874             void *host_msg;
2875 
2876             host_msg = g_malloc(msg.msg_iov->iov_len);
2877             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2878             ret = fd_trans_target_to_host_data(fd)(host_msg,
2879                                                    msg.msg_iov->iov_len);
2880             if (ret >= 0) {
2881                 msg.msg_iov->iov_base = host_msg;
2882                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2883             }
2884             g_free(host_msg);
2885         } else {
2886             ret = target_to_host_cmsg(&msg, msgp);
2887             if (ret == 0) {
2888                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2889             }
2890         }
2891     } else {
2892         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2893         if (!is_error(ret)) {
2894             len = ret;
2895             if (fd_trans_host_to_target_data(fd)) {
2896                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2897                                                MIN(msg.msg_iov->iov_len, len));
2898             } else {
2899                 ret = host_to_target_cmsg(msgp, &msg);
2900             }
2901             if (!is_error(ret)) {
2902                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2903                 msgp->msg_flags = tswap32(msg.msg_flags);
2904                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2905                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2906                                     msg.msg_name, msg.msg_namelen);
2907                     if (ret) {
2908                         goto out;
2909                     }
2910                 }
2911 
2912                 ret = len;
2913             }
2914         }
2915     }
2916 
2917 out:
2918     unlock_iovec(vec, target_vec, count, !send);
2919 out2:
2920     return ret;
2921 }
2922 
2923 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2924                                int flags, int send)
2925 {
2926     abi_long ret;
2927     struct target_msghdr *msgp;
2928 
2929     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2930                           msgp,
2931                           target_msg,
2932                           send ? 1 : 0)) {
2933         return -TARGET_EFAULT;
2934     }
2935     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2936     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2937     return ret;
2938 }
2939 
2940 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2941  * so it might not have this *mmsg-specific flag either.
2942  */
2943 #ifndef MSG_WAITFORONE
2944 #define MSG_WAITFORONE 0x10000
2945 #endif
2946 
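/*
 * Emulate sendmmsg()/recvmmsg() by looping over do_sendrecvmsg_locked().
 * Like the kernel, return the number of datagrams transferred if any
 * succeeded, otherwise the error from the first message.
 */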
2947 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2948                                 unsigned int vlen, unsigned int flags,
2949                                 int send)
2950 {
2951     struct target_mmsghdr *mmsgp;
2952     abi_long ret = 0;
2953     int i;
2954 
2955     if (vlen > UIO_MAXIOV) {
2956         vlen = UIO_MAXIOV;
2957     }
2958 
2959     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2960     if (!mmsgp) {
2961         return -TARGET_EFAULT;
2962     }
2963 
2964     for (i = 0; i < vlen; i++) {
2965         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2966         if (is_error(ret)) {
2967             break;
2968         }
2969         mmsgp[i].msg_len = tswap32(ret);
2970         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2971         if (flags & MSG_WAITFORONE) {
2972             flags |= MSG_DONTWAIT;
2973         }
2974     }
2975 
2976     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2977 
2978     /* Return number of datagrams sent if we sent any at all;
2979      * otherwise return the error.
2980      */
2981     if (i) {
2982         return i;
2983     }
2984     return ret;
2985 }
2986 
2987 /* do_accept4() must return target values and target errnos. */
2988 static abi_long do_accept4(int fd, abi_ulong target_addr,
2989                            abi_ulong target_addrlen_addr, int flags)
2990 {
2991     socklen_t addrlen, ret_addrlen;
2992     void *addr;
2993     abi_long ret;
2994     int host_flags;
2995 
2996     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2997 
2998     if (target_addr == 0) {
2999         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3000     }
3001 
3002     /* linux returns EINVAL if addrlen pointer is invalid */
3003     if (get_user_u32(addrlen, target_addrlen_addr))
3004         return -TARGET_EINVAL;
3005 
3006     if ((int)addrlen < 0) {
3007         return -TARGET_EINVAL;
3008     }
3009 
3010     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3011         return -TARGET_EINVAL;
3012 
3013     addr = alloca(addrlen);
3014 
3015     ret_addrlen = addrlen;
3016     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3017     if (!is_error(ret)) {
3018         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3019         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3020             ret = -TARGET_EFAULT;
3021         }
3022     }
3023     return ret;
3024 }
3025 
3026 /* do_getpeername() must return target values and target errnos. */
3027 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3028                                abi_ulong target_addrlen_addr)
3029 {
3030     socklen_t addrlen, ret_addrlen;
3031     void *addr;
3032     abi_long ret;
3033 
3034     if (get_user_u32(addrlen, target_addrlen_addr))
3035         return -TARGET_EFAULT;
3036 
3037     if ((int)addrlen < 0) {
3038         return -TARGET_EINVAL;
3039     }
3040 
3041     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3042         return -TARGET_EFAULT;
3043 
3044     addr = alloca(addrlen);
3045 
3046     ret_addrlen = addrlen;
3047     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3048     if (!is_error(ret)) {
3049         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3050         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3051             ret = -TARGET_EFAULT;
3052         }
3053     }
3054     return ret;
3055 }
3056 
3057 /* do_getsockname() must return target values and target errnos. */
3058 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3059                                abi_ulong target_addrlen_addr)
3060 {
3061     socklen_t addrlen, ret_addrlen;
3062     void *addr;
3063     abi_long ret;
3064 
3065     if (get_user_u32(addrlen, target_addrlen_addr))
3066         return -TARGET_EFAULT;
3067 
3068     if ((int)addrlen < 0) {
3069         return -TARGET_EINVAL;
3070     }
3071 
3072     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3073         return -TARGET_EFAULT;
3074 
3075     addr = alloca(addrlen);
3076 
3077     ret_addrlen = addrlen;
3078     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3079     if (!is_error(ret)) {
3080         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3081         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3082             ret = -TARGET_EFAULT;
3083         }
3084     }
3085     return ret;
3086 }
3087 
3088 /* do_socketpair() must return target values and target errnos. */
3089 static abi_long do_socketpair(int domain, int type, int protocol,
3090                               abi_ulong target_tab_addr)
3091 {
3092     int tab[2];
3093     abi_long ret;
3094 
3095     target_to_host_sock_type(&type);
3096 
3097     ret = get_errno(socketpair(domain, type, protocol, tab));
3098     if (!is_error(ret)) {
3099         if (put_user_s32(tab[0], target_tab_addr)
3100             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3101             ret = -TARGET_EFAULT;
3102     }
3103     return ret;
3104 }
3105 
3106 /* do_sendto() must return target values and target errnos. */
3107 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3108                           abi_ulong target_addr, socklen_t addrlen)
3109 {
3110     void *addr;
3111     void *host_msg;
3112     void *copy_msg = NULL;
3113     abi_long ret;
3114 
3115     if ((int)addrlen < 0) {
3116         return -TARGET_EINVAL;
3117     }
3118 
3119     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3120     if (!host_msg)
3121         return -TARGET_EFAULT;
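         /*
          * If this fd registered a data translator, run the conversion on a
          * private heap copy so the guest buffer we locked for reading is
          * left unmodified.
          */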
3122     if (fd_trans_target_to_host_data(fd)) {
3123         copy_msg = host_msg;
3124         host_msg = g_malloc(len);
3125         memcpy(host_msg, copy_msg, len);
3126         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3127         if (ret < 0) {
3128             goto fail;
3129         }
3130     }
3131     if (target_addr) {
3132         addr = alloca(addrlen+1);
3133         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3134         if (ret) {
3135             goto fail;
3136         }
3137         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3138     } else {
3139         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3140     }
3141 fail:
3142     if (copy_msg) {
3143         g_free(host_msg);
3144         host_msg = copy_msg;
3145     }
3146     unlock_user(host_msg, msg, 0);
3147     return ret;
3148 }
3149 
3150 /* do_recvfrom() must return target values and target errnos. */
3151 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3152                             abi_ulong target_addr,
3153                             abi_ulong target_addrlen)
3154 {
3155     socklen_t addrlen, ret_addrlen;
3156     void *addr;
3157     void *host_msg;
3158     abi_long ret;
3159 
3160     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3161     if (!host_msg)
3162         return -TARGET_EFAULT;
3163     if (target_addr) {
3164         if (get_user_u32(addrlen, target_addrlen)) {
3165             ret = -TARGET_EFAULT;
3166             goto fail;
3167         }
3168         if ((int)addrlen < 0) {
3169             ret = -TARGET_EINVAL;
3170             goto fail;
3171         }
3172         addr = alloca(addrlen);
3173         ret_addrlen = addrlen;
3174         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3175                                       addr, &ret_addrlen));
3176     } else {
3177         addr = NULL; /* To keep compiler quiet.  */
3178         addrlen = 0; /* To keep compiler quiet.  */
3179         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3180     }
3181     if (!is_error(ret)) {
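             /*
              * If this fd registered a data translator, convert the received
              * payload in place before it is handed back to the guest.
              */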
3182         if (fd_trans_host_to_target_data(fd)) {
3183             abi_long trans;
3184             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3185             if (is_error(trans)) {
3186                 ret = trans;
3187                 goto fail;
3188             }
3189         }
3190         if (target_addr) {
3191             host_to_target_sockaddr(target_addr, addr,
3192                                     MIN(addrlen, ret_addrlen));
3193             if (put_user_u32(ret_addrlen, target_addrlen)) {
3194                 ret = -TARGET_EFAULT;
3195                 goto fail;
3196             }
3197         }
3198         unlock_user(host_msg, msg, len);
3199     } else {
3200 fail:
3201         unlock_user(host_msg, msg, 0);
3202     }
3203     return ret;
3204 }
3205 
3206 #ifdef TARGET_NR_socketcall
3207 /* do_socketcall() must return target values and target errnos. */
3208 static abi_long do_socketcall(int num, abi_ulong vptr)
3209 {
3210     static const unsigned nargs[] = { /* number of arguments per operation */
3211         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3212         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3213         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3214         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3215         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3216         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3217         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3218         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3219         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3220         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3221         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3222         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3223         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3224         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3225         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3226         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3227         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3228         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3229         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3230         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3231     };
3232     abi_long a[6]; /* max 6 args */
3233     unsigned i;
3234 
3235     /* check the range of the first argument num */
3236     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3237     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3238         return -TARGET_EINVAL;
3239     }
3240     /* ensure we have space for args */
3241     if (nargs[num] > ARRAY_SIZE(a)) {
3242         return -TARGET_EINVAL;
3243     }
3244     /* collect the arguments in a[] according to nargs[] */
3245     for (i = 0; i < nargs[num]; ++i) {
3246         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3247             return -TARGET_EFAULT;
3248         }
3249     }
3250     /* now when we have the args, invoke the appropriate underlying function */
3251     switch (num) {
3252     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3253         return do_socket(a[0], a[1], a[2]);
3254     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3255         return do_bind(a[0], a[1], a[2]);
3256     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3257         return do_connect(a[0], a[1], a[2]);
3258     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3259         return get_errno(listen(a[0], a[1]));
3260     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3261         return do_accept4(a[0], a[1], a[2], 0);
3262     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3263         return do_getsockname(a[0], a[1], a[2]);
3264     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3265         return do_getpeername(a[0], a[1], a[2]);
3266     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3267         return do_socketpair(a[0], a[1], a[2], a[3]);
3268     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3269         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3270     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3271         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3272     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3273         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3274     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3275         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3276     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3277         return get_errno(shutdown(a[0], a[1]));
3278     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3279         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3280     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3281         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3282     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3283         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3284     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3285         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3286     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3287         return do_accept4(a[0], a[1], a[2], a[3]);
3288     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3289         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3290     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3291         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3292     default:
3293         gemu_log("Unsupported socketcall: %d\n", num);
3294         return -TARGET_EINVAL;
3295     }
3296 }
3297 #endif
3298 
3299 #define N_SHM_REGIONS	32
3300 
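     /*
      * Book-keeping for guest shmat() attachments: do_shmat() records the
      * guest address and size of each attached segment here so that
      * do_shmdt() can clear the corresponding page flags again.
      */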
3301 static struct shm_region {
3302     abi_ulong start;
3303     abi_ulong size;
3304     bool in_use;
3305 } shm_regions[N_SHM_REGIONS];
3306 
3307 #ifndef TARGET_SEMID64_DS
3308 /* asm-generic version of this struct */
3309 struct target_semid64_ds
3310 {
3311   struct target_ipc_perm sem_perm;
3312   abi_ulong sem_otime;
3313 #if TARGET_ABI_BITS == 32
3314   abi_ulong __unused1;
3315 #endif
3316   abi_ulong sem_ctime;
3317 #if TARGET_ABI_BITS == 32
3318   abi_ulong __unused2;
3319 #endif
3320   abi_ulong sem_nsems;
3321   abi_ulong __unused3;
3322   abi_ulong __unused4;
3323 };
3324 #endif
3325 
3326 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3327                                                abi_ulong target_addr)
3328 {
3329     struct target_ipc_perm *target_ip;
3330     struct target_semid64_ds *target_sd;
3331 
3332     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3333         return -TARGET_EFAULT;
3334     target_ip = &(target_sd->sem_perm);
3335     host_ip->__key = tswap32(target_ip->__key);
3336     host_ip->uid = tswap32(target_ip->uid);
3337     host_ip->gid = tswap32(target_ip->gid);
3338     host_ip->cuid = tswap32(target_ip->cuid);
3339     host_ip->cgid = tswap32(target_ip->cgid);
3340 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3341     host_ip->mode = tswap32(target_ip->mode);
3342 #else
3343     host_ip->mode = tswap16(target_ip->mode);
3344 #endif
3345 #if defined(TARGET_PPC)
3346     host_ip->__seq = tswap32(target_ip->__seq);
3347 #else
3348     host_ip->__seq = tswap16(target_ip->__seq);
3349 #endif
3350     unlock_user_struct(target_sd, target_addr, 0);
3351     return 0;
3352 }
3353 
3354 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3355                                                struct ipc_perm *host_ip)
3356 {
3357     struct target_ipc_perm *target_ip;
3358     struct target_semid64_ds *target_sd;
3359 
3360     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3361         return -TARGET_EFAULT;
3362     target_ip = &(target_sd->sem_perm);
3363     target_ip->__key = tswap32(host_ip->__key);
3364     target_ip->uid = tswap32(host_ip->uid);
3365     target_ip->gid = tswap32(host_ip->gid);
3366     target_ip->cuid = tswap32(host_ip->cuid);
3367     target_ip->cgid = tswap32(host_ip->cgid);
3368 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3369     target_ip->mode = tswap32(host_ip->mode);
3370 #else
3371     target_ip->mode = tswap16(host_ip->mode);
3372 #endif
3373 #if defined(TARGET_PPC)
3374     target_ip->__seq = tswap32(host_ip->__seq);
3375 #else
3376     target_ip->__seq = tswap16(host_ip->__seq);
3377 #endif
3378     unlock_user_struct(target_sd, target_addr, 1);
3379     return 0;
3380 }
3381 
3382 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3383                                                abi_ulong target_addr)
3384 {
3385     struct target_semid64_ds *target_sd;
3386 
3387     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3388         return -TARGET_EFAULT;
3389     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3390         return -TARGET_EFAULT;
3391     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3392     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3393     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3394     unlock_user_struct(target_sd, target_addr, 0);
3395     return 0;
3396 }
3397 
3398 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3399                                                struct semid_ds *host_sd)
3400 {
3401     struct target_semid64_ds *target_sd;
3402 
3403     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3404         return -TARGET_EFAULT;
3405     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3406         return -TARGET_EFAULT;
3407     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3408     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3409     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3410     unlock_user_struct(target_sd, target_addr, 1);
3411     return 0;
3412 }
3413 
3414 struct target_seminfo {
3415     int semmap;
3416     int semmni;
3417     int semmns;
3418     int semmnu;
3419     int semmsl;
3420     int semopm;
3421     int semume;
3422     int semusz;
3423     int semvmx;
3424     int semaem;
3425 };
3426 
3427 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3428                                               struct seminfo *host_seminfo)
3429 {
3430     struct target_seminfo *target_seminfo;
3431     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3432         return -TARGET_EFAULT;
3433     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3434     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3435     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3436     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3437     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3438     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3439     __put_user(host_seminfo->semume, &target_seminfo->semume);
3440     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3441     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3442     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3443     unlock_user_struct(target_seminfo, target_addr, 1);
3444     return 0;
3445 }
3446 
3447 union semun {
3448 	int val;
3449 	struct semid_ds *buf;
3450 	unsigned short *array;
3451 	struct seminfo *__buf;
3452 };
3453 
3454 union target_semun {
3455 	int val;
3456 	abi_ulong buf;
3457 	abi_ulong array;
3458 	abi_ulong __buf;
3459 };
3460 
3461 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3462                                                abi_ulong target_addr)
3463 {
3464     int nsems;
3465     unsigned short *array;
3466     union semun semun;
3467     struct semid_ds semid_ds;
3468     int i, ret;
3469 
3470     semun.buf = &semid_ds;
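         /*
          * Use IPC_STAT to find out how many semaphores are in the set, so
          * we know how many unsigned short values to copy from the guest.
          */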
3471 
3472     ret = semctl(semid, 0, IPC_STAT, semun);
3473     if (ret == -1)
3474         return get_errno(ret);
3475 
3476     nsems = semid_ds.sem_nsems;
3477 
3478     *host_array = g_try_new(unsigned short, nsems);
3479     if (!*host_array) {
3480         return -TARGET_ENOMEM;
3481     }
3482     array = lock_user(VERIFY_READ, target_addr,
3483                       nsems*sizeof(unsigned short), 1);
3484     if (!array) {
3485         g_free(*host_array);
3486         return -TARGET_EFAULT;
3487     }
3488 
3489     for (i = 0; i < nsems; i++) {
3490         __get_user((*host_array)[i], &array[i]);
3491     }
3492     unlock_user(array, target_addr, 0);
3493 
3494     return 0;
3495 }
3496 
3497 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3498                                                unsigned short **host_array)
3499 {
3500     int nsems;
3501     unsigned short *array;
3502     union semun semun;
3503     struct semid_ds semid_ds;
3504     int i, ret;
3505 
3506     semun.buf = &semid_ds;
3507 
3508     ret = semctl(semid, 0, IPC_STAT, semun);
3509     if (ret == -1)
3510         return get_errno(ret);
3511 
3512     nsems = semid_ds.sem_nsems;
3513 
3514     array = lock_user(VERIFY_WRITE, target_addr,
3515                       nsems*sizeof(unsigned short), 0);
3516     if (!array)
3517         return -TARGET_EFAULT;
3518 
3519     for (i = 0; i < nsems; i++) {
3520         __put_user((*host_array)[i], &array[i]);
3521     }
3522     g_free(*host_array);
3523     unlock_user(array, target_addr, 1);
3524 
3525     return 0;
3526 }
3527 
3528 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3529                                  abi_ulong target_arg)
3530 {
3531     union target_semun target_su = { .buf = target_arg };
3532     union semun arg;
3533     struct semid_ds dsarg;
3534     unsigned short *array = NULL;
3535     struct seminfo seminfo;
3536     abi_long ret = -TARGET_EINVAL;
3537     abi_long err;
3538     cmd &= 0xff;
3539 
3540     switch (cmd) {
3541 	case GETVAL:
3542 	case SETVAL:
3543             /* In 64-bit cross-endian situations, we will erroneously pick up
3544              * the wrong half of the union for the "val" element.  To rectify
3545              * this, the entire 8-byte structure is byteswapped, followed by
3546              * a swap of the 4-byte val field. In other cases, the data is
3547              * already in proper host byte order. */
3548 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3549 		target_su.buf = tswapal(target_su.buf);
3550 		arg.val = tswap32(target_su.val);
3551 	    } else {
3552 		arg.val = target_su.val;
3553 	    }
3554             ret = get_errno(semctl(semid, semnum, cmd, arg));
3555             break;
3556 	case GETALL:
3557 	case SETALL:
3558             err = target_to_host_semarray(semid, &array, target_su.array);
3559             if (err)
3560                 return err;
3561             arg.array = array;
3562             ret = get_errno(semctl(semid, semnum, cmd, arg));
3563             err = host_to_target_semarray(semid, target_su.array, &array);
3564             if (err)
3565                 return err;
3566             break;
3567 	case IPC_STAT:
3568 	case IPC_SET:
3569 	case SEM_STAT:
3570             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3571             if (err)
3572                 return err;
3573             arg.buf = &dsarg;
3574             ret = get_errno(semctl(semid, semnum, cmd, arg));
3575             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3576             if (err)
3577                 return err;
3578             break;
3579 	case IPC_INFO:
3580 	case SEM_INFO:
3581             arg.__buf = &seminfo;
3582             ret = get_errno(semctl(semid, semnum, cmd, arg));
3583             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3584             if (err)
3585                 return err;
3586             break;
3587 	case IPC_RMID:
3588 	case GETPID:
3589 	case GETNCNT:
3590 	case GETZCNT:
3591             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3592             break;
3593     }
3594 
3595     return ret;
3596 }
3597 
3598 struct target_sembuf {
3599     unsigned short sem_num;
3600     short sem_op;
3601     short sem_flg;
3602 };
3603 
3604 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3605                                              abi_ulong target_addr,
3606                                              unsigned nsops)
3607 {
3608     struct target_sembuf *target_sembuf;
3609     int i;
3610 
3611     target_sembuf = lock_user(VERIFY_READ, target_addr,
3612                               nsops*sizeof(struct target_sembuf), 1);
3613     if (!target_sembuf)
3614         return -TARGET_EFAULT;
3615 
3616     for (i = 0; i < nsops; i++) {
3617         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3618         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3619         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3620     }
3621 
3622     unlock_user(target_sembuf, target_addr, 0);
3623 
3624     return 0;
3625 }
3626 
3627 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3628 {
3629     struct sembuf sops[nsops];
3630     abi_long ret;
3631 
3632     if (target_to_host_sembuf(sops, ptr, nsops))
3633         return -TARGET_EFAULT;
3634 
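         /*
          * semop() is implemented as semtimedop() with a NULL timeout; fall
          * back to the ipc(2) multiplexer on hosts that only provide that
          * entry point.
          */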
3635     ret = -TARGET_ENOSYS;
3636 #ifdef __NR_semtimedop
3637     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3638 #endif
3639 #ifdef __NR_ipc
3640     if (ret == -TARGET_ENOSYS) {
3641         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3642     }
3643 #endif
3644     return ret;
3645 }
3646 
3647 struct target_msqid_ds
3648 {
3649     struct target_ipc_perm msg_perm;
3650     abi_ulong msg_stime;
3651 #if TARGET_ABI_BITS == 32
3652     abi_ulong __unused1;
3653 #endif
3654     abi_ulong msg_rtime;
3655 #if TARGET_ABI_BITS == 32
3656     abi_ulong __unused2;
3657 #endif
3658     abi_ulong msg_ctime;
3659 #if TARGET_ABI_BITS == 32
3660     abi_ulong __unused3;
3661 #endif
3662     abi_ulong __msg_cbytes;
3663     abi_ulong msg_qnum;
3664     abi_ulong msg_qbytes;
3665     abi_ulong msg_lspid;
3666     abi_ulong msg_lrpid;
3667     abi_ulong __unused4;
3668     abi_ulong __unused5;
3669 };
3670 
3671 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3672                                                abi_ulong target_addr)
3673 {
3674     struct target_msqid_ds *target_md;
3675 
3676     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3677         return -TARGET_EFAULT;
3678     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3679         return -TARGET_EFAULT;
3680     host_md->msg_stime = tswapal(target_md->msg_stime);
3681     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3682     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3683     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3684     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3685     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3686     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3687     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3688     unlock_user_struct(target_md, target_addr, 0);
3689     return 0;
3690 }
3691 
3692 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3693                                                struct msqid_ds *host_md)
3694 {
3695     struct target_msqid_ds *target_md;
3696 
3697     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3698         return -TARGET_EFAULT;
3699     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3700         return -TARGET_EFAULT;
3701     target_md->msg_stime = tswapal(host_md->msg_stime);
3702     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3703     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3704     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3705     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3706     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3707     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3708     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3709     unlock_user_struct(target_md, target_addr, 1);
3710     return 0;
3711 }
3712 
3713 struct target_msginfo {
3714     int msgpool;
3715     int msgmap;
3716     int msgmax;
3717     int msgmnb;
3718     int msgmni;
3719     int msgssz;
3720     int msgtql;
3721     unsigned short int msgseg;
3722 };
3723 
3724 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3725                                               struct msginfo *host_msginfo)
3726 {
3727     struct target_msginfo *target_msginfo;
3728     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3729         return -TARGET_EFAULT;
3730     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3731     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3732     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3733     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3734     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3735     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3736     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3737     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3738     unlock_user_struct(target_msginfo, target_addr, 1);
3739     return 0;
3740 }
3741 
3742 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3743 {
3744     struct msqid_ds dsarg;
3745     struct msginfo msginfo;
3746     abi_long ret = -TARGET_EINVAL;
3747 
3748     cmd &= 0xff;
3749 
3750     switch (cmd) {
3751     case IPC_STAT:
3752     case IPC_SET:
3753     case MSG_STAT:
3754         if (target_to_host_msqid_ds(&dsarg,ptr))
3755             return -TARGET_EFAULT;
3756         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3757         if (host_to_target_msqid_ds(ptr,&dsarg))
3758             return -TARGET_EFAULT;
3759         break;
3760     case IPC_RMID:
3761         ret = get_errno(msgctl(msgid, cmd, NULL));
3762         break;
3763     case IPC_INFO:
3764     case MSG_INFO:
3765         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3766         if (host_to_target_msginfo(ptr, &msginfo))
3767             return -TARGET_EFAULT;
3768         break;
3769     }
3770 
3771     return ret;
3772 }
3773 
3774 struct target_msgbuf {
3775     abi_long mtype;
3776     char	mtext[1];
3777 };
3778 
3779 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3780                                  ssize_t msgsz, int msgflg)
3781 {
3782     struct target_msgbuf *target_mb;
3783     struct msgbuf *host_mb;
3784     abi_long ret = 0;
3785 
3786     if (msgsz < 0) {
3787         return -TARGET_EINVAL;
3788     }
3789 
3790     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3791         return -TARGET_EFAULT;
3792     host_mb = g_try_malloc(msgsz + sizeof(long));
3793     if (!host_mb) {
3794         unlock_user_struct(target_mb, msgp, 0);
3795         return -TARGET_ENOMEM;
3796     }
3797     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3798     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3799     ret = -TARGET_ENOSYS;
3800 #ifdef __NR_msgsnd
3801     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3802 #endif
3803 #ifdef __NR_ipc
3804     if (ret == -TARGET_ENOSYS) {
3805         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3806                                  host_mb, 0));
3807     }
3808 #endif
3809     g_free(host_mb);
3810     unlock_user_struct(target_mb, msgp, 0);
3811 
3812     return ret;
3813 }
3814 
3815 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3816                                  ssize_t msgsz, abi_long msgtyp,
3817                                  int msgflg)
3818 {
3819     struct target_msgbuf *target_mb;
3820     char *target_mtext;
3821     struct msgbuf *host_mb;
3822     abi_long ret = 0;
3823 
3824     if (msgsz < 0) {
3825         return -TARGET_EINVAL;
3826     }
3827 
3828     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3829         return -TARGET_EFAULT;
3830 
3831     host_mb = g_try_malloc(msgsz + sizeof(long));
3832     if (!host_mb) {
3833         ret = -TARGET_ENOMEM;
3834         goto end;
3835     }
3836     ret = -TARGET_ENOSYS;
3837 #ifdef __NR_msgrcv
3838     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3839 #endif
3840 #ifdef __NR_ipc
3841     if (ret == -TARGET_ENOSYS) {
3842         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3843                         msgflg, host_mb, msgtyp));
3844     }
3845 #endif
3846 
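         /*
          * On success, ret is the number of bytes received; copy that many
          * bytes of message text back into the guest's msgbuf.
          */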
3847     if (ret > 0) {
3848         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3849         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3850         if (!target_mtext) {
3851             ret = -TARGET_EFAULT;
3852             goto end;
3853         }
3854         memcpy(target_mb->mtext, host_mb->mtext, ret);
3855         unlock_user(target_mtext, target_mtext_addr, ret);
3856     }
3857 
3858     target_mb->mtype = tswapal(host_mb->mtype);
3859 
3860 end:
3861     if (target_mb)
3862         unlock_user_struct(target_mb, msgp, 1);
3863     g_free(host_mb);
3864     return ret;
3865 }
3866 
3867 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3868                                                abi_ulong target_addr)
3869 {
3870     struct target_shmid_ds *target_sd;
3871 
3872     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3873         return -TARGET_EFAULT;
3874     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3875         return -TARGET_EFAULT;
3876     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3877     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3878     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3879     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3880     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3881     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3882     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3883     unlock_user_struct(target_sd, target_addr, 0);
3884     return 0;
3885 }
3886 
3887 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3888                                                struct shmid_ds *host_sd)
3889 {
3890     struct target_shmid_ds *target_sd;
3891 
3892     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3893         return -TARGET_EFAULT;
3894     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3895         return -TARGET_EFAULT;
3896     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3897     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3898     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3899     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3900     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3901     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3902     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3903     unlock_user_struct(target_sd, target_addr, 1);
3904     return 0;
3905 }
3906 
3907 struct  target_shminfo {
3908     abi_ulong shmmax;
3909     abi_ulong shmmin;
3910     abi_ulong shmmni;
3911     abi_ulong shmseg;
3912     abi_ulong shmall;
3913 };
3914 
3915 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3916                                               struct shminfo *host_shminfo)
3917 {
3918     struct target_shminfo *target_shminfo;
3919     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3920         return -TARGET_EFAULT;
3921     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3922     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3923     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3924     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3925     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3926     unlock_user_struct(target_shminfo, target_addr, 1);
3927     return 0;
3928 }
3929 
3930 struct target_shm_info {
3931     int used_ids;
3932     abi_ulong shm_tot;
3933     abi_ulong shm_rss;
3934     abi_ulong shm_swp;
3935     abi_ulong swap_attempts;
3936     abi_ulong swap_successes;
3937 };
3938 
3939 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3940                                                struct shm_info *host_shm_info)
3941 {
3942     struct target_shm_info *target_shm_info;
3943     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3944         return -TARGET_EFAULT;
3945     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3946     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3947     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3948     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3949     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3950     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3951     unlock_user_struct(target_shm_info, target_addr, 1);
3952     return 0;
3953 }
3954 
3955 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3956 {
3957     struct shmid_ds dsarg;
3958     struct shminfo shminfo;
3959     struct shm_info shm_info;
3960     abi_long ret = -TARGET_EINVAL;
3961 
3962     cmd &= 0xff;
3963 
3964     switch (cmd) {
3965     case IPC_STAT:
3966     case IPC_SET:
3967     case SHM_STAT:
3968         if (target_to_host_shmid_ds(&dsarg, buf))
3969             return -TARGET_EFAULT;
3970         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3971         if (host_to_target_shmid_ds(buf, &dsarg))
3972             return -TARGET_EFAULT;
3973         break;
3974     case IPC_INFO:
3975         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3976         if (host_to_target_shminfo(buf, &shminfo))
3977             return -TARGET_EFAULT;
3978         break;
3979     case SHM_INFO:
3980         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3981         if (host_to_target_shm_info(buf, &shm_info))
3982             return -TARGET_EFAULT;
3983         break;
3984     case IPC_RMID:
3985     case SHM_LOCK:
3986     case SHM_UNLOCK:
3987         ret = get_errno(shmctl(shmid, cmd, NULL));
3988         break;
3989     }
3990 
3991     return ret;
3992 }
3993 
3994 #ifndef TARGET_FORCE_SHMLBA
3995 /* For most architectures, SHMLBA is the same as the page size;
3996  * some architectures have larger values, in which case they should
3997  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3998  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3999  * and defining its own value for SHMLBA.
4000  *
4001  * The kernel also permits SHMLBA to be set by the architecture to a
4002  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4003  * this means that addresses are rounded to the large size if
4004  * SHM_RND is set but addresses not aligned to that size are not rejected
4005  * as long as they are at least page-aligned. Since the only architecture
4006  * which uses this is ia64 this code doesn't provide for that oddity.
4007  */
4008 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4009 {
4010     return TARGET_PAGE_SIZE;
4011 }
4012 #endif
4013 
4014 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4015                                  int shmid, abi_ulong shmaddr, int shmflg)
4016 {
4017     abi_long raddr;
4018     void *host_raddr;
4019     struct shmid_ds shm_info;
4020     int i, ret;
4021     abi_ulong shmlba;
4022 
4023     /* find out the length of the shared memory segment */
4024     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4025     if (is_error(ret)) {
4026         /* can't get length, bail out */
4027         return ret;
4028     }
4029 
4030     shmlba = target_shmlba(cpu_env);
4031 
4032     if (shmaddr & (shmlba - 1)) {
4033         if (shmflg & SHM_RND) {
4034             shmaddr &= ~(shmlba - 1);
4035         } else {
4036             return -TARGET_EINVAL;
4037         }
4038     }
4039     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4040         return -TARGET_EINVAL;
4041     }
4042 
4043     mmap_lock();
4044 
4045     if (shmaddr) {
4046         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4047     } else {
4048         abi_ulong mmap_start;
4049 
4050         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4051         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4052 
4053         if (mmap_start == -1) {
4054             errno = ENOMEM;
4055             host_raddr = (void *)-1;
4056         } else
4057             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4058     }
4059 
4060     if (host_raddr == (void *)-1) {
4061         mmap_unlock();
4062         return get_errno((long)host_raddr);
4063     }
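         /*
          * Translate the host mapping back into a guest address, mark the
          * pages as valid (read-only if SHM_RDONLY was given) and remember
          * the segment in shm_regions[] so do_shmdt() can undo this later.
          */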
4064     raddr = h2g((unsigned long)host_raddr);
4065 
4066     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4067                    PAGE_VALID | PAGE_READ |
4068                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4069 
4070     for (i = 0; i < N_SHM_REGIONS; i++) {
4071         if (!shm_regions[i].in_use) {
4072             shm_regions[i].in_use = true;
4073             shm_regions[i].start = raddr;
4074             shm_regions[i].size = shm_info.shm_segsz;
4075             break;
4076         }
4077     }
4078 
4079     mmap_unlock();
4080     return raddr;
4081 
4082 }
4083 
4084 static inline abi_long do_shmdt(abi_ulong shmaddr)
4085 {
4086     int i;
4087     abi_long rv;
4088 
4089     mmap_lock();
4090 
4091     for (i = 0; i < N_SHM_REGIONS; ++i) {
4092         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4093             shm_regions[i].in_use = false;
4094             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4095             break;
4096         }
4097     }
4098     rv = get_errno(shmdt(g2h(shmaddr)));
4099 
4100     mmap_unlock();
4101 
4102     return rv;
4103 }
4104 
4105 #ifdef TARGET_NR_ipc
4106 /* ??? This only works with linear mappings.  */
4107 /* do_ipc() must return target values and target errnos. */
4108 static abi_long do_ipc(CPUArchState *cpu_env,
4109                        unsigned int call, abi_long first,
4110                        abi_long second, abi_long third,
4111                        abi_long ptr, abi_long fifth)
4112 {
4113     int version;
4114     abi_long ret = 0;
4115 
4116     version = call >> 16;
4117     call &= 0xffff;
4118 
4119     switch (call) {
4120     case IPCOP_semop:
4121         ret = do_semop(first, ptr, second);
4122         break;
4123 
4124     case IPCOP_semget:
4125         ret = get_errno(semget(first, second, third));
4126         break;
4127 
4128     case IPCOP_semctl: {
4129         /* The semun argument to semctl is passed by value, so dereference the
4130          * ptr argument. */
4131         abi_ulong atptr;
4132         get_user_ual(atptr, ptr);
4133         ret = do_semctl(first, second, third, atptr);
4134         break;
4135     }
4136 
4137     case IPCOP_msgget:
4138         ret = get_errno(msgget(first, second));
4139         break;
4140 
4141     case IPCOP_msgsnd:
4142         ret = do_msgsnd(first, ptr, second, third);
4143         break;
4144 
4145     case IPCOP_msgctl:
4146         ret = do_msgctl(first, second, ptr);
4147         break;
4148 
4149     case IPCOP_msgrcv:
4150         switch (version) {
4151         case 0:
4152             {
4153                 struct target_ipc_kludge {
4154                     abi_long msgp;
4155                     abi_long msgtyp;
4156                 } *tmp;
4157 
4158                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4159                     ret = -TARGET_EFAULT;
4160                     break;
4161                 }
4162 
4163                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4164 
4165                 unlock_user_struct(tmp, ptr, 0);
4166                 break;
4167             }
4168         default:
4169             ret = do_msgrcv(first, ptr, second, fifth, third);
4170         }
4171         break;
4172 
4173     case IPCOP_shmat:
4174         switch (version) {
4175         default:
4176         {
4177             abi_ulong raddr;
4178             raddr = do_shmat(cpu_env, first, ptr, second);
4179             if (is_error(raddr))
4180                 return get_errno(raddr);
4181             if (put_user_ual(raddr, third))
4182                 return -TARGET_EFAULT;
4183             break;
4184         }
4185         case 1:
4186             ret = -TARGET_EINVAL;
4187             break;
4188         }
4189         break;
4190     case IPCOP_shmdt:
4191         ret = do_shmdt(ptr);
4192         break;
4193 
4194     case IPCOP_shmget:
4195         /* IPC_* flag values are the same on all Linux platforms */
4196         ret = get_errno(shmget(first, second, third));
4197         break;
4198 
4199     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4200     case IPCOP_shmctl:
4201         ret = do_shmctl(first, second, ptr);
4202         break;
4203     default:
4204         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4205         ret = -TARGET_ENOSYS;
4206         break;
4207     }
4208     return ret;
4209 }
4210 #endif
4211 
4212 /* kernel structure types definitions */
4213 
4214 #define STRUCT(name, ...) STRUCT_ ## name,
4215 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4216 enum {
4217 #include "syscall_types.h"
4218 STRUCT_MAX
4219 };
4220 #undef STRUCT
4221 #undef STRUCT_SPECIAL
4222 
4223 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4224 #define STRUCT_SPECIAL(name)
4225 #include "syscall_types.h"
4226 #undef STRUCT
4227 #undef STRUCT_SPECIAL
4228 
4229 typedef struct IOCTLEntry IOCTLEntry;
4230 
4231 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4232                              int fd, int cmd, abi_long arg);
4233 
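     /*
      * One entry per supported ioctl: the target and host request numbers,
      * a human-readable name, the data direction (IOC_R/IOC_W/IOC_RW), an
      * optional per-ioctl handler, and a thunk description of the argument.
      */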
4234 struct IOCTLEntry {
4235     int target_cmd;
4236     unsigned int host_cmd;
4237     const char *name;
4238     int access;
4239     do_ioctl_fn *do_ioctl;
4240     const argtype arg_type[5];
4241 };
4242 
4243 #define IOC_R 0x0001
4244 #define IOC_W 0x0002
4245 #define IOC_RW (IOC_R | IOC_W)
4246 
4247 #define MAX_STRUCT_SIZE 4096
4248 
4249 #ifdef CONFIG_FIEMAP
4250 /* So fiemap access checks don't overflow on 32 bit systems.
4251  * This is very slightly smaller than the limit imposed by
4252  * the underlying kernel.
4253  */
4254 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4255                             / sizeof(struct fiemap_extent))
4256 
4257 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4258                                        int fd, int cmd, abi_long arg)
4259 {
4260     /* The parameter for this ioctl is a struct fiemap followed
4261      * by an array of struct fiemap_extent whose size is set
4262      * in fiemap->fm_extent_count. The array is filled in by the
4263      * ioctl.
4264      */
4265     int target_size_in, target_size_out;
4266     struct fiemap *fm;
4267     const argtype *arg_type = ie->arg_type;
4268     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4269     void *argptr, *p;
4270     abi_long ret;
4271     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4272     uint32_t outbufsz;
4273     int free_fm = 0;
4274 
4275     assert(arg_type[0] == TYPE_PTR);
4276     assert(ie->access == IOC_RW);
4277     arg_type++;
4278     target_size_in = thunk_type_size(arg_type, 0);
4279     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4280     if (!argptr) {
4281         return -TARGET_EFAULT;
4282     }
4283     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4284     unlock_user(argptr, arg, 0);
4285     fm = (struct fiemap *)buf_temp;
4286     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4287         return -TARGET_EINVAL;
4288     }
4289 
4290     outbufsz = sizeof (*fm) +
4291         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4292 
4293     if (outbufsz > MAX_STRUCT_SIZE) {
4294         /* We can't fit all the extents into the fixed size buffer.
4295          * Allocate one that is large enough and use it instead.
4296          */
4297         fm = g_try_malloc(outbufsz);
4298         if (!fm) {
4299             return -TARGET_ENOMEM;
4300         }
4301         memcpy(fm, buf_temp, sizeof(struct fiemap));
4302         free_fm = 1;
4303     }
4304     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4305     if (!is_error(ret)) {
4306         target_size_out = target_size_in;
4307         /* An extent_count of 0 means we were only counting the extents
4308          * so there are no structs to copy
4309          */
4310         if (fm->fm_extent_count != 0) {
4311             target_size_out += fm->fm_mapped_extents * extent_size;
4312         }
4313         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4314         if (!argptr) {
4315             ret = -TARGET_EFAULT;
4316         } else {
4317             /* Convert the struct fiemap */
4318             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4319             if (fm->fm_extent_count != 0) {
4320                 p = argptr + target_size_in;
4321                 /* ...and then all the struct fiemap_extents */
4322                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4323                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4324                                   THUNK_TARGET);
4325                     p += extent_size;
4326                 }
4327             }
4328             unlock_user(argptr, arg, target_size_out);
4329         }
4330     }
4331     if (free_fm) {
4332         g_free(fm);
4333     }
4334     return ret;
4335 }
4336 #endif
4337 
4338 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4339                                 int fd, int cmd, abi_long arg)
4340 {
4341     const argtype *arg_type = ie->arg_type;
4342     int target_size;
4343     void *argptr;
4344     int ret;
4345     struct ifconf *host_ifconf;
4346     uint32_t outbufsz;
4347     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4348     int target_ifreq_size;
4349     int nb_ifreq;
4350     int free_buf = 0;
4351     int i;
4352     int target_ifc_len;
4353     abi_long target_ifc_buf;
4354     int host_ifc_len;
4355     char *host_ifc_buf;
4356 
4357     assert(arg_type[0] == TYPE_PTR);
4358     assert(ie->access == IOC_RW);
4359 
4360     arg_type++;
4361     target_size = thunk_type_size(arg_type, 0);
4362 
4363     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4364     if (!argptr)
4365         return -TARGET_EFAULT;
4366     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4367     unlock_user(argptr, arg, 0);
4368 
4369     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4370     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4371     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4372 
4373     if (target_ifc_buf != 0) {
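             /*
              * ifc_len counts bytes of struct ifreq, whose size can differ
              * between the target and host ABIs, so rescale the length for
              * the host ioctl call.
              */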
4374         target_ifc_len = host_ifconf->ifc_len;
4375         nb_ifreq = target_ifc_len / target_ifreq_size;
4376         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4377 
4378         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4379         if (outbufsz > MAX_STRUCT_SIZE) {
4380             /*
4381              * We can't fit the whole ifreq array into the fixed size buffer.
4382              * Allocate one that is large enough and use it instead.
4383              */
4384             host_ifconf = malloc(outbufsz);
4385             if (!host_ifconf) {
4386                 return -TARGET_ENOMEM;
4387             }
4388             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4389             free_buf = 1;
4390         }
4391         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4392 
4393         host_ifconf->ifc_len = host_ifc_len;
4394     } else {
4395       host_ifc_buf = NULL;
4396     }
4397     host_ifconf->ifc_buf = host_ifc_buf;
4398 
4399     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4400     if (!is_error(ret)) {
4401 	/* convert host ifc_len to target ifc_len */
4402         /* convert host ifc_len to target ifc_len */
4403         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4404         target_ifc_len = nb_ifreq * target_ifreq_size;
4405         host_ifconf->ifc_len = target_ifc_len;
4406 
4407 	/* restore target ifc_buf */
4408         /* restore target ifc_buf */
4409         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4410 
4411 	/* copy struct ifconf to target user */
4412         /* copy struct ifconf to target user */
4413         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4414         if (!argptr)
4415             return -TARGET_EFAULT;
4416         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4417         unlock_user(argptr, arg, target_size);
4418 
4419         if (target_ifc_buf != 0) {
4420             /* copy ifreq[] to target user */
4421             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4422             for (i = 0; i < nb_ifreq ; i++) {
4423                 thunk_convert(argptr + i * target_ifreq_size,
4424                               host_ifc_buf + i * sizeof(struct ifreq),
4425                               ifreq_arg_type, THUNK_TARGET);
4426             }
4427             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4428         }
4429     }
4430 
4431     if (free_buf) {
4432         free(host_ifconf);
4433     }
4434 
4435     return ret;
4436 }
4437 
4438 #if defined(CONFIG_USBFS)
4439 #if HOST_LONG_BITS > 64
4440 #error USBDEVFS thunks do not support >64 bit hosts yet.
4441 #endif
4442 struct live_urb {
4443     uint64_t target_urb_adr;
4444     uint64_t target_buf_adr;
4445     char *target_buf_ptr;
4446     struct usbdevfs_urb host_urb;
4447 };
4448 
4449 static GHashTable *usbdevfs_urb_hashtable(void)
4450 {
4451     static GHashTable *urb_hashtable;
4452 
4453     if (!urb_hashtable) {
4454         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4455     }
4456     return urb_hashtable;
4457 }
4458 
4459 static void urb_hashtable_insert(struct live_urb *urb)
4460 {
4461     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4462     g_hash_table_insert(urb_hashtable, urb, urb);
4463 }
4464 
4465 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4466 {
4467     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4468     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4469 }
4470 
4471 static void urb_hashtable_remove(struct live_urb *urb)
4472 {
4473     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4474     g_hash_table_remove(urb_hashtable, urb);
4475 }
4476 
4477 static abi_long
4478 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4479                           int fd, int cmd, abi_long arg)
4480 {
4481     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4482     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4483     struct live_urb *lurb;
4484     void *argptr;
4485     uint64_t hurb;
4486     int target_size;
4487     uintptr_t target_urb_adr;
4488     abi_long ret;
4489 
4490     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4491 
4492     memset(buf_temp, 0, sizeof(uint64_t));
4493     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4494     if (is_error(ret)) {
4495         return ret;
4496     }
4497 
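         /*
          * The kernel hands back the pointer to our embedded host_urb; step
          * back to the enclosing live_urb wrapper to recover the guest URB
          * and buffer addresses we stashed at submit time.
          */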
4498     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4499     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4500     if (!lurb->target_urb_adr) {
4501         return -TARGET_EFAULT;
4502     }
4503     urb_hashtable_remove(lurb);
4504     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4505         lurb->host_urb.buffer_length);
4506     lurb->target_buf_ptr = NULL;
4507 
4508     /* restore the guest buffer pointer */
4509     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4510 
4511     /* update the guest urb struct */
4512     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4513     if (!argptr) {
4514         g_free(lurb);
4515         return -TARGET_EFAULT;
4516     }
4517     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4518     unlock_user(argptr, lurb->target_urb_adr, target_size);
4519 
4520     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4521     /* write back the urb handle */
4522     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4523     if (!argptr) {
4524         g_free(lurb);
4525         return -TARGET_EFAULT;
4526     }
4527 
4528     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4529     target_urb_adr = lurb->target_urb_adr;
4530     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4531     unlock_user(argptr, arg, target_size);
4532 
4533     g_free(lurb);
4534     return ret;
4535 }
4536 
4537 static abi_long
4538 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4539                              uint8_t *buf_temp __attribute__((unused)),
4540                              int fd, int cmd, abi_long arg)
4541 {
4542     struct live_urb *lurb;
4543 
4544     /* map target address back to host URB with metadata. */
4545     lurb = urb_hashtable_lookup(arg);
4546     if (!lurb) {
4547         return -TARGET_EFAULT;
4548     }
4549     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4550 }
4551 
4552 static abi_long
4553 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4554                             int fd, int cmd, abi_long arg)
4555 {
4556     const argtype *arg_type = ie->arg_type;
4557     int target_size;
4558     abi_long ret;
4559     void *argptr;
4560     int rw_dir;
4561     struct live_urb *lurb;
4562 
4563     /*
4564      * Each submitted URB needs to map to a unique ID for the
4565      * kernel, and that unique ID needs to be a pointer to
4566      * host memory.  Hence, we need to malloc for each URB.
4567      * Isochronous transfers have a variable-length struct.
4568      */
4569     arg_type++;
4570     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4571 
4572     /* construct host copy of urb and metadata */
4573     lurb = g_try_malloc0(sizeof(struct live_urb));
4574     if (!lurb) {
4575         return -TARGET_ENOMEM;
4576     }
4577 
4578     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4579     if (!argptr) {
4580         g_free(lurb);
4581         return -TARGET_EFAULT;
4582     }
4583     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4584     unlock_user(argptr, arg, 0);
4585 
4586     lurb->target_urb_adr = arg;
4587     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4588 
4589     /* Buffer space used depends on endpoint type, so lock the entire buffer. */
4590     /* Control type URBs should check the buffer contents for true direction. */
4591     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4592     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4593         lurb->host_urb.buffer_length, 1);
4594     if (lurb->target_buf_ptr == NULL) {
4595         g_free(lurb);
4596         return -TARGET_EFAULT;
4597     }
4598 
4599     /* update buffer pointer in host copy */
4600     lurb->host_urb.buffer = lurb->target_buf_ptr;
4601 
4602     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4603     if (is_error(ret)) {
4604         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4605         g_free(lurb);
4606     } else {
4607         urb_hashtable_insert(lurb);
4608     }
4609 
4610     return ret;
4611 }
4612 #endif /* CONFIG_USBFS */
4613 
4614 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4615                             int cmd, abi_long arg)
4616 {
4617     void *argptr;
4618     struct dm_ioctl *host_dm;
4619     abi_long guest_data;
4620     uint32_t guest_data_size;
4621     int target_size;
4622     const argtype *arg_type = ie->arg_type;
4623     abi_long ret;
4624     void *big_buf = NULL;
4625     char *host_data;
4626 
4627     arg_type++;
4628     target_size = thunk_type_size(arg_type, 0);
4629     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4630     if (!argptr) {
4631         ret = -TARGET_EFAULT;
4632         goto out;
4633     }
4634     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4635     unlock_user(argptr, arg, 0);
4636 
4637     /* buf_temp is too small, so fetch things into a bigger buffer */
4638     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4639     memcpy(big_buf, buf_temp, target_size);
4640     buf_temp = big_buf;
4641     host_dm = big_buf;
4642 
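         /*
          * The variable-sized payload lives in the same guest buffer as the
          * dm_ioctl header, data_start bytes from its beginning.
          */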
4643     guest_data = arg + host_dm->data_start;
4644     if ((guest_data - arg) < 0) {
4645         ret = -TARGET_EINVAL;
4646         goto out;
4647     }
4648     guest_data_size = host_dm->data_size - host_dm->data_start;
4649     host_data = (char*)host_dm + host_dm->data_start;
4650 
4651     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4652     if (!argptr) {
4653         ret = -TARGET_EFAULT;
4654         goto out;
4655     }
4656 
4657     switch (ie->host_cmd) {
4658     case DM_REMOVE_ALL:
4659     case DM_LIST_DEVICES:
4660     case DM_DEV_CREATE:
4661     case DM_DEV_REMOVE:
4662     case DM_DEV_SUSPEND:
4663     case DM_DEV_STATUS:
4664     case DM_DEV_WAIT:
4665     case DM_TABLE_STATUS:
4666     case DM_TABLE_CLEAR:
4667     case DM_TABLE_DEPS:
4668     case DM_LIST_VERSIONS:
4669         /* no input data */
4670         break;
4671     case DM_DEV_RENAME:
4672     case DM_DEV_SET_GEOMETRY:
4673         /* data contains only strings */
4674         memcpy(host_data, argptr, guest_data_size);
4675         break;
4676     case DM_TARGET_MSG:
4677         memcpy(host_data, argptr, guest_data_size);
4678         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4679         break;
4680     case DM_TABLE_LOAD:
4681     {
4682         void *gspec = argptr;
4683         void *cur_data = host_data;
4684         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4685         int spec_size = thunk_type_size(arg_type, 0);
4686         int i;
4687 
4688         for (i = 0; i < host_dm->target_count; i++) {
4689             struct dm_target_spec *spec = cur_data;
4690             uint32_t next;
4691             int slen;
4692 
4693             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4694             slen = strlen((char*)gspec + spec_size) + 1;
4695             next = spec->next;
4696             spec->next = sizeof(*spec) + slen;
4697             strcpy((char*)&spec[1], gspec + spec_size);
4698             gspec += next;
4699             cur_data += spec->next;
4700         }
4701         break;
4702     }
4703     default:
4704         ret = -TARGET_EINVAL;
4705         unlock_user(argptr, guest_data, 0);
4706         goto out;
4707     }
4708     unlock_user(argptr, guest_data, 0);
4709 
4710     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4711     if (!is_error(ret)) {
4712         guest_data = arg + host_dm->data_start;
4713         guest_data_size = host_dm->data_size - host_dm->data_start;
4714         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4715         switch (ie->host_cmd) {
4716         case DM_REMOVE_ALL:
4717         case DM_DEV_CREATE:
4718         case DM_DEV_REMOVE:
4719         case DM_DEV_RENAME:
4720         case DM_DEV_SUSPEND:
4721         case DM_DEV_STATUS:
4722         case DM_TABLE_LOAD:
4723         case DM_TABLE_CLEAR:
4724         case DM_TARGET_MSG:
4725         case DM_DEV_SET_GEOMETRY:
4726             /* no return data */
4727             break;
4728         case DM_LIST_DEVICES:
4729         {
4730             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4731             uint32_t remaining_data = guest_data_size;
4732             void *cur_data = argptr;
4733             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4734             int nl_size = 12; /* can't use thunk_size due to alignment */
4735 
4736             while (1) {
4737                 uint32_t next = nl->next;
4738                 if (next) {
4739                     nl->next = nl_size + (strlen(nl->name) + 1);
4740                 }
4741                 if (remaining_data < nl->next) {
4742                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4743                     break;
4744                 }
4745                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4746                 strcpy(cur_data + nl_size, nl->name);
4747                 cur_data += nl->next;
4748                 remaining_data -= nl->next;
4749                 if (!next) {
4750                     break;
4751                 }
4752                 nl = (void*)nl + next;
4753             }
4754             break;
4755         }
4756         case DM_DEV_WAIT:
4757         case DM_TABLE_STATUS:
4758         {
4759             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4760             void *cur_data = argptr;
4761             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4762             int spec_size = thunk_type_size(arg_type, 0);
4763             int i;
4764 
4765             for (i = 0; i < host_dm->target_count; i++) {
4766                 uint32_t next = spec->next;
4767                 int slen = strlen((char*)&spec[1]) + 1;
4768                 spec->next = (cur_data - argptr) + spec_size + slen;
4769                 if (guest_data_size < spec->next) {
4770                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4771                     break;
4772                 }
4773                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4774                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4775                 cur_data = argptr + spec->next;
4776                 spec = (void*)host_dm + host_dm->data_start + next;
4777             }
4778             break;
4779         }
4780         case DM_TABLE_DEPS:
4781         {
4782             void *hdata = (void*)host_dm + host_dm->data_start;
4783             int count = *(uint32_t*)hdata;
4784             uint64_t *hdev = hdata + 8;
4785             uint64_t *gdev = argptr + 8;
4786             int i;
4787 
4788             *(uint32_t*)argptr = tswap32(count);
4789             for (i = 0; i < count; i++) {
4790                 *gdev = tswap64(*hdev);
4791                 gdev++;
4792                 hdev++;
4793             }
4794             break;
4795         }
4796         case DM_LIST_VERSIONS:
4797         {
4798             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4799             uint32_t remaining_data = guest_data_size;
4800             void *cur_data = argptr;
4801             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4802             int vers_size = thunk_type_size(arg_type, 0);
4803 
4804             while (1) {
4805                 uint32_t next = vers->next;
4806                 if (next) {
4807                     vers->next = vers_size + (strlen(vers->name) + 1);
4808                 }
4809                 if (remaining_data < vers->next) {
4810                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4811                     break;
4812                 }
4813                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4814                 strcpy(cur_data + vers_size, vers->name);
4815                 cur_data += vers->next;
4816                 remaining_data -= vers->next;
4817                 if (!next) {
4818                     break;
4819                 }
4820                 vers = (void*)vers + next;
4821             }
4822             break;
4823         }
4824         default:
4825             unlock_user(argptr, guest_data, 0);
4826             ret = -TARGET_EINVAL;
4827             goto out;
4828         }
4829         unlock_user(argptr, guest_data, guest_data_size);
4830 
4831         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4832         if (!argptr) {
4833             ret = -TARGET_EFAULT;
4834             goto out;
4835         }
4836         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4837         unlock_user(argptr, arg, target_size);
4838     }
4839 out:
4840     g_free(big_buf);
4841     return ret;
4842 }
4843 
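/*
 * BLKPG is doubly indirect: struct blkpg_ioctl_arg itself contains a
 * pointer to a struct blkpg_partition payload.  Both levels have to be
 * converted, and the inner pointer is redirected to a host-side copy
 * before the host ioctl is issued.
 */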
4844 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4845                                int cmd, abi_long arg)
4846 {
4847     void *argptr;
4848     int target_size;
4849     const argtype *arg_type = ie->arg_type;
4850     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4851     abi_long ret;
4852 
4853     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4854     struct blkpg_partition host_part;
4855 
4856     /* Read and convert blkpg */
4857     arg_type++;
4858     target_size = thunk_type_size(arg_type, 0);
4859     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4860     if (!argptr) {
4861         ret = -TARGET_EFAULT;
4862         goto out;
4863     }
4864     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4865     unlock_user(argptr, arg, 0);
4866 
4867     switch (host_blkpg->op) {
4868     case BLKPG_ADD_PARTITION:
4869     case BLKPG_DEL_PARTITION:
4870         /* payload is struct blkpg_partition */
4871         break;
4872     default:
4873         /* Unknown opcode */
4874         ret = -TARGET_EINVAL;
4875         goto out;
4876     }
4877 
4878     /* Read and convert blkpg->data */
4879     arg = (abi_long)(uintptr_t)host_blkpg->data;
4880     target_size = thunk_type_size(part_arg_type, 0);
4881     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4882     if (!argptr) {
4883         ret = -TARGET_EFAULT;
4884         goto out;
4885     }
4886     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4887     unlock_user(argptr, arg, 0);
4888 
4889     /* Swizzle the data pointer to our local copy and call! */
4890     host_blkpg->data = &host_part;
4891     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4892 
4893 out:
4894     return ret;
4895 }
4896 
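/*
 * The rtentry-based routing ioctls take a struct rtentry whose rt_dev
 * member is a pointer to a device-name string in guest memory.  The
 * generic thunk machinery cannot follow that pointer, so the structure
 * is converted field by field here and the string is locked separately.
 */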
4897 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4898                                 int fd, int cmd, abi_long arg)
4899 {
4900     const argtype *arg_type = ie->arg_type;
4901     const StructEntry *se;
4902     const argtype *field_types;
4903     const int *dst_offsets, *src_offsets;
4904     int target_size;
4905     void *argptr;
4906     abi_ulong *target_rt_dev_ptr = NULL;
4907     unsigned long *host_rt_dev_ptr = NULL;
4908     abi_long ret;
4909     int i;
4910 
4911     assert(ie->access == IOC_W);
4912     assert(*arg_type == TYPE_PTR);
4913     arg_type++;
4914     assert(*arg_type == TYPE_STRUCT);
4915     target_size = thunk_type_size(arg_type, 0);
4916     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4917     if (!argptr) {
4918         return -TARGET_EFAULT;
4919     }
4920     arg_type++;
4921     assert(*arg_type == (int)STRUCT_rtentry);
4922     se = struct_entries + *arg_type++;
4923     assert(se->convert[0] == NULL);
4924     /* convert struct here to be able to catch rt_dev string */
4925     field_types = se->field_types;
4926     dst_offsets = se->field_offsets[THUNK_HOST];
4927     src_offsets = se->field_offsets[THUNK_TARGET];
4928     for (i = 0; i < se->nb_fields; i++) {
4929         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4930             assert(*field_types == TYPE_PTRVOID);
4931             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4932             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4933             if (*target_rt_dev_ptr != 0) {
4934                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4935                                                   tswapal(*target_rt_dev_ptr));
4936                 if (!*host_rt_dev_ptr) {
4937                     unlock_user(argptr, arg, 0);
4938                     return -TARGET_EFAULT;
4939                 }
4940             } else {
4941                 *host_rt_dev_ptr = 0;
4942             }
4943             field_types++;
4944             continue;
4945         }
4946         field_types = thunk_convert(buf_temp + dst_offsets[i],
4947                                     argptr + src_offsets[i],
4948                                     field_types, THUNK_HOST);
4949     }
4950     unlock_user(argptr, arg, 0);
4951 
4952     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4953 
4954     assert(host_rt_dev_ptr != NULL);
4955     assert(target_rt_dev_ptr != NULL);
4956     if (*host_rt_dev_ptr != 0) {
4957         unlock_user((void *)*host_rt_dev_ptr,
4958                     *target_rt_dev_ptr, 0);
4959     }
4960     return ret;
4961 }
4962 
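/*
 * KDSIGACCEPT takes a plain signal number rather than a pointer, so the
 * only conversion needed is target-to-host signal translation.
 */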
4963 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4964                                      int fd, int cmd, abi_long arg)
4965 {
4966     int sig = target_to_host_signal(arg);
4967     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4968 }
4969 
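/*
 * For SIOCGSTAMP/SIOCGSTAMPNS the host ioctl always fills in a native
 * timeval/timespec; the result is then copied back to the guest in
 * either the traditional layout or the 64-bit time layout, depending on
 * which variant of the command the guest issued.
 */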
4970 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4971                                     int fd, int cmd, abi_long arg)
4972 {
4973     struct timeval tv;
4974     abi_long ret;
4975 
4976     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4977     if (is_error(ret)) {
4978         return ret;
4979     }
4980 
4981     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4982         if (copy_to_user_timeval(arg, &tv)) {
4983             return -TARGET_EFAULT;
4984         }
4985     } else {
4986         if (copy_to_user_timeval64(arg, &tv)) {
4987             return -TARGET_EFAULT;
4988         }
4989     }
4990 
4991     return ret;
4992 }
4993 
4994 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4995                                       int fd, int cmd, abi_long arg)
4996 {
4997     struct timespec ts;
4998     abi_long ret;
4999 
5000     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5001     if (is_error(ret)) {
5002         return ret;
5003     }
5004 
5005     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5006         if (host_to_target_timespec(arg, &ts)) {
5007             return -TARGET_EFAULT;
5008         }
5009     } else {
5010         if (host_to_target_timespec64(arg, &ts)) {
5011             return -TARGET_EFAULT;
5012         }
5013     }
5014 
5015     return ret;
5016 }
5017 
5018 #ifdef TIOCGPTPEER
5019 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5020                                      int fd, int cmd, abi_long arg)
5021 {
5022     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5023     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5024 }
5025 #endif
5026 
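/*
 * The table below is generated from ioctls.h: each IOCTL() line there,
 * e.g. an entry of the (illustrative) form
 *     IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 * becomes one element mapping the TARGET_* command number onto the host
 * one, together with its access mode and argument description.
 * IOCTL_SPECIAL() entries additionally name one of the do_ioctl_*()
 * helpers above, and IOCTL_IGNORE() entries leave host_cmd at 0 so that
 * do_ioctl() rejects them with -TARGET_ENOSYS.
 */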
5027 static IOCTLEntry ioctl_entries[] = {
5028 #define IOCTL(cmd, access, ...) \
5029     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5030 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5031     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5032 #define IOCTL_IGNORE(cmd) \
5033     { TARGET_ ## cmd, 0, #cmd },
5034 #include "ioctls.h"
5035     { 0, 0, },
5036 };
5037 
5038 /* ??? Implement proper locking for ioctls.  */
5039 /* do_ioctl() must return target values and target errnos. */
5040 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5041 {
5042     const IOCTLEntry *ie;
5043     const argtype *arg_type;
5044     abi_long ret;
5045     uint8_t buf_temp[MAX_STRUCT_SIZE];
5046     int target_size;
5047     void *argptr;
5048 
5049     ie = ioctl_entries;
5050     for(;;) {
5051         if (ie->target_cmd == 0) {
5052             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5053             return -TARGET_ENOSYS;
5054         }
5055         if (ie->target_cmd == cmd)
5056             break;
5057         ie++;
5058     }
5059     arg_type = ie->arg_type;
5060     if (ie->do_ioctl) {
5061         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5062     } else if (!ie->host_cmd) {
5063         /* Some architectures define BSD ioctls in their headers
5064            that are not implemented in Linux.  */
5065         return -TARGET_ENOSYS;
5066     }
5067 
5068     switch(arg_type[0]) {
5069     case TYPE_NULL:
5070         /* no argument */
5071         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5072         break;
5073     case TYPE_PTRVOID:
5074     case TYPE_INT:
5075         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5076         break;
5077     case TYPE_PTR:
5078         arg_type++;
5079         target_size = thunk_type_size(arg_type, 0);
5080         switch(ie->access) {
5081         case IOC_R:
5082             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5083             if (!is_error(ret)) {
5084                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5085                 if (!argptr)
5086                     return -TARGET_EFAULT;
5087                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5088                 unlock_user(argptr, arg, target_size);
5089             }
5090             break;
5091         case IOC_W:
5092             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5093             if (!argptr)
5094                 return -TARGET_EFAULT;
5095             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5096             unlock_user(argptr, arg, 0);
5097             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5098             break;
5099         default:
5100         case IOC_RW:
5101             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5102             if (!argptr)
5103                 return -TARGET_EFAULT;
5104             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5105             unlock_user(argptr, arg, 0);
5106             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5107             if (!is_error(ret)) {
5108                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5109                 if (!argptr)
5110                     return -TARGET_EFAULT;
5111                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5112                 unlock_user(argptr, arg, target_size);
5113             }
5114             break;
5115         }
5116         break;
5117     default:
5118         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5119                  (long)cmd, arg_type[0]);
5120         ret = -TARGET_ENOSYS;
5121         break;
5122     }
5123     return ret;
5124 }
5125 
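/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: if the masked input matches the first pair, the bits of
 * the second pair are set in the output, and vice versa for the reverse
 * direction.  The termios tables below translate the c_iflag, c_oflag,
 * c_cflag and c_lflag words between guest and host encodings.
 */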
5126 static const bitmask_transtbl iflag_tbl[] = {
5127         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5128         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5129         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5130         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5131         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5132         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5133         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5134         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5135         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5136         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5137         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5138         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5139         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5140         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5141         { 0, 0, 0, 0 }
5142 };
5143 
5144 static const bitmask_transtbl oflag_tbl[] = {
5145 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5146 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5147 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5148 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5149 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5150 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5151 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5152 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5153 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5154 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5155 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5156 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5157 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5158 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5159 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5160 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5161 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5162 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5163 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5164 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5165 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5166 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5167 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5168 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5169 	{ 0, 0, 0, 0 }
5170 };
5171 
5172 static const bitmask_transtbl cflag_tbl[] = {
5173 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5174 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5175 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5176 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5177 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5178 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5179 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5180 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5181 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5182 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5183 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5184 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5185 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5186 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5187 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5188 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5189 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5190 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5191 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5192 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5193 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5194 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5195 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5196 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5197 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5198 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5199 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5200 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5201 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5202 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5203 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5204 	{ 0, 0, 0, 0 }
5205 };
5206 
5207 static const bitmask_transtbl lflag_tbl[] = {
5208 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5209 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5210 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5211 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5212 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5213 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5214 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5215 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5216 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5217 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5218 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5219 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5220 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5221 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5222 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5223 	{ 0, 0, 0, 0 }
5224 };
5225 
5226 static void target_to_host_termios (void *dst, const void *src)
5227 {
5228     struct host_termios *host = dst;
5229     const struct target_termios *target = src;
5230 
5231     host->c_iflag =
5232         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5233     host->c_oflag =
5234         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5235     host->c_cflag =
5236         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5237     host->c_lflag =
5238         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5239     host->c_line = target->c_line;
5240 
5241     memset(host->c_cc, 0, sizeof(host->c_cc));
5242     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5243     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5244     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5245     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5246     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5247     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5248     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5249     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5250     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5251     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5252     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5253     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5254     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5255     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5256     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5257     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5258     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5259 }
5260 
5261 static void host_to_target_termios (void *dst, const void *src)
5262 {
5263     struct target_termios *target = dst;
5264     const struct host_termios *host = src;
5265 
5266     target->c_iflag =
5267         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5268     target->c_oflag =
5269         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5270     target->c_cflag =
5271         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5272     target->c_lflag =
5273         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5274     target->c_line = host->c_line;
5275 
5276     memset(target->c_cc, 0, sizeof(target->c_cc));
5277     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5278     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5279     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5280     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5281     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5282     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5283     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5284     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5285     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5286     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5287     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5288     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5289     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5290     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5291     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5292     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5293     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5294 }
5295 
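/*
 * struct termios is registered as a "special" structure: instead of a
 * field-by-field layout it supplies the two converter functions above,
 * which the thunk layer invokes whenever a termios has to cross the
 * guest/host boundary (e.g. for the TCGETS/TCSETS family of ioctls).
 */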
5296 static const StructEntry struct_termios_def = {
5297     .convert = { host_to_target_termios, target_to_host_termios },
5298     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5299     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5300 };
5301 
5302 static bitmask_transtbl mmap_flags_tbl[] = {
5303     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5304     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5305     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5306     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5307       MAP_ANONYMOUS, MAP_ANONYMOUS },
5308     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5309       MAP_GROWSDOWN, MAP_GROWSDOWN },
5310     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5311       MAP_DENYWRITE, MAP_DENYWRITE },
5312     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5313       MAP_EXECUTABLE, MAP_EXECUTABLE },
5314     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5315     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5316       MAP_NORESERVE, MAP_NORESERVE },
5317     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5318     /* MAP_STACK had been ignored by the kernel for quite some time.
5319        Recognize it for the target insofar as we do not want to pass
5320        it through to the host.  */
5321     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5322     { 0, 0, 0, 0 }
5323 };
5324 
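/*
 * The TARGET_I386 helpers below emulate modify_ldt() and the
 * get/set_thread_area calls entirely in user space: the guest-visible
 * LDT and GDT TLS entries live in guest memory, and descriptor words
 * are packed and unpacked with the same bit layout the Linux kernel
 * uses.
 */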
5325 #if defined(TARGET_I386)
5326 
5327 /* NOTE: there is really one LDT for all the threads */
5328 static uint8_t *ldt_table;
5329 
5330 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5331 {
5332     int size;
5333     void *p;
5334 
5335     if (!ldt_table)
5336         return 0;
5337     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5338     if (size > bytecount)
5339         size = bytecount;
5340     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5341     if (!p)
5342         return -TARGET_EFAULT;
5343     /* ??? Should this be byteswapped?  */
5344     memcpy(p, ldt_table, size);
5345     unlock_user(p, ptr, size);
5346     return size;
5347 }
5348 
5349 /* XXX: add locking support */
5350 static abi_long write_ldt(CPUX86State *env,
5351                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5352 {
5353     struct target_modify_ldt_ldt_s ldt_info;
5354     struct target_modify_ldt_ldt_s *target_ldt_info;
5355     int seg_32bit, contents, read_exec_only, limit_in_pages;
5356     int seg_not_present, useable, lm;
5357     uint32_t *lp, entry_1, entry_2;
5358 
5359     if (bytecount != sizeof(ldt_info))
5360         return -TARGET_EINVAL;
5361     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5362         return -TARGET_EFAULT;
5363     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5364     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5365     ldt_info.limit = tswap32(target_ldt_info->limit);
5366     ldt_info.flags = tswap32(target_ldt_info->flags);
5367     unlock_user_struct(target_ldt_info, ptr, 0);
5368 
5369     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5370         return -TARGET_EINVAL;
5371     seg_32bit = ldt_info.flags & 1;
5372     contents = (ldt_info.flags >> 1) & 3;
5373     read_exec_only = (ldt_info.flags >> 3) & 1;
5374     limit_in_pages = (ldt_info.flags >> 4) & 1;
5375     seg_not_present = (ldt_info.flags >> 5) & 1;
5376     useable = (ldt_info.flags >> 6) & 1;
5377 #ifdef TARGET_ABI32
5378     lm = 0;
5379 #else
5380     lm = (ldt_info.flags >> 7) & 1;
5381 #endif
5382     if (contents == 3) {
5383         if (oldmode)
5384             return -TARGET_EINVAL;
5385         if (seg_not_present == 0)
5386             return -TARGET_EINVAL;
5387     }
5388     /* allocate the LDT */
5389     if (!ldt_table) {
5390         env->ldt.base = target_mmap(0,
5391                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5392                                     PROT_READ|PROT_WRITE,
5393                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5394         if (env->ldt.base == -1)
5395             return -TARGET_ENOMEM;
5396         memset(g2h(env->ldt.base), 0,
5397                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5398         env->ldt.limit = 0xffff;
5399         ldt_table = g2h(env->ldt.base);
5400     }
5401 
5402     /* NOTE: same code as Linux kernel */
5403     /* Allow LDTs to be cleared by the user. */
5404     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5405         if (oldmode ||
5406             (contents == 0		&&
5407              read_exec_only == 1	&&
5408              seg_32bit == 0		&&
5409              limit_in_pages == 0	&&
5410              seg_not_present == 1	&&
5411              useable == 0 )) {
5412             entry_1 = 0;
5413             entry_2 = 0;
5414             goto install;
5415         }
5416     }
5417 
5418     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5419         (ldt_info.limit & 0x0ffff);
5420     entry_2 = (ldt_info.base_addr & 0xff000000) |
5421         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5422         (ldt_info.limit & 0xf0000) |
5423         ((read_exec_only ^ 1) << 9) |
5424         (contents << 10) |
5425         ((seg_not_present ^ 1) << 15) |
5426         (seg_32bit << 22) |
5427         (limit_in_pages << 23) |
5428         (lm << 21) |
5429         0x7000;
5430     if (!oldmode)
5431         entry_2 |= (useable << 20);
5432 
5433     /* Install the new entry ...  */
5434 install:
5435     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5436     lp[0] = tswap32(entry_1);
5437     lp[1] = tswap32(entry_2);
5438     return 0;
5439 }
5440 
5441 /* specific and weird i386 syscalls */
5442 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5443                               unsigned long bytecount)
5444 {
5445     abi_long ret;
5446 
5447     switch (func) {
5448     case 0:
5449         ret = read_ldt(ptr, bytecount);
5450         break;
5451     case 1:
5452         ret = write_ldt(env, ptr, bytecount, 1);
5453         break;
5454     case 0x11:
5455         ret = write_ldt(env, ptr, bytecount, 0);
5456         break;
5457     default:
5458         ret = -TARGET_ENOSYS;
5459         break;
5460     }
5461     return ret;
5462 }
5463 
5464 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5465 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5466 {
5467     uint64_t *gdt_table = g2h(env->gdt.base);
5468     struct target_modify_ldt_ldt_s ldt_info;
5469     struct target_modify_ldt_ldt_s *target_ldt_info;
5470     int seg_32bit, contents, read_exec_only, limit_in_pages;
5471     int seg_not_present, useable, lm;
5472     uint32_t *lp, entry_1, entry_2;
5473     int i;
5474 
5475     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5476     if (!target_ldt_info)
5477         return -TARGET_EFAULT;
5478     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5479     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5480     ldt_info.limit = tswap32(target_ldt_info->limit);
5481     ldt_info.flags = tswap32(target_ldt_info->flags);
5482     if (ldt_info.entry_number == -1) {
5483         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5484             if (gdt_table[i] == 0) {
5485                 ldt_info.entry_number = i;
5486                 target_ldt_info->entry_number = tswap32(i);
5487                 break;
5488             }
5489         }
5490     }
5491     unlock_user_struct(target_ldt_info, ptr, 1);
5492 
5493     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5494         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5495            return -TARGET_EINVAL;
5496     seg_32bit = ldt_info.flags & 1;
5497     contents = (ldt_info.flags >> 1) & 3;
5498     read_exec_only = (ldt_info.flags >> 3) & 1;
5499     limit_in_pages = (ldt_info.flags >> 4) & 1;
5500     seg_not_present = (ldt_info.flags >> 5) & 1;
5501     useable = (ldt_info.flags >> 6) & 1;
5502 #ifdef TARGET_ABI32
5503     lm = 0;
5504 #else
5505     lm = (ldt_info.flags >> 7) & 1;
5506 #endif
5507 
5508     if (contents == 3) {
5509         if (seg_not_present == 0)
5510             return -TARGET_EINVAL;
5511     }
5512 
5513     /* NOTE: same code as Linux kernel */
5514     /* Allow LDTs to be cleared by the user. */
5515     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5516         if ((contents == 0             &&
5517              read_exec_only == 1       &&
5518              seg_32bit == 0            &&
5519              limit_in_pages == 0       &&
5520              seg_not_present == 1      &&
5521              useable == 0 )) {
5522             entry_1 = 0;
5523             entry_2 = 0;
5524             goto install;
5525         }
5526     }
5527 
5528     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5529         (ldt_info.limit & 0x0ffff);
5530     entry_2 = (ldt_info.base_addr & 0xff000000) |
5531         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5532         (ldt_info.limit & 0xf0000) |
5533         ((read_exec_only ^ 1) << 9) |
5534         (contents << 10) |
5535         ((seg_not_present ^ 1) << 15) |
5536         (seg_32bit << 22) |
5537         (limit_in_pages << 23) |
5538         (useable << 20) |
5539         (lm << 21) |
5540         0x7000;
5541 
5542     /* Install the new entry ...  */
5543 install:
5544     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5545     lp[0] = tswap32(entry_1);
5546     lp[1] = tswap32(entry_2);
5547     return 0;
5548 }
5549 
5550 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5551 {
5552     struct target_modify_ldt_ldt_s *target_ldt_info;
5553     uint64_t *gdt_table = g2h(env->gdt.base);
5554     uint32_t base_addr, limit, flags;
5555     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5556     int seg_not_present, useable, lm;
5557     uint32_t *lp, entry_1, entry_2;
5558 
5559     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5560     if (!target_ldt_info)
5561         return -TARGET_EFAULT;
5562     idx = tswap32(target_ldt_info->entry_number);
5563     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5564         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5565         unlock_user_struct(target_ldt_info, ptr, 1);
5566         return -TARGET_EINVAL;
5567     }
5568     lp = (uint32_t *)(gdt_table + idx);
5569     entry_1 = tswap32(lp[0]);
5570     entry_2 = tswap32(lp[1]);
5571 
5572     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5573     contents = (entry_2 >> 10) & 3;
5574     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5575     seg_32bit = (entry_2 >> 22) & 1;
5576     limit_in_pages = (entry_2 >> 23) & 1;
5577     useable = (entry_2 >> 20) & 1;
5578 #ifdef TARGET_ABI32
5579     lm = 0;
5580 #else
5581     lm = (entry_2 >> 21) & 1;
5582 #endif
5583     flags = (seg_32bit << 0) | (contents << 1) |
5584         (read_exec_only << 3) | (limit_in_pages << 4) |
5585         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5586     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5587     base_addr = (entry_1 >> 16) |
5588         (entry_2 & 0xff000000) |
5589         ((entry_2 & 0xff) << 16);
5590     target_ldt_info->base_addr = tswapal(base_addr);
5591     target_ldt_info->limit = tswap32(limit);
5592     target_ldt_info->flags = tswap32(flags);
5593     unlock_user_struct(target_ldt_info, ptr, 1);
5594     return 0;
5595 }
5596 #endif /* TARGET_I386 && TARGET_ABI32 */
5597 
5598 #ifndef TARGET_ABI32
5599 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5600 {
5601     abi_long ret = 0;
5602     abi_ulong val;
5603     int idx;
5604 
5605     switch(code) {
5606     case TARGET_ARCH_SET_GS:
5607     case TARGET_ARCH_SET_FS:
5608         if (code == TARGET_ARCH_SET_GS)
5609             idx = R_GS;
5610         else
5611             idx = R_FS;
5612         cpu_x86_load_seg(env, idx, 0);
5613         env->segs[idx].base = addr;
5614         break;
5615     case TARGET_ARCH_GET_GS:
5616     case TARGET_ARCH_GET_FS:
5617         if (code == TARGET_ARCH_GET_GS)
5618             idx = R_GS;
5619         else
5620             idx = R_FS;
5621         val = env->segs[idx].base;
5622         if (put_user(val, addr, abi_ulong))
5623             ret = -TARGET_EFAULT;
5624         break;
5625     default:
5626         ret = -TARGET_EINVAL;
5627         break;
5628     }
5629     return ret;
5630 }
5631 #endif
5632 
5633 #endif /* defined(TARGET_I386) */
5634 
5635 #define NEW_STACK_SIZE 0x40000
5636 
5637 
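/*
 * Thread creation handshake: do_fork() fills in a new_thread_info,
 * blocks all signals and starts clone_func() on a new host pthread.
 * The parent then waits on info.cond until the child has registered
 * itself and recorded its tid, while clone_lock keeps the child from
 * entering cpu_loop() before the parent has finished setting up.
 */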
5638 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5639 typedef struct {
5640     CPUArchState *env;
5641     pthread_mutex_t mutex;
5642     pthread_cond_t cond;
5643     pthread_t thread;
5644     uint32_t tid;
5645     abi_ulong child_tidptr;
5646     abi_ulong parent_tidptr;
5647     sigset_t sigmask;
5648 } new_thread_info;
5649 
5650 static void *clone_func(void *arg)
5651 {
5652     new_thread_info *info = arg;
5653     CPUArchState *env;
5654     CPUState *cpu;
5655     TaskState *ts;
5656 
5657     rcu_register_thread();
5658     tcg_register_thread();
5659     env = info->env;
5660     cpu = env_cpu(env);
5661     thread_cpu = cpu;
5662     ts = (TaskState *)cpu->opaque;
5663     info->tid = sys_gettid();
5664     task_settid(ts);
5665     if (info->child_tidptr)
5666         put_user_u32(info->tid, info->child_tidptr);
5667     if (info->parent_tidptr)
5668         put_user_u32(info->tid, info->parent_tidptr);
5669     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5670     /* Enable signals.  */
5671     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5672     /* Signal to the parent that we're ready.  */
5673     pthread_mutex_lock(&info->mutex);
5674     pthread_cond_broadcast(&info->cond);
5675     pthread_mutex_unlock(&info->mutex);
5676     /* Wait until the parent has finished initializing the tls state.  */
5677     pthread_mutex_lock(&clone_lock);
5678     pthread_mutex_unlock(&clone_lock);
5679     cpu_loop(env);
5680     /* never exits */
5681     return NULL;
5682 }
5683 
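/*
 * CLONE_VM requests share the address space and are therefore run as a
 * new host pthread executing clone_func() above; anything else is
 * treated as a fork(), and only the default SIGCHLD termination signal
 * is supported in that case.
 */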
5684 /* do_fork() must return host values and target errnos (unlike most
5685    do_*() functions). */
5686 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5687                    abi_ulong parent_tidptr, target_ulong newtls,
5688                    abi_ulong child_tidptr)
5689 {
5690     CPUState *cpu = env_cpu(env);
5691     int ret;
5692     TaskState *ts;
5693     CPUState *new_cpu;
5694     CPUArchState *new_env;
5695     sigset_t sigmask;
5696 
5697     flags &= ~CLONE_IGNORED_FLAGS;
5698 
5699     /* Emulate vfork() with fork() */
5700     if (flags & CLONE_VFORK)
5701         flags &= ~(CLONE_VFORK | CLONE_VM);
5702 
5703     if (flags & CLONE_VM) {
5704         TaskState *parent_ts = (TaskState *)cpu->opaque;
5705         new_thread_info info;
5706         pthread_attr_t attr;
5707 
5708         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5709             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5710             return -TARGET_EINVAL;
5711         }
5712 
5713         ts = g_new0(TaskState, 1);
5714         init_task_state(ts);
5715 
5716         /* Grab a mutex so that thread setup appears atomic.  */
5717         pthread_mutex_lock(&clone_lock);
5718 
5719         /* We create a new CPU instance. */
5720         new_env = cpu_copy(env);
5721         /* Init regs that differ from the parent.  */
5722         cpu_clone_regs(new_env, newsp);
5723         new_cpu = env_cpu(new_env);
5724         new_cpu->opaque = ts;
5725         ts->bprm = parent_ts->bprm;
5726         ts->info = parent_ts->info;
5727         ts->signal_mask = parent_ts->signal_mask;
5728 
5729         if (flags & CLONE_CHILD_CLEARTID) {
5730             ts->child_tidptr = child_tidptr;
5731         }
5732 
5733         if (flags & CLONE_SETTLS) {
5734             cpu_set_tls (new_env, newtls);
5735         }
5736 
5737         memset(&info, 0, sizeof(info));
5738         pthread_mutex_init(&info.mutex, NULL);
5739         pthread_mutex_lock(&info.mutex);
5740         pthread_cond_init(&info.cond, NULL);
5741         info.env = new_env;
5742         if (flags & CLONE_CHILD_SETTID) {
5743             info.child_tidptr = child_tidptr;
5744         }
5745         if (flags & CLONE_PARENT_SETTID) {
5746             info.parent_tidptr = parent_tidptr;
5747         }
5748 
5749         ret = pthread_attr_init(&attr);
5750         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5751         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5752         /* It is not safe to deliver signals until the child has finished
5753            initializing, so temporarily block all signals.  */
5754         sigfillset(&sigmask);
5755         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5756         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5757 
5758         /* If this is our first additional thread, we need to ensure we
5759          * generate code for parallel execution and flush old translations.
5760          */
5761         if (!parallel_cpus) {
5762             parallel_cpus = true;
5763             tb_flush(cpu);
5764         }
5765 
5766         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5767         /* TODO: Free new CPU state if thread creation failed.  */
5768 
5769         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5770         pthread_attr_destroy(&attr);
5771         if (ret == 0) {
5772             /* Wait for the child to initialize.  */
5773             pthread_cond_wait(&info.cond, &info.mutex);
5774             ret = info.tid;
5775         } else {
5776             ret = -1;
5777         }
5778         pthread_mutex_unlock(&info.mutex);
5779         pthread_cond_destroy(&info.cond);
5780         pthread_mutex_destroy(&info.mutex);
5781         pthread_mutex_unlock(&clone_lock);
5782     } else {
5783         /* If CLONE_VM is not set, we treat this as a fork. */
5784         if (flags & CLONE_INVALID_FORK_FLAGS) {
5785             return -TARGET_EINVAL;
5786         }
5787 
5788         /* We can't support custom termination signals */
5789         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5790             return -TARGET_EINVAL;
5791         }
5792 
5793         if (block_signals()) {
5794             return -TARGET_ERESTARTSYS;
5795         }
5796 
5797         fork_start();
5798         ret = fork();
5799         if (ret == 0) {
5800             /* Child Process.  */
5801             cpu_clone_regs(env, newsp);
5802             fork_end(1);
5803             /* There is a race condition here.  The parent process could
5804                theoretically read the TID in the child process before the child
5805                tid is set.  This would require using either ptrace
5806                (not implemented) or having *_tidptr point at a shared memory
5807                mapping.  We can't repeat the spinlock hack used above because
5808                the child process gets its own copy of the lock.  */
5809             if (flags & CLONE_CHILD_SETTID)
5810                 put_user_u32(sys_gettid(), child_tidptr);
5811             if (flags & CLONE_PARENT_SETTID)
5812                 put_user_u32(sys_gettid(), parent_tidptr);
5813             ts = (TaskState *)cpu->opaque;
5814             if (flags & CLONE_SETTLS)
5815                 cpu_set_tls (env, newtls);
5816             if (flags & CLONE_CHILD_CLEARTID)
5817                 ts->child_tidptr = child_tidptr;
5818         } else {
5819             fork_end(0);
5820         }
5821     }
5822     return ret;
5823 }
5824 
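/*
 * target_to_host_fcntl_cmd() maps guest F_* commands onto host ones.
 * Record-locking commands are always mapped to the 64-bit F_*LK64
 * variants so that every guest flock layout can be serviced through a
 * host struct flock64.
 */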
5825 /* Warning: doesn't handle Linux-specific flags... */
5826 static int target_to_host_fcntl_cmd(int cmd)
5827 {
5828     int ret;
5829 
5830     switch(cmd) {
5831     case TARGET_F_DUPFD:
5832     case TARGET_F_GETFD:
5833     case TARGET_F_SETFD:
5834     case TARGET_F_GETFL:
5835     case TARGET_F_SETFL:
5836         ret = cmd;
5837         break;
5838     case TARGET_F_GETLK:
5839         ret = F_GETLK64;
5840         break;
5841     case TARGET_F_SETLK:
5842         ret = F_SETLK64;
5843         break;
5844     case TARGET_F_SETLKW:
5845         ret = F_SETLKW64;
5846         break;
5847     case TARGET_F_GETOWN:
5848         ret = F_GETOWN;
5849         break;
5850     case TARGET_F_SETOWN:
5851         ret = F_SETOWN;
5852         break;
5853     case TARGET_F_GETSIG:
5854         ret = F_GETSIG;
5855         break;
5856     case TARGET_F_SETSIG:
5857         ret = F_SETSIG;
5858         break;
5859 #if TARGET_ABI_BITS == 32
5860     case TARGET_F_GETLK64:
5861         ret = F_GETLK64;
5862         break;
5863     case TARGET_F_SETLK64:
5864         ret = F_SETLK64;
5865         break;
5866     case TARGET_F_SETLKW64:
5867         ret = F_SETLKW64;
5868         break;
5869 #endif
5870     case TARGET_F_SETLEASE:
5871         ret = F_SETLEASE;
5872         break;
5873     case TARGET_F_GETLEASE:
5874         ret = F_GETLEASE;
5875         break;
5876 #ifdef F_DUPFD_CLOEXEC
5877     case TARGET_F_DUPFD_CLOEXEC:
5878         ret = F_DUPFD_CLOEXEC;
5879         break;
5880 #endif
5881     case TARGET_F_NOTIFY:
5882         ret = F_NOTIFY;
5883         break;
5884 #ifdef F_GETOWN_EX
5885     case TARGET_F_GETOWN_EX:
5886         ret = F_GETOWN_EX;
5887         break;
5888 #endif
5889 #ifdef F_SETOWN_EX
5890     case TARGET_F_SETOWN_EX:
5891         ret = F_SETOWN_EX;
5892         break;
5893 #endif
5894 #ifdef F_SETPIPE_SZ
5895     case TARGET_F_SETPIPE_SZ:
5896         ret = F_SETPIPE_SZ;
5897         break;
5898     case TARGET_F_GETPIPE_SZ:
5899         ret = F_GETPIPE_SZ;
5900         break;
5901 #endif
5902     default:
5903         ret = -TARGET_EINVAL;
5904         break;
5905     }
5906 
5907 #if defined(__powerpc64__)
5908     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
5909      * that the kernel does not support. The glibc fcntl wrapper adjusts
5910      * them to 5, 6 and 7 before making the syscall(). Since we make the
5911      * syscall directly, adjust to what the kernel supports.
5912      */
5913     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5914         ret -= F_GETLK64 - 5;
5915     }
5916 #endif
5917 
5918     return ret;
5919 }
5920 
5921 #define FLOCK_TRANSTBL \
5922     switch (type) { \
5923     TRANSTBL_CONVERT(F_RDLCK); \
5924     TRANSTBL_CONVERT(F_WRLCK); \
5925     TRANSTBL_CONVERT(F_UNLCK); \
5926     TRANSTBL_CONVERT(F_EXLCK); \
5927     TRANSTBL_CONVERT(F_SHLCK); \
5928     }
5929 
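/*
 * FLOCK_TRANSTBL is expanded twice with different definitions of
 * TRANSTBL_CONVERT so that a single list of lock types yields both
 * conversion directions; in target_to_host_flock() each line expands to
 * roughly "case TARGET_F_RDLCK: return F_RDLCK;".
 */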
5930 static int target_to_host_flock(int type)
5931 {
5932 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5933     FLOCK_TRANSTBL
5934 #undef  TRANSTBL_CONVERT
5935     return -TARGET_EINVAL;
5936 }
5937 
5938 static int host_to_target_flock(int type)
5939 {
5940 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5941     FLOCK_TRANSTBL
5942 #undef  TRANSTBL_CONVERT
5943     /* If we don't know how to convert the value coming
5944      * from the host, we copy it to the target field as-is.
5945      */
5946     return type;
5947 }
5948 
5949 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5950                                             abi_ulong target_flock_addr)
5951 {
5952     struct target_flock *target_fl;
5953     int l_type;
5954 
5955     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5956         return -TARGET_EFAULT;
5957     }
5958 
5959     __get_user(l_type, &target_fl->l_type);
5960     l_type = target_to_host_flock(l_type);
5961     if (l_type < 0) {
5962         return l_type;
5963     }
5964     fl->l_type = l_type;
5965     __get_user(fl->l_whence, &target_fl->l_whence);
5966     __get_user(fl->l_start, &target_fl->l_start);
5967     __get_user(fl->l_len, &target_fl->l_len);
5968     __get_user(fl->l_pid, &target_fl->l_pid);
5969     unlock_user_struct(target_fl, target_flock_addr, 0);
5970     return 0;
5971 }
5972 
5973 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5974                                           const struct flock64 *fl)
5975 {
5976     struct target_flock *target_fl;
5977     short l_type;
5978 
5979     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5980         return -TARGET_EFAULT;
5981     }
5982 
5983     l_type = host_to_target_flock(fl->l_type);
5984     __put_user(l_type, &target_fl->l_type);
5985     __put_user(fl->l_whence, &target_fl->l_whence);
5986     __put_user(fl->l_start, &target_fl->l_start);
5987     __put_user(fl->l_len, &target_fl->l_len);
5988     __put_user(fl->l_pid, &target_fl->l_pid);
5989     unlock_user_struct(target_fl, target_flock_addr, 1);
5990     return 0;
5991 }
5992 
5993 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5994 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5995 
5996 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5997 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5998                                                    abi_ulong target_flock_addr)
5999 {
6000     struct target_oabi_flock64 *target_fl;
6001     int l_type;
6002 
6003     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6004         return -TARGET_EFAULT;
6005     }
6006 
6007     __get_user(l_type, &target_fl->l_type);
6008     l_type = target_to_host_flock(l_type);
6009     if (l_type < 0) {
6010         return l_type;
6011     }
6012     fl->l_type = l_type;
6013     __get_user(fl->l_whence, &target_fl->l_whence);
6014     __get_user(fl->l_start, &target_fl->l_start);
6015     __get_user(fl->l_len, &target_fl->l_len);
6016     __get_user(fl->l_pid, &target_fl->l_pid);
6017     unlock_user_struct(target_fl, target_flock_addr, 0);
6018     return 0;
6019 }
6020 
6021 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6022                                                  const struct flock64 *fl)
6023 {
6024     struct target_oabi_flock64 *target_fl;
6025     short l_type;
6026 
6027     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6028         return -TARGET_EFAULT;
6029     }
6030 
6031     l_type = host_to_target_flock(fl->l_type);
6032     __put_user(l_type, &target_fl->l_type);
6033     __put_user(fl->l_whence, &target_fl->l_whence);
6034     __put_user(fl->l_start, &target_fl->l_start);
6035     __put_user(fl->l_len, &target_fl->l_len);
6036     __put_user(fl->l_pid, &target_fl->l_pid);
6037     unlock_user_struct(target_fl, target_flock_addr, 1);
6038     return 0;
6039 }
6040 #endif
6041 
6042 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6043                                               abi_ulong target_flock_addr)
6044 {
6045     struct target_flock64 *target_fl;
6046     int l_type;
6047 
6048     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6049         return -TARGET_EFAULT;
6050     }
6051 
6052     __get_user(l_type, &target_fl->l_type);
6053     l_type = target_to_host_flock(l_type);
6054     if (l_type < 0) {
6055         return l_type;
6056     }
6057     fl->l_type = l_type;
6058     __get_user(fl->l_whence, &target_fl->l_whence);
6059     __get_user(fl->l_start, &target_fl->l_start);
6060     __get_user(fl->l_len, &target_fl->l_len);
6061     __get_user(fl->l_pid, &target_fl->l_pid);
6062     unlock_user_struct(target_fl, target_flock_addr, 0);
6063     return 0;
6064 }
6065 
6066 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6067                                             const struct flock64 *fl)
6068 {
6069     struct target_flock64 *target_fl;
6070     short l_type;
6071 
6072     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6073         return -TARGET_EFAULT;
6074     }
6075 
6076     l_type = host_to_target_flock(fl->l_type);
6077     __put_user(l_type, &target_fl->l_type);
6078     __put_user(fl->l_whence, &target_fl->l_whence);
6079     __put_user(fl->l_start, &target_fl->l_start);
6080     __put_user(fl->l_len, &target_fl->l_len);
6081     __put_user(fl->l_pid, &target_fl->l_pid);
6082     unlock_user_struct(target_fl, target_flock_addr, 1);
6083     return 0;
6084 }
6085 
6086 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6087 {
6088     struct flock64 fl64;
6089 #ifdef F_GETOWN_EX
6090     struct f_owner_ex fox;
6091     struct target_f_owner_ex *target_fox;
6092 #endif
6093     abi_long ret;
6094     int host_cmd = target_to_host_fcntl_cmd(cmd);
6095 
6096     if (host_cmd == -TARGET_EINVAL)
6097         return host_cmd;
6098 
6099     switch(cmd) {
6100     case TARGET_F_GETLK:
6101         ret = copy_from_user_flock(&fl64, arg);
6102         if (ret) {
6103             return ret;
6104         }
6105         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6106         if (ret == 0) {
6107             ret = copy_to_user_flock(arg, &fl64);
6108         }
6109         break;
6110 
6111     case TARGET_F_SETLK:
6112     case TARGET_F_SETLKW:
6113         ret = copy_from_user_flock(&fl64, arg);
6114         if (ret) {
6115             return ret;
6116         }
6117         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6118         break;
6119 
6120     case TARGET_F_GETLK64:
6121         ret = copy_from_user_flock64(&fl64, arg);
6122         if (ret) {
6123             return ret;
6124         }
6125         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6126         if (ret == 0) {
6127             ret = copy_to_user_flock64(arg, &fl64);
6128         }
6129         break;
6130     case TARGET_F_SETLK64:
6131     case TARGET_F_SETLKW64:
6132         ret = copy_from_user_flock64(&fl64, arg);
6133         if (ret) {
6134             return ret;
6135         }
6136         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6137         break;
6138 
6139     case TARGET_F_GETFL:
6140         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6141         if (ret >= 0) {
6142             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6143         }
6144         break;
6145 
6146     case TARGET_F_SETFL:
6147         ret = get_errno(safe_fcntl(fd, host_cmd,
6148                                    target_to_host_bitmask(arg,
6149                                                           fcntl_flags_tbl)));
6150         break;
6151 
6152 #ifdef F_GETOWN_EX
6153     case TARGET_F_GETOWN_EX:
6154         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6155         if (ret >= 0) {
6156             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6157                 return -TARGET_EFAULT;
6158             target_fox->type = tswap32(fox.type);
6159             target_fox->pid = tswap32(fox.pid);
6160             unlock_user_struct(target_fox, arg, 1);
6161         }
6162         break;
6163 #endif
6164 
6165 #ifdef F_SETOWN_EX
6166     case TARGET_F_SETOWN_EX:
6167         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6168             return -TARGET_EFAULT;
6169         fox.type = tswap32(target_fox->type);
6170         fox.pid = tswap32(target_fox->pid);
6171         unlock_user_struct(target_fox, arg, 0);
6172         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6173         break;
6174 #endif
6175 
6176     case TARGET_F_SETOWN:
6177     case TARGET_F_GETOWN:
6178     case TARGET_F_SETSIG:
6179     case TARGET_F_GETSIG:
6180     case TARGET_F_SETLEASE:
6181     case TARGET_F_GETLEASE:
6182     case TARGET_F_SETPIPE_SZ:
6183     case TARGET_F_GETPIPE_SZ:
6184         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6185         break;
6186 
6187     default:
6188         ret = get_errno(safe_fcntl(fd, cmd, arg));
6189         break;
6190     }
6191     return ret;
6192 }
6193 
6194 #ifdef USE_UID16
6195 
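/* Helpers for targets using the legacy 16-bit UID/GID ABI: IDs that do
 * not fit in 16 bits are reported as the overflow ID (65534), and the
 * 16-bit -1 ("no change") value is widened back to -1.
 */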
6196 static inline int high2lowuid(int uid)
6197 {
6198     if (uid > 65535)
6199         return 65534;
6200     else
6201         return uid;
6202 }
6203 
6204 static inline int high2lowgid(int gid)
6205 {
6206     if (gid > 65535)
6207         return 65534;
6208     else
6209         return gid;
6210 }
6211 
6212 static inline int low2highuid(int uid)
6213 {
6214     if ((int16_t)uid == -1)
6215         return -1;
6216     else
6217         return uid;
6218 }
6219 
6220 static inline int low2highgid(int gid)
6221 {
6222     if ((int16_t)gid == -1)
6223         return -1;
6224     else
6225         return gid;
6226 }
6227 static inline int tswapid(int id)
6228 {
6229     return tswap16(id);
6230 }
6231 
6232 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6233 
6234 #else /* !USE_UID16 */
6235 static inline int high2lowuid(int uid)
6236 {
6237     return uid;
6238 }
6239 static inline int high2lowgid(int gid)
6240 {
6241     return gid;
6242 }
6243 static inline int low2highuid(int uid)
6244 {
6245     return uid;
6246 }
6247 static inline int low2highgid(int gid)
6248 {
6249     return gid;
6250 }
6251 static inline int tswapid(int id)
6252 {
6253     return tswap32(id);
6254 }
6255 
6256 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6257 
6258 #endif /* USE_UID16 */
6259 
6260 /* We must do direct syscalls for setting UID/GID, because we want to
6261  * implement the Linux system call semantics of "change only for this thread",
6262  * not the libc/POSIX semantics of "change for all threads in process".
6263  * (See http://ewontfix.com/17/ for more details.)
6264  * We use the 32-bit version of the syscalls if present; if it is not
6265  * then either the host architecture supports 32-bit UIDs natively with
6266  * the standard syscall, or the 16-bit UID is the best we can do.
6267  */
6268 #ifdef __NR_setuid32
6269 #define __NR_sys_setuid __NR_setuid32
6270 #else
6271 #define __NR_sys_setuid __NR_setuid
6272 #endif
6273 #ifdef __NR_setgid32
6274 #define __NR_sys_setgid __NR_setgid32
6275 #else
6276 #define __NR_sys_setgid __NR_setgid
6277 #endif
6278 #ifdef __NR_setresuid32
6279 #define __NR_sys_setresuid __NR_setresuid32
6280 #else
6281 #define __NR_sys_setresuid __NR_setresuid
6282 #endif
6283 #ifdef __NR_setresgid32
6284 #define __NR_sys_setresgid __NR_setresgid32
6285 #else
6286 #define __NR_sys_setresgid __NR_setresgid
6287 #endif
6288 
6289 _syscall1(int, sys_setuid, uid_t, uid)
6290 _syscall1(int, sys_setgid, gid_t, gid)
6291 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6292 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6293 
6294 void syscall_init(void)
6295 {
6296     IOCTLEntry *ie;
6297     const argtype *arg_type;
6298     int size;
6299     int i;
6300 
6301     thunk_init(STRUCT_MAX);
6302 
6303 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6304 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6305 #include "syscall_types.h"
6306 #undef STRUCT
6307 #undef STRUCT_SPECIAL
6308 
6309     /* Build target_to_host_errno_table[] from
6310      * host_to_target_errno_table[]. */
6311     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6312         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6313     }
6314 
6315     /* We patch the ioctl size if necessary.  We rely on the fact that
6316        no ioctl has all the bits set to '1' in the size field. */
6317     ie = ioctl_entries;
6318     while (ie->target_cmd != 0) {
6319         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6320             TARGET_IOC_SIZEMASK) {
6321             arg_type = ie->arg_type;
6322             if (arg_type[0] != TYPE_PTR) {
6323                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6324                         ie->target_cmd);
6325                 exit(1);
6326             }
6327             arg_type++;
6328             size = thunk_type_size(arg_type, 0);
6329             ie->target_cmd = (ie->target_cmd &
6330                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6331                 (size << TARGET_IOC_SIZESHIFT);
6332         }
6333 
6334         /* automatic consistency check if same arch */
6335 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6336     (defined(__x86_64__) && defined(TARGET_X86_64))
6337         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6338             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6339                     ie->name, ie->target_cmd, ie->host_cmd);
6340         }
6341 #endif
6342         ie++;
6343     }
6344 }
6345 
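/* Reassemble a 64-bit file offset that 32-bit ABIs pass as a pair of
 * registers; the word order depends on the target's endianness.  On
 * 64-bit ABIs the first argument already holds the full offset.
 */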
6346 #if TARGET_ABI_BITS == 32
6347 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6348 {
6349 #ifdef TARGET_WORDS_BIGENDIAN
6350     return ((uint64_t)word0 << 32) | word1;
6351 #else
6352     return ((uint64_t)word1 << 32) | word0;
6353 #endif
6354 }
6355 #else /* TARGET_ABI_BITS == 32 */
6356 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6357 {
6358     return word0;
6359 }
6360 #endif /* TARGET_ABI_BITS != 32 */
6361 
6362 #ifdef TARGET_NR_truncate64
6363 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6364                                          abi_long arg2,
6365                                          abi_long arg3,
6366                                          abi_long arg4)
6367 {
6368     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6369         arg2 = arg3;
6370         arg3 = arg4;
6371     }
6372     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6373 }
6374 #endif
6375 
6376 #ifdef TARGET_NR_ftruncate64
6377 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6378                                           abi_long arg2,
6379                                           abi_long arg3,
6380                                           abi_long arg4)
6381 {
6382     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6383         arg2 = arg3;
6384         arg3 = arg4;
6385     }
6386     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6387 }
6388 #endif
6389 
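/* Conversion helpers between the guest and host representations of
 * struct itimerspec and struct timex.
 */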
6390 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6391                                                  abi_ulong target_addr)
6392 {
6393     struct target_itimerspec *target_itspec;
6394 
6395     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6396         return -TARGET_EFAULT;
6397     }
6398 
6399     host_itspec->it_interval.tv_sec =
6400                             tswapal(target_itspec->it_interval.tv_sec);
6401     host_itspec->it_interval.tv_nsec =
6402                             tswapal(target_itspec->it_interval.tv_nsec);
6403     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6404     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6405 
6406     unlock_user_struct(target_itspec, target_addr, 1);
6407     return 0;
6408 }
6409 
6410 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6411                                                struct itimerspec *host_its)
6412 {
6413     struct target_itimerspec *target_itspec;
6414 
6415     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6416         return -TARGET_EFAULT;
6417     }
6418 
6419     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6420     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6421 
6422     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6423     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6424 
6425     unlock_user_struct(target_itspec, target_addr, 0);
6426     return 0;
6427 }
6428 
6429 static inline abi_long target_to_host_timex(struct timex *host_tx,
6430                                             abi_long target_addr)
6431 {
6432     struct target_timex *target_tx;
6433 
6434     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6435         return -TARGET_EFAULT;
6436     }
6437 
6438     __get_user(host_tx->modes, &target_tx->modes);
6439     __get_user(host_tx->offset, &target_tx->offset);
6440     __get_user(host_tx->freq, &target_tx->freq);
6441     __get_user(host_tx->maxerror, &target_tx->maxerror);
6442     __get_user(host_tx->esterror, &target_tx->esterror);
6443     __get_user(host_tx->status, &target_tx->status);
6444     __get_user(host_tx->constant, &target_tx->constant);
6445     __get_user(host_tx->precision, &target_tx->precision);
6446     __get_user(host_tx->tolerance, &target_tx->tolerance);
6447     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6448     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6449     __get_user(host_tx->tick, &target_tx->tick);
6450     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6451     __get_user(host_tx->jitter, &target_tx->jitter);
6452     __get_user(host_tx->shift, &target_tx->shift);
6453     __get_user(host_tx->stabil, &target_tx->stabil);
6454     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6455     __get_user(host_tx->calcnt, &target_tx->calcnt);
6456     __get_user(host_tx->errcnt, &target_tx->errcnt);
6457     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6458     __get_user(host_tx->tai, &target_tx->tai);
6459 
6460     unlock_user_struct(target_tx, target_addr, 0);
6461     return 0;
6462 }
6463 
6464 static inline abi_long host_to_target_timex(abi_long target_addr,
6465                                             struct timex *host_tx)
6466 {
6467     struct target_timex *target_tx;
6468 
6469     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6470         return -TARGET_EFAULT;
6471     }
6472 
6473     __put_user(host_tx->modes, &target_tx->modes);
6474     __put_user(host_tx->offset, &target_tx->offset);
6475     __put_user(host_tx->freq, &target_tx->freq);
6476     __put_user(host_tx->maxerror, &target_tx->maxerror);
6477     __put_user(host_tx->esterror, &target_tx->esterror);
6478     __put_user(host_tx->status, &target_tx->status);
6479     __put_user(host_tx->constant, &target_tx->constant);
6480     __put_user(host_tx->precision, &target_tx->precision);
6481     __put_user(host_tx->tolerance, &target_tx->tolerance);
6482     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6483     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6484     __put_user(host_tx->tick, &target_tx->tick);
6485     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6486     __put_user(host_tx->jitter, &target_tx->jitter);
6487     __put_user(host_tx->shift, &target_tx->shift);
6488     __put_user(host_tx->stabil, &target_tx->stabil);
6489     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6490     __put_user(host_tx->calcnt, &target_tx->calcnt);
6491     __put_user(host_tx->errcnt, &target_tx->errcnt);
6492     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6493     __put_user(host_tx->tai, &target_tx->tai);
6494 
6495     unlock_user_struct(target_tx, target_addr, 1);
6496     return 0;
6497 }
6498 
6499 
6500 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6501                                                abi_ulong target_addr)
6502 {
6503     struct target_sigevent *target_sevp;
6504 
6505     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6506         return -TARGET_EFAULT;
6507     }
6508 
6509     /* This union is awkward on 64 bit systems because it has a 32 bit
6510      * integer and a pointer in it; we follow the conversion approach
6511      * used for handling sigval types in signal.c so the guest should get
6512      * the correct value back even if we did a 64 bit byteswap and it's
6513      * using the 32 bit integer.
6514      */
6515     host_sevp->sigev_value.sival_ptr =
6516         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6517     host_sevp->sigev_signo =
6518         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6519     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6520     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6521 
6522     unlock_user_struct(target_sevp, target_addr, 1);
6523     return 0;
6524 }
6525 
6526 #if defined(TARGET_NR_mlockall)
6527 static inline int target_to_host_mlockall_arg(int arg)
6528 {
6529     int result = 0;
6530 
6531     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6532         result |= MCL_CURRENT;
6533     }
6534     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6535         result |= MCL_FUTURE;
6536     }
6537     return result;
6538 }
6539 #endif
6540 
6541 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6542      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6543      defined(TARGET_NR_newfstatat))
6544 static inline abi_long host_to_target_stat64(void *cpu_env,
6545                                              abi_ulong target_addr,
6546                                              struct stat *host_st)
6547 {
6548 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6549     if (((CPUARMState *)cpu_env)->eabi) {
6550         struct target_eabi_stat64 *target_st;
6551 
6552         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6553             return -TARGET_EFAULT;
6554         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6555         __put_user(host_st->st_dev, &target_st->st_dev);
6556         __put_user(host_st->st_ino, &target_st->st_ino);
6557 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6558         __put_user(host_st->st_ino, &target_st->__st_ino);
6559 #endif
6560         __put_user(host_st->st_mode, &target_st->st_mode);
6561         __put_user(host_st->st_nlink, &target_st->st_nlink);
6562         __put_user(host_st->st_uid, &target_st->st_uid);
6563         __put_user(host_st->st_gid, &target_st->st_gid);
6564         __put_user(host_st->st_rdev, &target_st->st_rdev);
6565         __put_user(host_st->st_size, &target_st->st_size);
6566         __put_user(host_st->st_blksize, &target_st->st_blksize);
6567         __put_user(host_st->st_blocks, &target_st->st_blocks);
6568         __put_user(host_st->st_atime, &target_st->target_st_atime);
6569         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6570         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6571 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6572         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6573         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6574         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6575 #endif
6576         unlock_user_struct(target_st, target_addr, 1);
6577     } else
6578 #endif
6579     {
6580 #if defined(TARGET_HAS_STRUCT_STAT64)
6581         struct target_stat64 *target_st;
6582 #else
6583         struct target_stat *target_st;
6584 #endif
6585 
6586         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6587             return -TARGET_EFAULT;
6588         memset(target_st, 0, sizeof(*target_st));
6589         __put_user(host_st->st_dev, &target_st->st_dev);
6590         __put_user(host_st->st_ino, &target_st->st_ino);
6591 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6592         __put_user(host_st->st_ino, &target_st->__st_ino);
6593 #endif
6594         __put_user(host_st->st_mode, &target_st->st_mode);
6595         __put_user(host_st->st_nlink, &target_st->st_nlink);
6596         __put_user(host_st->st_uid, &target_st->st_uid);
6597         __put_user(host_st->st_gid, &target_st->st_gid);
6598         __put_user(host_st->st_rdev, &target_st->st_rdev);
6599         /* XXX: better use of kernel struct */
6600         __put_user(host_st->st_size, &target_st->st_size);
6601         __put_user(host_st->st_blksize, &target_st->st_blksize);
6602         __put_user(host_st->st_blocks, &target_st->st_blocks);
6603         __put_user(host_st->st_atime, &target_st->target_st_atime);
6604         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6605         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6606 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6607         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6608         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6609         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6610 #endif
6611         unlock_user_struct(target_st, target_addr, 1);
6612     }
6613 
6614     return 0;
6615 }
6616 #endif
6617 
6618 #if defined(TARGET_NR_statx) && defined(__NR_statx)
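/* Copy statx() results out to guest memory, swapping each field into
 * the guest's byte order.
 */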
6619 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6620                                             abi_ulong target_addr)
6621 {
6622     struct target_statx *target_stx;
6623 
6624     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6625         return -TARGET_EFAULT;
6626     }
6627     memset(target_stx, 0, sizeof(*target_stx));
6628 
6629     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6630     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6631     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6632     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6633     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6634     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6635     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6636     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6637     __put_user(host_stx->stx_size, &target_stx->stx_size);
6638     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6639     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6640     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6641     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6642     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6643     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6644     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6645     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6646     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6647     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6648     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6649     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6650     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6651     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6652 
6653     unlock_user_struct(target_stx, target_addr, 1);
6654 
6655     return 0;
6656 }
6657 #endif
6658 
6659 
6660 /* ??? Using host futex calls even when target atomic operations
6661    are not really atomic probably breaks things.  However, implementing
6662    futexes locally would make futexes shared between multiple processes
6663    tricky.  In any case they're probably useless because guest atomic
6664    operations won't work either.  */
6665 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6666                     target_ulong uaddr2, int val3)
6667 {
6668     struct timespec ts, *pts;
6669     int base_op;
6670 
6671     /* ??? We assume FUTEX_* constants are the same on both host
6672        and target.  */
6673 #ifdef FUTEX_CMD_MASK
6674     base_op = op & FUTEX_CMD_MASK;
6675 #else
6676     base_op = op;
6677 #endif
6678     switch (base_op) {
6679     case FUTEX_WAIT:
6680     case FUTEX_WAIT_BITSET:
6681         if (timeout) {
6682             pts = &ts;
6683             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
6684         } else {
6685             pts = NULL;
6686         }
6687         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6688                          pts, NULL, val3));
6689     case FUTEX_WAKE:
6690         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6691     case FUTEX_FD:
6692         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6693     case FUTEX_REQUEUE:
6694     case FUTEX_CMP_REQUEUE:
6695     case FUTEX_WAKE_OP:
6696         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6697            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6698            But the prototype takes a `struct timespec *'; insert casts
6699            to satisfy the compiler.  We do not need to tswap TIMEOUT
6700            since it's not compared to guest memory.  */
6701         pts = (struct timespec *)(uintptr_t) timeout;
6702         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6703                                     g2h(uaddr2),
6704                                     (base_op == FUTEX_CMP_REQUEUE
6705                                      ? tswap32(val3)
6706                                      : val3)));
6707     default:
6708         return -TARGET_ENOSYS;
6709     }
6710 }
6711 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
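/* Emulate name_to_handle_at(): the handle payload is opaque, so only the
 * handle_bytes and handle_type fields need byte swapping on the way back
 * to the guest.
 */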
6712 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6713                                      abi_long handle, abi_long mount_id,
6714                                      abi_long flags)
6715 {
6716     struct file_handle *target_fh;
6717     struct file_handle *fh;
6718     int mid = 0;
6719     abi_long ret;
6720     char *name;
6721     unsigned int size, total_size;
6722 
6723     if (get_user_s32(size, handle)) {
6724         return -TARGET_EFAULT;
6725     }
6726 
6727     name = lock_user_string(pathname);
6728     if (!name) {
6729         return -TARGET_EFAULT;
6730     }
6731 
6732     total_size = sizeof(struct file_handle) + size;
6733     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6734     if (!target_fh) {
6735         unlock_user(name, pathname, 0);
6736         return -TARGET_EFAULT;
6737     }
6738 
6739     fh = g_malloc0(total_size);
6740     fh->handle_bytes = size;
6741 
6742     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6743     unlock_user(name, pathname, 0);
6744 
6745     /* man name_to_handle_at(2):
6746      * Other than the use of the handle_bytes field, the caller should treat
6747      * the file_handle structure as an opaque data type
6748      */
6749 
6750     memcpy(target_fh, fh, total_size);
6751     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6752     target_fh->handle_type = tswap32(fh->handle_type);
6753     g_free(fh);
6754     unlock_user(target_fh, handle, total_size);
6755 
6756     if (put_user_s32(mid, mount_id)) {
6757         return -TARGET_EFAULT;
6758     }
6759 
6760     return ret;
6761 
6762 }
6763 #endif
6764 
6765 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6766 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6767                                      abi_long flags)
6768 {
6769     struct file_handle *target_fh;
6770     struct file_handle *fh;
6771     unsigned int size, total_size;
6772     abi_long ret;
6773 
6774     if (get_user_s32(size, handle)) {
6775         return -TARGET_EFAULT;
6776     }
6777 
6778     total_size = sizeof(struct file_handle) + size;
6779     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6780     if (!target_fh) {
6781         return -TARGET_EFAULT;
6782     }
6783 
6784     fh = g_memdup(target_fh, total_size);
6785     fh->handle_bytes = size;
6786     fh->handle_type = tswap32(target_fh->handle_type);
6787 
6788     ret = get_errno(open_by_handle_at(mount_fd, fh,
6789                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6790 
6791     g_free(fh);
6792 
6793     unlock_user(target_fh, handle, total_size);
6794 
6795     return ret;
6796 }
6797 #endif
6798 
6799 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6800 
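/* Common helper for signalfd() and signalfd4(): converts the guest signal
 * mask and flags, and registers an fd translator for the new descriptor.
 */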
6801 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6802 {
6803     int host_flags;
6804     target_sigset_t *target_mask;
6805     sigset_t host_mask;
6806     abi_long ret;
6807 
6808     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6809         return -TARGET_EINVAL;
6810     }
6811     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6812         return -TARGET_EFAULT;
6813     }
6814 
6815     target_to_host_sigset(&host_mask, target_mask);
6816 
6817     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6818 
6819     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6820     if (ret >= 0) {
6821         fd_trans_register(ret, &target_signalfd_trans);
6822     }
6823 
6824     unlock_user_struct(target_mask, mask, 0);
6825 
6826     return ret;
6827 }
6828 #endif
6829 
6830 /* Map host to target signal numbers for the wait family of syscalls.
6831    Assume all other status bits are the same.  */
6832 int host_to_target_waitstatus(int status)
6833 {
6834     if (WIFSIGNALED(status)) {
6835         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6836     }
6837     if (WIFSTOPPED(status)) {
6838         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6839                | (status & 0xff);
6840     }
6841     return status;
6842 }
6843 
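/* Emulated /proc/self/cmdline: write out the guest's argv[] strings,
 * each including its terminating NUL byte.
 */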
6844 static int open_self_cmdline(void *cpu_env, int fd)
6845 {
6846     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6847     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6848     int i;
6849 
6850     for (i = 0; i < bprm->argc; i++) {
6851         size_t len = strlen(bprm->argv[i]) + 1;
6852 
6853         if (write(fd, bprm->argv[i], len) != len) {
6854             return -1;
6855         }
6856     }
6857 
6858     return 0;
6859 }
6860 
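/* Emulated /proc/self/maps: parse the host's maps file and re-emit each
 * mapping that is visible to the guest using guest virtual addresses.
 */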
6861 static int open_self_maps(void *cpu_env, int fd)
6862 {
6863     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6864     TaskState *ts = cpu->opaque;
6865     FILE *fp;
6866     char *line = NULL;
6867     size_t len = 0;
6868     ssize_t read;
6869 
6870     fp = fopen("/proc/self/maps", "r");
6871     if (fp == NULL) {
6872         return -1;
6873     }
6874 
6875     while ((read = getline(&line, &len, fp)) != -1) {
6876         int fields, dev_maj, dev_min, inode;
6877         uint64_t min, max, offset;
6878         char flag_r, flag_w, flag_x, flag_p;
6879         char path[513] = "";
6880         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6881                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6882                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6883 
6884         if ((fields < 10) || (fields > 11)) {
6885             continue;
6886         }
6887         if (h2g_valid(min)) {
6888             int flags = page_get_flags(h2g(min));
6889             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6890             if (page_check_range(h2g(min), max - min, flags) == -1) {
6891                 continue;
6892             }
6893             if (h2g(min) == ts->info->stack_limit) {
6894                 pstrcpy(path, sizeof(path), "      [stack]");
6895             }
6896             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6897                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6898                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6899                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6900                     path[0] ? "         " : "", path);
6901         }
6902     }
6903 
6904     free(line);
6905     fclose(fp);
6906 
6907     return 0;
6908 }
6909 
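/* Emulated /proc/self/stat: only the pid, command name and start-of-stack
 * fields carry real values; every other field reads as 0.
 */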
6910 static int open_self_stat(void *cpu_env, int fd)
6911 {
6912     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6913     TaskState *ts = cpu->opaque;
6914     abi_ulong start_stack = ts->info->start_stack;
6915     int i;
6916 
6917     for (i = 0; i < 44; i++) {
6918       char buf[128];
6919       int len;
6920       uint64_t val = 0;
6921 
6922       if (i == 0) {
6923         /* pid */
6924         val = getpid();
6925         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6926       } else if (i == 1) {
6927         /* app name */
6928         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6929       } else if (i == 27) {
6930         /* stack bottom */
6931         val = start_stack;
6932         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6933       } else {
6934         /* the remaining fields are not emulated and read as 0 */
6935         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6936       }
6937 
6938       len = strlen(buf);
6939       if (write(fd, buf, len) != len) {
6940           return -1;
6941       }
6942     }
6943 
6944     return 0;
6945 }
6946 
6947 static int open_self_auxv(void *cpu_env, int fd)
6948 {
6949     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6950     TaskState *ts = cpu->opaque;
6951     abi_ulong auxv = ts->info->saved_auxv;
6952     abi_ulong len = ts->info->auxv_len;
6953     char *ptr;
6954 
6955     /*
6956      * The auxiliary vector is stored on the target process's stack;
6957      * read the whole vector and copy it to the file.
6958      */
6959     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6960     if (ptr != NULL) {
6961         while (len > 0) {
6962             ssize_t r;
6963             r = write(fd, ptr, len);
6964             if (r <= 0) {
6965                 break;
6966             }
6967             len -= r;
6968             ptr += r;
6969         }
6970         lseek(fd, 0, SEEK_SET);
6971         unlock_user(ptr, auxv, len);
6972     }
6973 
6974     return 0;
6975 }
6976 
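/* Return nonzero if filename refers to /proc/self/<entry> or to
 * /proc/<pid>/<entry> for our own pid.
 */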
6977 static int is_proc_myself(const char *filename, const char *entry)
6978 {
6979     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6980         filename += strlen("/proc/");
6981         if (!strncmp(filename, "self/", strlen("self/"))) {
6982             filename += strlen("self/");
6983         } else if (*filename >= '1' && *filename <= '9') {
6984             char myself[80];
6985             snprintf(myself, sizeof(myself), "%d/", getpid());
6986             if (!strncmp(filename, myself, strlen(myself))) {
6987                 filename += strlen(myself);
6988             } else {
6989                 return 0;
6990             }
6991         } else {
6992             return 0;
6993         }
6994         if (!strcmp(filename, entry)) {
6995             return 1;
6996         }
6997     }
6998     return 0;
6999 }
7000 
7001 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7002     defined(TARGET_SPARC) || defined(TARGET_M68K)
7003 static int is_proc(const char *filename, const char *entry)
7004 {
7005     return strcmp(filename, entry) == 0;
7006 }
7007 #endif
7008 
7009 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
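/* Emulated /proc/net/route for the cross-endian case: destination,
 * gateway and netmask are byte-swapped into the guest's endianness.
 */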
7010 static int open_net_route(void *cpu_env, int fd)
7011 {
7012     FILE *fp;
7013     char *line = NULL;
7014     size_t len = 0;
7015     ssize_t read;
7016 
7017     fp = fopen("/proc/net/route", "r");
7018     if (fp == NULL) {
7019         return -1;
7020     }
7021 
7022     /* read header */
7023 
7024     read = getline(&line, &len, fp);
7025     dprintf(fd, "%s", line);
7026 
7027     /* read routes */
7028 
7029     while ((read = getline(&line, &len, fp)) != -1) {
7030         char iface[16];
7031         uint32_t dest, gw, mask;
7032         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7033         int fields;
7034 
7035         fields = sscanf(line,
7036                         "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7037                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7038                         &mask, &mtu, &window, &irtt);
7039         if (fields != 11) {
7040             continue;
7041         }
7042         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7043                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7044                 metric, tswap32(mask), mtu, window, irtt);
7045     }
7046 
7047     free(line);
7048     fclose(fp);
7049 
7050     return 0;
7051 }
7052 #endif
7053 
7054 #if defined(TARGET_SPARC)
7055 static int open_cpuinfo(void *cpu_env, int fd)
7056 {
7057     dprintf(fd, "type\t\t: sun4u\n");
7058     return 0;
7059 }
7060 #endif
7061 
7062 #if defined(TARGET_M68K)
7063 static int open_hardware(void *cpu_env, int fd)
7064 {
7065     dprintf(fd, "Model:\t\tqemu-m68k\n");
7066     return 0;
7067 }
7068 #endif
7069 
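/* openat() with interception of the /proc files we emulate: a matching
 * path is redirected to an unlinked temporary file filled in by the
 * corresponding fake_open handler.
 */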
7070 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7071 {
7072     struct fake_open {
7073         const char *filename;
7074         int (*fill)(void *cpu_env, int fd);
7075         int (*cmp)(const char *s1, const char *s2);
7076     };
7077     const struct fake_open *fake_open;
7078     static const struct fake_open fakes[] = {
7079         { "maps", open_self_maps, is_proc_myself },
7080         { "stat", open_self_stat, is_proc_myself },
7081         { "auxv", open_self_auxv, is_proc_myself },
7082         { "cmdline", open_self_cmdline, is_proc_myself },
7083 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7084         { "/proc/net/route", open_net_route, is_proc },
7085 #endif
7086 #if defined(TARGET_SPARC)
7087         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7088 #endif
7089 #if defined(TARGET_M68K)
7090         { "/proc/hardware", open_hardware, is_proc },
7091 #endif
7092         { NULL, NULL, NULL }
7093     };
7094 
7095     if (is_proc_myself(pathname, "exe")) {
7096         int execfd = qemu_getauxval(AT_EXECFD);
7097         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7098     }
7099 
7100     for (fake_open = fakes; fake_open->filename; fake_open++) {
7101         if (fake_open->cmp(pathname, fake_open->filename)) {
7102             break;
7103         }
7104     }
7105 
7106     if (fake_open->filename) {
7107         const char *tmpdir;
7108         char filename[PATH_MAX];
7109         int fd, r;
7110 
7111         /* create a temporary file to hold the emulated contents */
7112         tmpdir = getenv("TMPDIR");
7113         if (!tmpdir)
7114             tmpdir = "/tmp";
7115         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7116         fd = mkstemp(filename);
7117         if (fd < 0) {
7118             return fd;
7119         }
7120         unlink(filename);
7121 
7122         if ((r = fake_open->fill(cpu_env, fd))) {
7123             int e = errno;
7124             close(fd);
7125             errno = e;
7126             return r;
7127         }
7128         lseek(fd, 0, SEEK_SET);
7129 
7130         return fd;
7131     }
7132 
7133     return safe_openat(dirfd, path(pathname), flags, mode);
7134 }
7135 
7136 #define TIMER_MAGIC 0x0caf0000
7137 #define TIMER_MAGIC_MASK 0xffff0000
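/* e.g. the timer in slot 3 of g_posix_timers is exposed to the guest as
 * the ID 0x0caf0003.
 */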
7138 
7139 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7140 static target_timer_t get_timer_id(abi_long arg)
7141 {
7142     target_timer_t timerid = arg;
7143 
7144     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7145         return -TARGET_EINVAL;
7146     }
7147 
7148     timerid &= 0xffff;
7149 
7150     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7151         return -TARGET_EINVAL;
7152     }
7153 
7154     return timerid;
7155 }
7156 
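/* Copy a guest CPU affinity mask into a host bitmask, one bit at a time,
 * so that differing word sizes and byte orders are handled correctly.
 */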
7157 static int target_to_host_cpu_mask(unsigned long *host_mask,
7158                                    size_t host_size,
7159                                    abi_ulong target_addr,
7160                                    size_t target_size)
7161 {
7162     unsigned target_bits = sizeof(abi_ulong) * 8;
7163     unsigned host_bits = sizeof(*host_mask) * 8;
7164     abi_ulong *target_mask;
7165     unsigned i, j;
7166 
7167     assert(host_size >= target_size);
7168 
7169     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7170     if (!target_mask) {
7171         return -TARGET_EFAULT;
7172     }
7173     memset(host_mask, 0, host_size);
7174 
7175     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7176         unsigned bit = i * target_bits;
7177         abi_ulong val;
7178 
7179         __get_user(val, &target_mask[i]);
7180         for (j = 0; j < target_bits; j++, bit++) {
7181             if (val & (1UL << j)) {
7182                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7183             }
7184         }
7185     }
7186 
7187     unlock_user(target_mask, target_addr, 0);
7188     return 0;
7189 }
7190 
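/* The inverse of target_to_host_cpu_mask(): copy a host CPU affinity
 * mask back out to guest memory.
 */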
7191 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7192                                    size_t host_size,
7193                                    abi_ulong target_addr,
7194                                    size_t target_size)
7195 {
7196     unsigned target_bits = sizeof(abi_ulong) * 8;
7197     unsigned host_bits = sizeof(*host_mask) * 8;
7198     abi_ulong *target_mask;
7199     unsigned i, j;
7200 
7201     assert(host_size >= target_size);
7202 
7203     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7204     if (!target_mask) {
7205         return -TARGET_EFAULT;
7206     }
7207 
7208     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7209         unsigned bit = i * target_bits;
7210         abi_ulong val = 0;
7211 
7212         for (j = 0; j < target_bits; j++, bit++) {
7213             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7214                 val |= 1UL << j;
7215             }
7216         }
7217         __put_user(val, &target_mask[i]);
7218     }
7219 
7220     unlock_user(target_mask, target_addr, target_size);
7221     return 0;
7222 }
7223 
7224 /* This is an internal helper for do_syscall so that it is easier
7225  * to have a single return point, so that actions, such as logging
7226  * of syscall results, can be performed.
7227  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7228  */
7229 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7230                             abi_long arg2, abi_long arg3, abi_long arg4,
7231                             abi_long arg5, abi_long arg6, abi_long arg7,
7232                             abi_long arg8)
7233 {
7234     CPUState *cpu = env_cpu(cpu_env);
7235     abi_long ret;
7236 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7237     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7238     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7239     || defined(TARGET_NR_statx)
7240     struct stat st;
7241 #endif
7242 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7243     || defined(TARGET_NR_fstatfs)
7244     struct statfs stfs;
7245 #endif
7246     void *p;
7247 
7248     switch(num) {
7249     case TARGET_NR_exit:
7250         /* In old applications this may be used to implement _exit(2).
7251            However in threaded applications it is used for thread termination,
7252            and _exit_group is used for application termination.
7253            Do thread termination if we have more than one thread.  */
7254 
7255         if (block_signals()) {
7256             return -TARGET_ERESTARTSYS;
7257         }
7258 
7259         cpu_list_lock();
7260 
7261         if (CPU_NEXT(first_cpu)) {
7262             TaskState *ts;
7263 
7264             /* Remove the CPU from the list.  */
7265             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7266 
7267             cpu_list_unlock();
7268 
7269             ts = cpu->opaque;
7270             if (ts->child_tidptr) {
7271                 put_user_u32(0, ts->child_tidptr);
7272                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7273                           NULL, NULL, 0);
7274             }
7275             thread_cpu = NULL;
7276             object_unref(OBJECT(cpu));
7277             g_free(ts);
7278             rcu_unregister_thread();
7279             pthread_exit(NULL);
7280         }
7281 
7282         cpu_list_unlock();
7283         preexit_cleanup(cpu_env, arg1);
7284         _exit(arg1);
7285         return 0; /* avoid warning */
7286     case TARGET_NR_read:
7287         if (arg2 == 0 && arg3 == 0) {
7288             return get_errno(safe_read(arg1, 0, 0));
7289         } else {
7290             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7291                 return -TARGET_EFAULT;
7292             ret = get_errno(safe_read(arg1, p, arg3));
7293             if (ret >= 0 &&
7294                 fd_trans_host_to_target_data(arg1)) {
7295                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7296             }
7297             unlock_user(p, arg2, ret);
7298         }
7299         return ret;
7300     case TARGET_NR_write:
7301         if (arg2 == 0 && arg3 == 0) {
7302             return get_errno(safe_write(arg1, 0, 0));
7303         }
7304         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7305             return -TARGET_EFAULT;
7306         if (fd_trans_target_to_host_data(arg1)) {
7307             void *copy = g_malloc(arg3);
7308             memcpy(copy, p, arg3);
7309             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7310             if (ret >= 0) {
7311                 ret = get_errno(safe_write(arg1, copy, ret));
7312             }
7313             g_free(copy);
7314         } else {
7315             ret = get_errno(safe_write(arg1, p, arg3));
7316         }
7317         unlock_user(p, arg2, 0);
7318         return ret;
7319 
7320 #ifdef TARGET_NR_open
7321     case TARGET_NR_open:
7322         if (!(p = lock_user_string(arg1)))
7323             return -TARGET_EFAULT;
7324         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7325                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7326                                   arg3));
7327         fd_trans_unregister(ret);
7328         unlock_user(p, arg1, 0);
7329         return ret;
7330 #endif
7331     case TARGET_NR_openat:
7332         if (!(p = lock_user_string(arg2)))
7333             return -TARGET_EFAULT;
7334         ret = get_errno(do_openat(cpu_env, arg1, p,
7335                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7336                                   arg4));
7337         fd_trans_unregister(ret);
7338         unlock_user(p, arg2, 0);
7339         return ret;
7340 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7341     case TARGET_NR_name_to_handle_at:
7342         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7343         return ret;
7344 #endif
7345 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7346     case TARGET_NR_open_by_handle_at:
7347         ret = do_open_by_handle_at(arg1, arg2, arg3);
7348         fd_trans_unregister(ret);
7349         return ret;
7350 #endif
7351     case TARGET_NR_close:
7352         fd_trans_unregister(arg1);
7353         return get_errno(close(arg1));
7354 
7355     case TARGET_NR_brk:
7356         return do_brk(arg1);
7357 #ifdef TARGET_NR_fork
7358     case TARGET_NR_fork:
7359         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7360 #endif
7361 #ifdef TARGET_NR_waitpid
7362     case TARGET_NR_waitpid:
7363         {
7364             int status;
7365             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7366             if (!is_error(ret) && arg2 && ret
7367                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7368                 return -TARGET_EFAULT;
7369         }
7370         return ret;
7371 #endif
7372 #ifdef TARGET_NR_waitid
7373     case TARGET_NR_waitid:
7374         {
7375             siginfo_t info;
7376             info.si_pid = 0;
7377             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7378             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7379                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7380                     return -TARGET_EFAULT;
7381                 host_to_target_siginfo(p, &info);
7382                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7383             }
7384         }
7385         return ret;
7386 #endif
7387 #ifdef TARGET_NR_creat /* not on alpha */
7388     case TARGET_NR_creat:
7389         if (!(p = lock_user_string(arg1)))
7390             return -TARGET_EFAULT;
7391         ret = get_errno(creat(p, arg2));
7392         fd_trans_unregister(ret);
7393         unlock_user(p, arg1, 0);
7394         return ret;
7395 #endif
7396 #ifdef TARGET_NR_link
7397     case TARGET_NR_link:
7398         {
7399             void * p2;
7400             p = lock_user_string(arg1);
7401             p2 = lock_user_string(arg2);
7402             if (!p || !p2)
7403                 ret = -TARGET_EFAULT;
7404             else
7405                 ret = get_errno(link(p, p2));
7406             unlock_user(p2, arg2, 0);
7407             unlock_user(p, arg1, 0);
7408         }
7409         return ret;
7410 #endif
7411 #if defined(TARGET_NR_linkat)
7412     case TARGET_NR_linkat:
7413         {
7414             void * p2 = NULL;
7415             if (!arg2 || !arg4)
7416                 return -TARGET_EFAULT;
7417             p  = lock_user_string(arg2);
7418             p2 = lock_user_string(arg4);
7419             if (!p || !p2)
7420                 ret = -TARGET_EFAULT;
7421             else
7422                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7423             unlock_user(p, arg2, 0);
7424             unlock_user(p2, arg4, 0);
7425         }
7426         return ret;
7427 #endif
7428 #ifdef TARGET_NR_unlink
7429     case TARGET_NR_unlink:
7430         if (!(p = lock_user_string(arg1)))
7431             return -TARGET_EFAULT;
7432         ret = get_errno(unlink(p));
7433         unlock_user(p, arg1, 0);
7434         return ret;
7435 #endif
7436 #if defined(TARGET_NR_unlinkat)
7437     case TARGET_NR_unlinkat:
7438         if (!(p = lock_user_string(arg2)))
7439             return -TARGET_EFAULT;
7440         ret = get_errno(unlinkat(arg1, p, arg3));
7441         unlock_user(p, arg2, 0);
7442         return ret;
7443 #endif
7444     case TARGET_NR_execve:
7445         {
7446             char **argp, **envp;
7447             int argc, envc;
7448             abi_ulong gp;
7449             abi_ulong guest_argp;
7450             abi_ulong guest_envp;
7451             abi_ulong addr;
7452             char **q;
7453             int total_size = 0;
7454 
7455             argc = 0;
7456             guest_argp = arg2;
7457             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7458                 if (get_user_ual(addr, gp))
7459                     return -TARGET_EFAULT;
7460                 if (!addr)
7461                     break;
7462                 argc++;
7463             }
7464             envc = 0;
7465             guest_envp = arg3;
7466             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7467                 if (get_user_ual(addr, gp))
7468                     return -TARGET_EFAULT;
7469                 if (!addr)
7470                     break;
7471                 envc++;
7472             }
7473 
7474             argp = g_new0(char *, argc + 1);
7475             envp = g_new0(char *, envc + 1);
7476 
7477             for (gp = guest_argp, q = argp; gp;
7478                   gp += sizeof(abi_ulong), q++) {
7479                 if (get_user_ual(addr, gp))
7480                     goto execve_efault;
7481                 if (!addr)
7482                     break;
7483                 if (!(*q = lock_user_string(addr)))
7484                     goto execve_efault;
7485                 total_size += strlen(*q) + 1;
7486             }
7487             *q = NULL;
7488 
7489             for (gp = guest_envp, q = envp; gp;
7490                   gp += sizeof(abi_ulong), q++) {
7491                 if (get_user_ual(addr, gp))
7492                     goto execve_efault;
7493                 if (!addr)
7494                     break;
7495                 if (!(*q = lock_user_string(addr)))
7496                     goto execve_efault;
7497                 total_size += strlen(*q) + 1;
7498             }
7499             *q = NULL;
7500 
7501             if (!(p = lock_user_string(arg1)))
7502                 goto execve_efault;
7503             /* Although execve() is not an interruptible syscall it is
7504              * a special case where we must use the safe_syscall wrapper:
7505              * if we allow a signal to happen before we make the host
7506              * syscall then we will 'lose' it, because at the point of
7507              * execve the process leaves QEMU's control. So we use the
7508              * safe syscall wrapper to ensure that we either take the
7509              * signal as a guest signal, or else it does not happen
7510              * before the execve completes and makes it the other
7511              * program's problem.
7512              */
7513             ret = get_errno(safe_execve(p, argp, envp));
7514             unlock_user(p, arg1, 0);
7515 
7516             goto execve_end;
7517 
7518         execve_efault:
7519             ret = -TARGET_EFAULT;
7520 
7521         execve_end:
7522             for (gp = guest_argp, q = argp; *q;
7523                   gp += sizeof(abi_ulong), q++) {
7524                 if (get_user_ual(addr, gp)
7525                     || !addr)
7526                     break;
7527                 unlock_user(*q, addr, 0);
7528             }
7529             for (gp = guest_envp, q = envp; *q;
7530                   gp += sizeof(abi_ulong), q++) {
7531                 if (get_user_ual(addr, gp)
7532                     || !addr)
7533                     break;
7534                 unlock_user(*q, addr, 0);
7535             }
7536 
7537             g_free(argp);
7538             g_free(envp);
7539         }
7540         return ret;
7541     case TARGET_NR_chdir:
7542         if (!(p = lock_user_string(arg1)))
7543             return -TARGET_EFAULT;
7544         ret = get_errno(chdir(p));
7545         unlock_user(p, arg1, 0);
7546         return ret;
7547 #ifdef TARGET_NR_time
7548     case TARGET_NR_time:
7549         {
7550             time_t host_time;
7551             ret = get_errno(time(&host_time));
7552             if (!is_error(ret)
7553                 && arg1
7554                 && put_user_sal(host_time, arg1))
7555                 return -TARGET_EFAULT;
7556         }
7557         return ret;
7558 #endif
7559 #ifdef TARGET_NR_mknod
7560     case TARGET_NR_mknod:
7561         if (!(p = lock_user_string(arg1)))
7562             return -TARGET_EFAULT;
7563         ret = get_errno(mknod(p, arg2, arg3));
7564         unlock_user(p, arg1, 0);
7565         return ret;
7566 #endif
7567 #if defined(TARGET_NR_mknodat)
7568     case TARGET_NR_mknodat:
7569         if (!(p = lock_user_string(arg2)))
7570             return -TARGET_EFAULT;
7571         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7572         unlock_user(p, arg2, 0);
7573         return ret;
7574 #endif
7575 #ifdef TARGET_NR_chmod
7576     case TARGET_NR_chmod:
7577         if (!(p = lock_user_string(arg1)))
7578             return -TARGET_EFAULT;
7579         ret = get_errno(chmod(p, arg2));
7580         unlock_user(p, arg1, 0);
7581         return ret;
7582 #endif
7583 #ifdef TARGET_NR_lseek
7584     case TARGET_NR_lseek:
7585         return get_errno(lseek(arg1, arg2, arg3));
7586 #endif
7587 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7588     /* Alpha specific */
7589     case TARGET_NR_getxpid:
7590         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7591         return get_errno(getpid());
7592 #endif
7593 #ifdef TARGET_NR_getpid
7594     case TARGET_NR_getpid:
7595         return get_errno(getpid());
7596 #endif
7597     case TARGET_NR_mount:
7598         {
7599             /* need to look at the data field */
7600             void *p2, *p3;
7601 
7602             if (arg1) {
7603                 p = lock_user_string(arg1);
7604                 if (!p) {
7605                     return -TARGET_EFAULT;
7606                 }
7607             } else {
7608                 p = NULL;
7609             }
7610 
7611             p2 = lock_user_string(arg2);
7612             if (!p2) {
7613                 if (arg1) {
7614                     unlock_user(p, arg1, 0);
7615                 }
7616                 return -TARGET_EFAULT;
7617             }
7618 
7619             if (arg3) {
7620                 p3 = lock_user_string(arg3);
7621                 if (!p3) {
7622                     if (arg1) {
7623                         unlock_user(p, arg1, 0);
7624                     }
7625                     unlock_user(p2, arg2, 0);
7626                     return -TARGET_EFAULT;
7627                 }
7628             } else {
7629                 p3 = NULL;
7630             }
7631 
7632             /* FIXME - arg5 should be locked, but it isn't clear how to
7633              * do that since it's not guaranteed to be a NULL-terminated
7634              * string.
7635              */
7636             if (!arg5) {
7637                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7638             } else {
7639                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7640             }
7641             ret = get_errno(ret);
7642 
7643             if (arg1) {
7644                 unlock_user(p, arg1, 0);
7645             }
7646             unlock_user(p2, arg2, 0);
7647             if (arg3) {
7648                 unlock_user(p3, arg3, 0);
7649             }
7650         }
7651         return ret;
7652 #ifdef TARGET_NR_umount
7653     case TARGET_NR_umount:
7654         if (!(p = lock_user_string(arg1)))
7655             return -TARGET_EFAULT;
7656         ret = get_errno(umount(p));
7657         unlock_user(p, arg1, 0);
7658         return ret;
7659 #endif
7660 #ifdef TARGET_NR_stime /* not on alpha */
7661     case TARGET_NR_stime:
7662         {
7663             time_t host_time;
7664             if (get_user_sal(host_time, arg1))
7665                 return -TARGET_EFAULT;
7666             return get_errno(stime(&host_time));
7667         }
7668 #endif
7669 #ifdef TARGET_NR_alarm /* not on alpha */
7670     case TARGET_NR_alarm:
7671         return alarm(arg1);
7672 #endif
7673 #ifdef TARGET_NR_pause /* not on alpha */
7674     case TARGET_NR_pause:
7675         if (!block_signals()) {
7676             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7677         }
7678         return -TARGET_EINTR;
7679 #endif
7680 #ifdef TARGET_NR_utime
7681     case TARGET_NR_utime:
7682         {
7683             struct utimbuf tbuf, *host_tbuf;
7684             struct target_utimbuf *target_tbuf;
7685             if (arg2) {
7686                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7687                     return -TARGET_EFAULT;
7688                 tbuf.actime = tswapal(target_tbuf->actime);
7689                 tbuf.modtime = tswapal(target_tbuf->modtime);
7690                 unlock_user_struct(target_tbuf, arg2, 0);
7691                 host_tbuf = &tbuf;
7692             } else {
7693                 host_tbuf = NULL;
7694             }
7695             if (!(p = lock_user_string(arg1)))
7696                 return -TARGET_EFAULT;
7697             ret = get_errno(utime(p, host_tbuf));
7698             unlock_user(p, arg1, 0);
7699         }
7700         return ret;
7701 #endif
7702 #ifdef TARGET_NR_utimes
7703     case TARGET_NR_utimes:
7704         {
7705             struct timeval *tvp, tv[2];
7706             if (arg2) {
7707                 if (copy_from_user_timeval(&tv[0], arg2)
7708                     || copy_from_user_timeval(&tv[1],
7709                                               arg2 + sizeof(struct target_timeval)))
7710                     return -TARGET_EFAULT;
7711                 tvp = tv;
7712             } else {
7713                 tvp = NULL;
7714             }
7715             if (!(p = lock_user_string(arg1)))
7716                 return -TARGET_EFAULT;
7717             ret = get_errno(utimes(p, tvp));
7718             unlock_user(p, arg1, 0);
7719         }
7720         return ret;
7721 #endif
7722 #if defined(TARGET_NR_futimesat)
7723     case TARGET_NR_futimesat:
7724         {
7725             struct timeval *tvp, tv[2];
7726             if (arg3) {
7727                 if (copy_from_user_timeval(&tv[0], arg3)
7728                     || copy_from_user_timeval(&tv[1],
7729                                               arg3 + sizeof(struct target_timeval)))
7730                     return -TARGET_EFAULT;
7731                 tvp = tv;
7732             } else {
7733                 tvp = NULL;
7734             }
7735             if (!(p = lock_user_string(arg2))) {
7736                 return -TARGET_EFAULT;
7737             }
7738             ret = get_errno(futimesat(arg1, path(p), tvp));
7739             unlock_user(p, arg2, 0);
7740         }
7741         return ret;
7742 #endif
7743 #ifdef TARGET_NR_access
7744     case TARGET_NR_access:
7745         if (!(p = lock_user_string(arg1))) {
7746             return -TARGET_EFAULT;
7747         }
7748         ret = get_errno(access(path(p), arg2));
7749         unlock_user(p, arg1, 0);
7750         return ret;
7751 #endif
7752 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7753     case TARGET_NR_faccessat:
7754         if (!(p = lock_user_string(arg2))) {
7755             return -TARGET_EFAULT;
7756         }
7757         ret = get_errno(faccessat(arg1, p, arg3, 0));
7758         unlock_user(p, arg2, 0);
7759         return ret;
7760 #endif
7761 #ifdef TARGET_NR_nice /* not on alpha */
7762     case TARGET_NR_nice:
7763         return get_errno(nice(arg1));
7764 #endif
7765     case TARGET_NR_sync:
7766         sync();
7767         return 0;
7768 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7769     case TARGET_NR_syncfs:
7770         return get_errno(syncfs(arg1));
7771 #endif
7772     case TARGET_NR_kill:
7773         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7774 #ifdef TARGET_NR_rename
7775     case TARGET_NR_rename:
7776         {
7777             void *p2;
7778             p = lock_user_string(arg1);
7779             p2 = lock_user_string(arg2);
7780             if (!p || !p2)
7781                 ret = -TARGET_EFAULT;
7782             else
7783                 ret = get_errno(rename(p, p2));
7784             unlock_user(p2, arg2, 0);
7785             unlock_user(p, arg1, 0);
7786         }
7787         return ret;
7788 #endif
7789 #if defined(TARGET_NR_renameat)
7790     case TARGET_NR_renameat:
7791         {
7792             void *p2;
7793             p  = lock_user_string(arg2);
7794             p2 = lock_user_string(arg4);
7795             if (!p || !p2)
7796                 ret = -TARGET_EFAULT;
7797             else
7798                 ret = get_errno(renameat(arg1, p, arg3, p2));
7799             unlock_user(p2, arg4, 0);
7800             unlock_user(p, arg2, 0);
7801         }
7802         return ret;
7803 #endif
7804 #if defined(TARGET_NR_renameat2)
7805     case TARGET_NR_renameat2:
7806         {
7807             void *p2;
7808             p  = lock_user_string(arg2);
7809             p2 = lock_user_string(arg4);
7810             if (!p || !p2) {
7811                 ret = -TARGET_EFAULT;
7812             } else {
7813                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7814             }
7815             unlock_user(p2, arg4, 0);
7816             unlock_user(p, arg2, 0);
7817         }
7818         return ret;
7819 #endif
7820 #ifdef TARGET_NR_mkdir
7821     case TARGET_NR_mkdir:
7822         if (!(p = lock_user_string(arg1)))
7823             return -TARGET_EFAULT;
7824         ret = get_errno(mkdir(p, arg2));
7825         unlock_user(p, arg1, 0);
7826         return ret;
7827 #endif
7828 #if defined(TARGET_NR_mkdirat)
7829     case TARGET_NR_mkdirat:
7830         if (!(p = lock_user_string(arg2)))
7831             return -TARGET_EFAULT;
7832         ret = get_errno(mkdirat(arg1, p, arg3));
7833         unlock_user(p, arg2, 0);
7834         return ret;
7835 #endif
7836 #ifdef TARGET_NR_rmdir
7837     case TARGET_NR_rmdir:
7838         if (!(p = lock_user_string(arg1)))
7839             return -TARGET_EFAULT;
7840         ret = get_errno(rmdir(p));
7841         unlock_user(p, arg1, 0);
7842         return ret;
7843 #endif
7844     case TARGET_NR_dup:
7845         ret = get_errno(dup(arg1));
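             /* Propagate any fd translator registered for the old descriptor
              * (see fd-trans.h) to the newly created duplicate.
              */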
7846         if (ret >= 0) {
7847             fd_trans_dup(arg1, ret);
7848         }
7849         return ret;
7850 #ifdef TARGET_NR_pipe
7851     case TARGET_NR_pipe:
7852         return do_pipe(cpu_env, arg1, 0, 0);
7853 #endif
7854 #ifdef TARGET_NR_pipe2
7855     case TARGET_NR_pipe2:
7856         return do_pipe(cpu_env, arg1,
7857                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7858 #endif
7859     case TARGET_NR_times:
7860         {
7861             struct target_tms *tmsp;
7862             struct tms tms;
7863             ret = get_errno(times(&tms));
7864             if (arg1) {
7865                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7866                 if (!tmsp)
7867                     return -TARGET_EFAULT;
7868                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7869                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7870                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7871                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7872             }
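                 /* times() itself returns elapsed ticks as a clock_t, so the
                  * return value needs the same guest clock-tick conversion.
                  */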
7873             if (!is_error(ret))
7874                 ret = host_to_target_clock_t(ret);
7875         }
7876         return ret;
7877     case TARGET_NR_acct:
7878         if (arg1 == 0) {
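                 /* A NULL filename switches process accounting off. */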
7879             ret = get_errno(acct(NULL));
7880         } else {
7881             if (!(p = lock_user_string(arg1))) {
7882                 return -TARGET_EFAULT;
7883             }
7884             ret = get_errno(acct(path(p)));
7885             unlock_user(p, arg1, 0);
7886         }
7887         return ret;
7888 #ifdef TARGET_NR_umount2
7889     case TARGET_NR_umount2:
7890         if (!(p = lock_user_string(arg1)))
7891             return -TARGET_EFAULT;
7892         ret = get_errno(umount2(p, arg2));
7893         unlock_user(p, arg1, 0);
7894         return ret;
7895 #endif
7896     case TARGET_NR_ioctl:
7897         return do_ioctl(arg1, arg2, arg3);
7898 #ifdef TARGET_NR_fcntl
7899     case TARGET_NR_fcntl:
7900         return do_fcntl(arg1, arg2, arg3);
7901 #endif
7902     case TARGET_NR_setpgid:
7903         return get_errno(setpgid(arg1, arg2));
7904     case TARGET_NR_umask:
7905         return get_errno(umask(arg1));
7906     case TARGET_NR_chroot:
7907         if (!(p = lock_user_string(arg1)))
7908             return -TARGET_EFAULT;
7909         ret = get_errno(chroot(p));
7910         unlock_user(p, arg1, 0);
7911         return ret;
7912 #ifdef TARGET_NR_dup2
7913     case TARGET_NR_dup2:
7914         ret = get_errno(dup2(arg1, arg2));
7915         if (ret >= 0) {
7916             fd_trans_dup(arg1, arg2);
7917         }
7918         return ret;
7919 #endif
7920 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7921     case TARGET_NR_dup3:
7922     {
7923         int host_flags;
7924 
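             /* As in the kernel, O_CLOEXEC is the only flag dup3() accepts. */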
7925         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7926             return -TARGET_EINVAL;
7927         }
7928         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7929         ret = get_errno(dup3(arg1, arg2, host_flags));
7930         if (ret >= 0) {
7931             fd_trans_dup(arg1, arg2);
7932         }
7933         return ret;
7934     }
7935 #endif
7936 #ifdef TARGET_NR_getppid /* not on alpha */
7937     case TARGET_NR_getppid:
7938         return get_errno(getppid());
7939 #endif
7940 #ifdef TARGET_NR_getpgrp
7941     case TARGET_NR_getpgrp:
7942         return get_errno(getpgrp());
7943 #endif
7944     case TARGET_NR_setsid:
7945         return get_errno(setsid());
7946 #ifdef TARGET_NR_sigaction
7947     case TARGET_NR_sigaction:
7948         {
7949 #if defined(TARGET_ALPHA)
7950             struct target_sigaction act, oact, *pact = 0;
7951             struct target_old_sigaction *old_act;
7952             if (arg2) {
7953                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7954                     return -TARGET_EFAULT;
7955                 act._sa_handler = old_act->_sa_handler;
7956                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7957                 act.sa_flags = old_act->sa_flags;
7958                 act.sa_restorer = 0;
7959                 unlock_user_struct(old_act, arg2, 0);
7960                 pact = &act;
7961             }
7962             ret = get_errno(do_sigaction(arg1, pact, &oact));
7963             if (!is_error(ret) && arg3) {
7964                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7965                     return -TARGET_EFAULT;
7966                 old_act->_sa_handler = oact._sa_handler;
7967                 old_act->sa_mask = oact.sa_mask.sig[0];
7968                 old_act->sa_flags = oact.sa_flags;
7969                 unlock_user_struct(old_act, arg3, 1);
7970             }
7971 #elif defined(TARGET_MIPS)
7972             struct target_sigaction act, oact, *pact, *old_act;
7973 
7974             if (arg2) {
7975                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7976                     return -TARGET_EFAULT;
7977                 act._sa_handler = old_act->_sa_handler;
7978                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7979                 act.sa_flags = old_act->sa_flags;
7980                 unlock_user_struct(old_act, arg2, 0);
7981                 pact = &act;
7982             } else {
7983                 pact = NULL;
7984             }
7985 
7986             ret = get_errno(do_sigaction(arg1, pact, &oact));
7987 
7988             if (!is_error(ret) && arg3) {
7989                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7990                     return -TARGET_EFAULT;
7991                 old_act->_sa_handler = oact._sa_handler;
7992                 old_act->sa_flags = oact.sa_flags;
7993                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7994                 old_act->sa_mask.sig[1] = 0;
7995                 old_act->sa_mask.sig[2] = 0;
7996                 old_act->sa_mask.sig[3] = 0;
7997                 unlock_user_struct(old_act, arg3, 1);
7998             }
7999 #else
8000             struct target_old_sigaction *old_act;
8001             struct target_sigaction act, oact, *pact;
8002             if (arg2) {
8003                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8004                     return -TARGET_EFAULT;
8005                 act._sa_handler = old_act->_sa_handler;
8006                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8007                 act.sa_flags = old_act->sa_flags;
8008                 act.sa_restorer = old_act->sa_restorer;
8009 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8010                 act.ka_restorer = 0;
8011 #endif
8012                 unlock_user_struct(old_act, arg2, 0);
8013                 pact = &act;
8014             } else {
8015                 pact = NULL;
8016             }
8017             ret = get_errno(do_sigaction(arg1, pact, &oact));
8018             if (!is_error(ret) && arg3) {
8019                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8020                     return -TARGET_EFAULT;
8021                 old_act->_sa_handler = oact._sa_handler;
8022                 old_act->sa_mask = oact.sa_mask.sig[0];
8023                 old_act->sa_flags = oact.sa_flags;
8024                 old_act->sa_restorer = oact.sa_restorer;
8025                 unlock_user_struct(old_act, arg3, 1);
8026             }
8027 #endif
8028         }
8029         return ret;
8030 #endif
8031     case TARGET_NR_rt_sigaction:
8032         {
8033 #if defined(TARGET_ALPHA)
8034             /* For Alpha and SPARC this is a 5 argument syscall, with
8035              * a 'restorer' parameter which must be copied into the
8036              * sa_restorer field of the sigaction struct.
8037              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8038              * and arg5 is the sigsetsize.
8039              * Alpha also has a separate rt_sigaction struct that it uses
8040              * here; SPARC uses the usual sigaction struct.
8041              */
8042             struct target_rt_sigaction *rt_act;
8043             struct target_sigaction act, oact, *pact = 0;
8044 
8045             if (arg4 != sizeof(target_sigset_t)) {
8046                 return -TARGET_EINVAL;
8047             }
8048             if (arg2) {
8049                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8050                     return -TARGET_EFAULT;
8051                 act._sa_handler = rt_act->_sa_handler;
8052                 act.sa_mask = rt_act->sa_mask;
8053                 act.sa_flags = rt_act->sa_flags;
8054                 act.sa_restorer = arg5;
8055                 unlock_user_struct(rt_act, arg2, 0);
8056                 pact = &act;
8057             }
8058             ret = get_errno(do_sigaction(arg1, pact, &oact));
8059             if (!is_error(ret) && arg3) {
8060                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8061                     return -TARGET_EFAULT;
8062                 rt_act->_sa_handler = oact._sa_handler;
8063                 rt_act->sa_mask = oact.sa_mask;
8064                 rt_act->sa_flags = oact.sa_flags;
8065                 unlock_user_struct(rt_act, arg3, 1);
8066             }
8067 #else
8068 #ifdef TARGET_SPARC
8069             target_ulong restorer = arg4;
8070             target_ulong sigsetsize = arg5;
8071 #else
8072             target_ulong sigsetsize = arg4;
8073 #endif
8074             struct target_sigaction *act;
8075             struct target_sigaction *oact;
8076 
8077             if (sigsetsize != sizeof(target_sigset_t)) {
8078                 return -TARGET_EINVAL;
8079             }
8080             if (arg2) {
8081                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8082                     return -TARGET_EFAULT;
8083                 }
8084 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8085                 act->ka_restorer = restorer;
8086 #endif
8087             } else {
8088                 act = NULL;
8089             }
8090             if (arg3) {
8091                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8092                     ret = -TARGET_EFAULT;
8093                     goto rt_sigaction_fail;
8094                 }
8095             } else
8096                 oact = NULL;
8097             ret = get_errno(do_sigaction(arg1, act, oact));
8098         rt_sigaction_fail:
8099             if (act)
8100                 unlock_user_struct(act, arg2, 0);
8101             if (oact)
8102                 unlock_user_struct(oact, arg3, 1);
8103 #endif
8104         }
8105         return ret;
8106 #ifdef TARGET_NR_sgetmask /* not on alpha */
8107     case TARGET_NR_sgetmask:
8108         {
8109             sigset_t cur_set;
8110             abi_ulong target_set;
8111             ret = do_sigprocmask(0, NULL, &cur_set);
8112             if (!ret) {
8113                 host_to_target_old_sigset(&target_set, &cur_set);
8114                 ret = target_set;
8115             }
8116         }
8117         return ret;
8118 #endif
8119 #ifdef TARGET_NR_ssetmask /* not on alpha */
8120     case TARGET_NR_ssetmask:
8121         {
8122             sigset_t set, oset;
8123             abi_ulong target_set = arg1;
8124             target_to_host_old_sigset(&set, &target_set);
8125             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8126             if (!ret) {
8127                 host_to_target_old_sigset(&target_set, &oset);
8128                 ret = target_set;
8129             }
8130         }
8131         return ret;
8132 #endif
8133 #ifdef TARGET_NR_sigprocmask
8134     case TARGET_NR_sigprocmask:
8135         {
8136 #if defined(TARGET_ALPHA)
8137             sigset_t set, oldset;
8138             abi_ulong mask;
8139             int how;
8140 
8141             switch (arg1) {
8142             case TARGET_SIG_BLOCK:
8143                 how = SIG_BLOCK;
8144                 break;
8145             case TARGET_SIG_UNBLOCK:
8146                 how = SIG_UNBLOCK;
8147                 break;
8148             case TARGET_SIG_SETMASK:
8149                 how = SIG_SETMASK;
8150                 break;
8151             default:
8152                 return -TARGET_EINVAL;
8153             }
8154             mask = arg2;
8155             target_to_host_old_sigset(&set, &mask);
8156 
8157             ret = do_sigprocmask(how, &set, &oldset);
8158             if (!is_error(ret)) {
8159                 host_to_target_old_sigset(&mask, &oldset);
8160                 ret = mask;
8161                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8162             }
8163 #else
8164             sigset_t set, oldset, *set_ptr;
8165             int how;
8166 
8167             if (arg2) {
8168                 switch (arg1) {
8169                 case TARGET_SIG_BLOCK:
8170                     how = SIG_BLOCK;
8171                     break;
8172                 case TARGET_SIG_UNBLOCK:
8173                     how = SIG_UNBLOCK;
8174                     break;
8175                 case TARGET_SIG_SETMASK:
8176                     how = SIG_SETMASK;
8177                     break;
8178                 default:
8179                     return -TARGET_EINVAL;
8180                 }
8181                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8182                     return -TARGET_EFAULT;
8183                 target_to_host_old_sigset(&set, p);
8184                 unlock_user(p, arg2, 0);
8185                 set_ptr = &set;
8186             } else {
8187                 how = 0;
8188                 set_ptr = NULL;
8189             }
8190             ret = do_sigprocmask(how, set_ptr, &oldset);
8191             if (!is_error(ret) && arg3) {
8192                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8193                     return -TARGET_EFAULT;
8194                 host_to_target_old_sigset(p, &oldset);
8195                 unlock_user(p, arg3, sizeof(target_sigset_t));
8196             }
8197 #endif
8198         }
8199         return ret;
8200 #endif
8201     case TARGET_NR_rt_sigprocmask:
8202         {
8203             int how = arg1;
8204             sigset_t set, oldset, *set_ptr;
8205 
8206             if (arg4 != sizeof(target_sigset_t)) {
8207                 return -TARGET_EINVAL;
8208             }
8209 
8210             if (arg2) {
8211                 switch(how) {
8212                 case TARGET_SIG_BLOCK:
8213                     how = SIG_BLOCK;
8214                     break;
8215                 case TARGET_SIG_UNBLOCK:
8216                     how = SIG_UNBLOCK;
8217                     break;
8218                 case TARGET_SIG_SETMASK:
8219                     how = SIG_SETMASK;
8220                     break;
8221                 default:
8222                     return -TARGET_EINVAL;
8223                 }
8224                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8225                     return -TARGET_EFAULT;
8226                 target_to_host_sigset(&set, p);
8227                 unlock_user(p, arg2, 0);
8228                 set_ptr = &set;
8229             } else {
8230                 how = 0;
8231                 set_ptr = NULL;
8232             }
8233             ret = do_sigprocmask(how, set_ptr, &oldset);
8234             if (!is_error(ret) && arg3) {
8235                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8236                     return -TARGET_EFAULT;
8237                 host_to_target_sigset(p, &oldset);
8238                 unlock_user(p, arg3, sizeof(target_sigset_t));
8239             }
8240         }
8241         return ret;
8242 #ifdef TARGET_NR_sigpending
8243     case TARGET_NR_sigpending:
8244         {
8245             sigset_t set;
8246             ret = get_errno(sigpending(&set));
8247             if (!is_error(ret)) {
8248                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8249                     return -TARGET_EFAULT;
8250                 host_to_target_old_sigset(p, &set);
8251                 unlock_user(p, arg1, sizeof(target_sigset_t));
8252             }
8253         }
8254         return ret;
8255 #endif
8256     case TARGET_NR_rt_sigpending:
8257         {
8258             sigset_t set;
8259 
8260             /* Yes, this check is >, not != like most. We follow the kernel's
8261              * logic and it does it like this because it implements
8262              * NR_sigpending through the same code path, and in that case
8263              * the old_sigset_t is smaller in size.
8264              */
8265             if (arg2 > sizeof(target_sigset_t)) {
8266                 return -TARGET_EINVAL;
8267             }
8268 
8269             ret = get_errno(sigpending(&set));
8270             if (!is_error(ret)) {
8271                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8272                     return -TARGET_EFAULT;
8273                 host_to_target_sigset(p, &set);
8274                 unlock_user(p, arg1, sizeof(target_sigset_t));
8275             }
8276         }
8277         return ret;
8278 #ifdef TARGET_NR_sigsuspend
8279     case TARGET_NR_sigsuspend:
8280         {
8281             TaskState *ts = cpu->opaque;
8282 #if defined(TARGET_ALPHA)
8283             abi_ulong mask = arg1;
8284             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8285 #else
8286             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8287                 return -TARGET_EFAULT;
8288             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8289             unlock_user(p, arg1, 0);
8290 #endif
8291             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8292                                                SIGSET_T_SIZE));
8293             if (ret != -TARGET_ERESTARTSYS) {
8294                 ts->in_sigsuspend = 1;
8295             }
8296         }
8297         return ret;
8298 #endif
8299     case TARGET_NR_rt_sigsuspend:
8300         {
8301             TaskState *ts = cpu->opaque;
8302 
8303             if (arg2 != sizeof(target_sigset_t)) {
8304                 return -TARGET_EINVAL;
8305             }
8306             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8307                 return -TARGET_EFAULT;
8308             target_to_host_sigset(&ts->sigsuspend_mask, p);
8309             unlock_user(p, arg1, 0);
8310             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8311                                                SIGSET_T_SIZE));
8312             if (ret != -TARGET_ERESTARTSYS) {
8313                 ts->in_sigsuspend = 1;
8314             }
8315         }
8316         return ret;
8317     case TARGET_NR_rt_sigtimedwait:
8318         {
8319             sigset_t set;
8320             struct timespec uts, *puts;
8321             siginfo_t uinfo;
8322 
8323             if (arg4 != sizeof(target_sigset_t)) {
8324                 return -TARGET_EINVAL;
8325             }
8326 
8327             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8328                 return -TARGET_EFAULT;
8329             target_to_host_sigset(&set, p);
8330             unlock_user(p, arg1, 0);
8331             if (arg3) {
8332                 puts = &uts;
8333                 target_to_host_timespec(puts, arg3);
8334             } else {
8335                 puts = NULL;
8336             }
8337             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8338                                                  SIGSET_T_SIZE));
8339             if (!is_error(ret)) {
8340                 if (arg2) {
8341                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8342                                   0);
8343                     if (!p) {
8344                         return -TARGET_EFAULT;
8345                     }
8346                     host_to_target_siginfo(p, &uinfo);
8347                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8348                 }
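                     /* The wait returned a host signal number; map it back
                      * to the guest's signal numbering.
                      */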
8349                 ret = host_to_target_signal(ret);
8350             }
8351         }
8352         return ret;
8353     case TARGET_NR_rt_sigqueueinfo:
8354         {
8355             siginfo_t uinfo;
8356 
8357             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8358             if (!p) {
8359                 return -TARGET_EFAULT;
8360             }
8361             target_to_host_siginfo(&uinfo, p);
8362             unlock_user(p, arg3, 0);
8363             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8364         }
8365         return ret;
8366     case TARGET_NR_rt_tgsigqueueinfo:
8367         {
8368             siginfo_t uinfo;
8369 
8370             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8371             if (!p) {
8372                 return -TARGET_EFAULT;
8373             }
8374             target_to_host_siginfo(&uinfo, p);
8375             unlock_user(p, arg4, 0);
8376             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8377         }
8378         return ret;
8379 #ifdef TARGET_NR_sigreturn
8380     case TARGET_NR_sigreturn:
8381         if (block_signals()) {
8382             return -TARGET_ERESTARTSYS;
8383         }
8384         return do_sigreturn(cpu_env);
8385 #endif
8386     case TARGET_NR_rt_sigreturn:
8387         if (block_signals()) {
8388             return -TARGET_ERESTARTSYS;
8389         }
8390         return do_rt_sigreturn(cpu_env);
8391     case TARGET_NR_sethostname:
8392         if (!(p = lock_user_string(arg1)))
8393             return -TARGET_EFAULT;
8394         ret = get_errno(sethostname(p, arg2));
8395         unlock_user(p, arg1, 0);
8396         return ret;
8397 #ifdef TARGET_NR_setrlimit
8398     case TARGET_NR_setrlimit:
8399         {
8400             int resource = target_to_host_resource(arg1);
8401             struct target_rlimit *target_rlim;
8402             struct rlimit rlim;
8403             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8404                 return -TARGET_EFAULT;
8405             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8406             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8407             unlock_user_struct(target_rlim, arg2, 0);
8408             /*
8409              * If we just passed through resource limit settings for memory then
8410              * they would also apply to QEMU's own allocations, and QEMU will
8411              * crash or hang or die if its allocations fail. Ideally we would
8412              * track the guest allocations in QEMU and apply the limits ourselves.
8413              * For now, just tell the guest the call succeeded but don't actually
8414              * limit anything.
8415              */
8416             if (resource != RLIMIT_AS &&
8417                 resource != RLIMIT_DATA &&
8418                 resource != RLIMIT_STACK) {
8419                 return get_errno(setrlimit(resource, &rlim));
8420             } else {
8421                 return 0;
8422             }
8423         }
8424 #endif
8425 #ifdef TARGET_NR_getrlimit
8426     case TARGET_NR_getrlimit:
8427         {
8428             int resource = target_to_host_resource(arg1);
8429             struct target_rlimit *target_rlim;
8430             struct rlimit rlim;
8431 
8432             ret = get_errno(getrlimit(resource, &rlim));
8433             if (!is_error(ret)) {
8434                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8435                     return -TARGET_EFAULT;
8436                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8437                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8438                 unlock_user_struct(target_rlim, arg2, 1);
8439             }
8440         }
8441         return ret;
8442 #endif
8443     case TARGET_NR_getrusage:
8444         {
8445             struct rusage rusage;
8446             ret = get_errno(getrusage(arg1, &rusage));
8447             if (!is_error(ret)) {
8448                 ret = host_to_target_rusage(arg2, &rusage);
8449             }
8450         }
8451         return ret;
8452     case TARGET_NR_gettimeofday:
8453         {
8454             struct timeval tv;
8455             ret = get_errno(gettimeofday(&tv, NULL));
8456             if (!is_error(ret)) {
8457                 if (copy_to_user_timeval(arg1, &tv))
8458                     return -TARGET_EFAULT;
8459             }
8460         }
8461         return ret;
8462     case TARGET_NR_settimeofday:
8463         {
8464             struct timeval tv, *ptv = NULL;
8465             struct timezone tz, *ptz = NULL;
8466 
8467             if (arg1) {
8468                 if (copy_from_user_timeval(&tv, arg1)) {
8469                     return -TARGET_EFAULT;
8470                 }
8471                 ptv = &tv;
8472             }
8473 
8474             if (arg2) {
8475                 if (copy_from_user_timezone(&tz, arg2)) {
8476                     return -TARGET_EFAULT;
8477                 }
8478                 ptz = &tz;
8479             }
8480 
8481             return get_errno(settimeofday(ptv, ptz));
8482         }
8483 #if defined(TARGET_NR_select)
8484     case TARGET_NR_select:
8485 #if defined(TARGET_WANT_NI_OLD_SELECT)
8486         /* Some architectures used to implement old_select here,
8487          * but they now return ENOSYS for it.
8488          */
8489         ret = -TARGET_ENOSYS;
8490 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
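             /* Old-style select(): the guest passes a single pointer to a
              * block holding the five arguments (see do_old_select()).
              */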
8491         ret = do_old_select(arg1);
8492 #else
8493         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8494 #endif
8495         return ret;
8496 #endif
8497 #ifdef TARGET_NR_pselect6
8498     case TARGET_NR_pselect6:
8499         {
8500             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8501             fd_set rfds, wfds, efds;
8502             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8503             struct timespec ts, *ts_ptr;
8504 
8505             /*
8506              * The 6th arg is actually two args smashed together,
8507              * so we cannot use the C library.
8508              */
8509             sigset_t set;
8510             struct {
8511                 sigset_t *set;
8512                 size_t size;
8513             } sig, *sig_ptr;
8514 
8515             abi_ulong arg_sigset, arg_sigsize, *arg7;
8516             target_sigset_t *target_sigset;
8517 
8518             n = arg1;
8519             rfd_addr = arg2;
8520             wfd_addr = arg3;
8521             efd_addr = arg4;
8522             ts_addr = arg5;
8523 
8524             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8525             if (ret) {
8526                 return ret;
8527             }
8528             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8529             if (ret) {
8530                 return ret;
8531             }
8532             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8533             if (ret) {
8534                 return ret;
8535             }
8536 
8537             /*
8538              * This takes a timespec, and not a timeval, so we cannot
8539              * use the do_select() helper ...
8540              */
8541             if (ts_addr) {
8542                 if (target_to_host_timespec(&ts, ts_addr)) {
8543                     return -TARGET_EFAULT;
8544                 }
8545                 ts_ptr = &ts;
8546             } else {
8547                 ts_ptr = NULL;
8548             }
8549 
8550             /* Extract the two packed args for the sigset */
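                 /* i.e. arg6 points at two consecutive abi_ulongs in guest
                  * memory: the sigset pointer followed by the sigset size.
                  */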
8551             if (arg6) {
8552                 sig_ptr = &sig;
8553                 sig.size = SIGSET_T_SIZE;
8554 
8555                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8556                 if (!arg7) {
8557                     return -TARGET_EFAULT;
8558                 }
8559                 arg_sigset = tswapal(arg7[0]);
8560                 arg_sigsize = tswapal(arg7[1]);
8561                 unlock_user(arg7, arg6, 0);
8562 
8563                 if (arg_sigset) {
8564                     sig.set = &set;
8565                     if (arg_sigsize != sizeof(*target_sigset)) {
8566                         /* Like the kernel, we enforce correct size sigsets */
8567                         return -TARGET_EINVAL;
8568                     }
8569                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8570                                               sizeof(*target_sigset), 1);
8571                     if (!target_sigset) {
8572                         return -TARGET_EFAULT;
8573                     }
8574                     target_to_host_sigset(&set, target_sigset);
8575                     unlock_user(target_sigset, arg_sigset, 0);
8576                 } else {
8577                     sig.set = NULL;
8578                 }
8579             } else {
8580                 sig_ptr = NULL;
8581             }
8582 
8583             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8584                                           ts_ptr, sig_ptr));
8585 
8586             if (!is_error(ret)) {
8587                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8588                     return -TARGET_EFAULT;
8589                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8590                     return -TARGET_EFAULT;
8591                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8592                     return -TARGET_EFAULT;
8593 
8594                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8595                     return -TARGET_EFAULT;
8596             }
8597         }
8598         return ret;
8599 #endif
8600 #ifdef TARGET_NR_symlink
8601     case TARGET_NR_symlink:
8602         {
8603             void *p2;
8604             p = lock_user_string(arg1);
8605             p2 = lock_user_string(arg2);
8606             if (!p || !p2)
8607                 ret = -TARGET_EFAULT;
8608             else
8609                 ret = get_errno(symlink(p, p2));
8610             unlock_user(p2, arg2, 0);
8611             unlock_user(p, arg1, 0);
8612         }
8613         return ret;
8614 #endif
8615 #if defined(TARGET_NR_symlinkat)
8616     case TARGET_NR_symlinkat:
8617         {
8618             void *p2;
8619             p  = lock_user_string(arg1);
8620             p2 = lock_user_string(arg3);
8621             if (!p || !p2)
8622                 ret = -TARGET_EFAULT;
8623             else
8624                 ret = get_errno(symlinkat(p, arg2, p2));
8625             unlock_user(p2, arg3, 0);
8626             unlock_user(p, arg1, 0);
8627         }
8628         return ret;
8629 #endif
8630 #ifdef TARGET_NR_readlink
8631     case TARGET_NR_readlink:
8632         {
8633             void *p2;
8634             p = lock_user_string(arg1);
8635             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8636             if (!p || !p2) {
8637                 ret = -TARGET_EFAULT;
8638             } else if (!arg3) {
8639                 /* Short circuit this for the magic exe check. */
8640                 ret = -TARGET_EINVAL;
8641             } else if (is_proc_myself((const char *)p, "exe")) {
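                     /* The guest is reading its own /proc/<pid>/exe link, so
                      * report the guest binary (exec_path) rather than the
                      * QEMU executable the host link actually points at.
                      */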
8642                 char real[PATH_MAX], *temp;
8643                 temp = realpath(exec_path, real);
8644                 /* Return value is # of bytes that we wrote to the buffer. */
8645                 if (temp == NULL) {
8646                     ret = get_errno(-1);
8647                 } else {
8648                     /* Don't worry about sign mismatch as earlier mapping
8649                      * logic would have thrown a bad address error. */
8650                     ret = MIN(strlen(real), arg3);
8651                     /* We cannot NUL terminate the string. */
8652                     memcpy(p2, real, ret);
8653                 }
8654             } else {
8655                 ret = get_errno(readlink(path(p), p2, arg3));
8656             }
8657             unlock_user(p2, arg2, ret);
8658             unlock_user(p, arg1, 0);
8659         }
8660         return ret;
8661 #endif
8662 #if defined(TARGET_NR_readlinkat)
8663     case TARGET_NR_readlinkat:
8664         {
8665             void *p2;
8666             p  = lock_user_string(arg2);
8667             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8668             if (!p || !p2) {
8669                 ret = -TARGET_EFAULT;
8670             } else if (is_proc_myself((const char *)p, "exe")) {
8671                 char real[PATH_MAX], *temp;
8672                 temp = realpath(exec_path, real);
8673                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8674                 snprintf((char *)p2, arg4, "%s", real);
8675             } else {
8676                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8677             }
8678             unlock_user(p2, arg3, ret);
8679             unlock_user(p, arg2, 0);
8680         }
8681         return ret;
8682 #endif
8683 #ifdef TARGET_NR_swapon
8684     case TARGET_NR_swapon:
8685         if (!(p = lock_user_string(arg1)))
8686             return -TARGET_EFAULT;
8687         ret = get_errno(swapon(p, arg2));
8688         unlock_user(p, arg1, 0);
8689         return ret;
8690 #endif
8691     case TARGET_NR_reboot:
8692         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8693            /* arg4 is only valid with LINUX_REBOOT_CMD_RESTART2; it must be ignored otherwise */
8694            p = lock_user_string(arg4);
8695            if (!p) {
8696                return -TARGET_EFAULT;
8697            }
8698            ret = get_errno(reboot(arg1, arg2, arg3, p));
8699            unlock_user(p, arg4, 0);
8700         } else {
8701            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8702         }
8703         return ret;
8704 #ifdef TARGET_NR_mmap
8705     case TARGET_NR_mmap:
8706 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8707     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8708     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8709     || defined(TARGET_S390X)
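             /* On these 32-bit ABIs the old mmap syscall passes a pointer to
              * a block of six arguments in guest memory rather than passing
              * the arguments in registers.
              */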
8710         {
8711             abi_ulong *v;
8712             abi_ulong v1, v2, v3, v4, v5, v6;
8713             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8714                 return -TARGET_EFAULT;
8715             v1 = tswapal(v[0]);
8716             v2 = tswapal(v[1]);
8717             v3 = tswapal(v[2]);
8718             v4 = tswapal(v[3]);
8719             v5 = tswapal(v[4]);
8720             v6 = tswapal(v[5]);
8721             unlock_user(v, arg1, 0);
8722             ret = get_errno(target_mmap(v1, v2, v3,
8723                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8724                                         v5, v6));
8725         }
8726 #else
8727         ret = get_errno(target_mmap(arg1, arg2, arg3,
8728                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8729                                     arg5,
8730                                     arg6));
8731 #endif
8732         return ret;
8733 #endif
8734 #ifdef TARGET_NR_mmap2
8735     case TARGET_NR_mmap2:
8736 #ifndef MMAP_SHIFT
8737 #define MMAP_SHIFT 12
8738 #endif
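             /* The mmap2 offset is counted in units of 1 << MMAP_SHIFT bytes
              * (4096 by default), hence the shift below.
              */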
8739         ret = target_mmap(arg1, arg2, arg3,
8740                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8741                           arg5, arg6 << MMAP_SHIFT);
8742         return get_errno(ret);
8743 #endif
8744     case TARGET_NR_munmap:
8745         return get_errno(target_munmap(arg1, arg2));
8746     case TARGET_NR_mprotect:
8747         {
8748             TaskState *ts = cpu->opaque;
8749             /* Special hack to detect libc making the stack executable.  */
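                 /* PROT_GROWSDOWN asks for the protection change to extend
                  * down to the start of the (grows-down) stack mapping, so
                  * widen the range to the guest stack limit ourselves and
                  * drop the flag before calling target_mprotect().
                  */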
8750             if ((arg3 & PROT_GROWSDOWN)
8751                 && arg1 >= ts->info->stack_limit
8752                 && arg1 <= ts->info->start_stack) {
8753                 arg3 &= ~PROT_GROWSDOWN;
8754                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8755                 arg1 = ts->info->stack_limit;
8756             }
8757         }
8758         return get_errno(target_mprotect(arg1, arg2, arg3));
8759 #ifdef TARGET_NR_mremap
8760     case TARGET_NR_mremap:
8761         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8762 #endif
8763         /* ??? msync/mlock/munlock are broken for softmmu.  */
8764 #ifdef TARGET_NR_msync
8765     case TARGET_NR_msync:
8766         return get_errno(msync(g2h(arg1), arg2, arg3));
8767 #endif
8768 #ifdef TARGET_NR_mlock
8769     case TARGET_NR_mlock:
8770         return get_errno(mlock(g2h(arg1), arg2));
8771 #endif
8772 #ifdef TARGET_NR_munlock
8773     case TARGET_NR_munlock:
8774         return get_errno(munlock(g2h(arg1), arg2));
8775 #endif
8776 #ifdef TARGET_NR_mlockall
8777     case TARGET_NR_mlockall:
8778         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8779 #endif
8780 #ifdef TARGET_NR_munlockall
8781     case TARGET_NR_munlockall:
8782         return get_errno(munlockall());
8783 #endif
8784 #ifdef TARGET_NR_truncate
8785     case TARGET_NR_truncate:
8786         if (!(p = lock_user_string(arg1)))
8787             return -TARGET_EFAULT;
8788         ret = get_errno(truncate(p, arg2));
8789         unlock_user(p, arg1, 0);
8790         return ret;
8791 #endif
8792 #ifdef TARGET_NR_ftruncate
8793     case TARGET_NR_ftruncate:
8794         return get_errno(ftruncate(arg1, arg2));
8795 #endif
8796     case TARGET_NR_fchmod:
8797         return get_errno(fchmod(arg1, arg2));
8798 #if defined(TARGET_NR_fchmodat)
8799     case TARGET_NR_fchmodat:
8800         if (!(p = lock_user_string(arg2)))
8801             return -TARGET_EFAULT;
8802         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8803         unlock_user(p, arg2, 0);
8804         return ret;
8805 #endif
8806     case TARGET_NR_getpriority:
8807         /* Note that negative values are valid for getpriority, so we must
8808            differentiate based on errno settings.  */
8809         errno = 0;
8810         ret = getpriority(arg1, arg2);
8811         if (ret == -1 && errno != 0) {
8812             return -host_to_target_errno(errno);
8813         }
8814 #ifdef TARGET_ALPHA
8815         /* Return value is the unbiased priority.  Signal no error.  */
8816         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8817 #else
8818         /* Return value is a biased priority to avoid negative numbers.  */
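             /* e.g. a host nice of -20 is reported as 40 and +19 as 1,
              * matching the kernel's syscall-level range of 1..40.
              */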
8819         ret = 20 - ret;
8820 #endif
8821         return ret;
8822     case TARGET_NR_setpriority:
8823         return get_errno(setpriority(arg1, arg2, arg3));
8824 #ifdef TARGET_NR_statfs
8825     case TARGET_NR_statfs:
8826         if (!(p = lock_user_string(arg1))) {
8827             return -TARGET_EFAULT;
8828         }
8829         ret = get_errno(statfs(path(p), &stfs));
8830         unlock_user(p, arg1, 0);
8831     convert_statfs:
8832         if (!is_error(ret)) {
8833             struct target_statfs *target_stfs;
8834 
8835             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8836                 return -TARGET_EFAULT;
8837             __put_user(stfs.f_type, &target_stfs->f_type);
8838             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8839             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8840             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8841             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8842             __put_user(stfs.f_files, &target_stfs->f_files);
8843             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8844             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8845             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8846             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8847             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8848 #ifdef _STATFS_F_FLAGS
8849             __put_user(stfs.f_flags, &target_stfs->f_flags);
8850 #else
8851             __put_user(0, &target_stfs->f_flags);
8852 #endif
8853             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8854             unlock_user_struct(target_stfs, arg2, 1);
8855         }
8856         return ret;
8857 #endif
8858 #ifdef TARGET_NR_fstatfs
8859     case TARGET_NR_fstatfs:
8860         ret = get_errno(fstatfs(arg1, &stfs));
8861         goto convert_statfs;
8862 #endif
8863 #ifdef TARGET_NR_statfs64
8864     case TARGET_NR_statfs64:
8865         if (!(p = lock_user_string(arg1))) {
8866             return -TARGET_EFAULT;
8867         }
8868         ret = get_errno(statfs(path(p), &stfs));
8869         unlock_user(p, arg1, 0);
8870     convert_statfs64:
8871         if (!is_error(ret)) {
8872             struct target_statfs64 *target_stfs;
8873 
8874             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8875                 return -TARGET_EFAULT;
8876             __put_user(stfs.f_type, &target_stfs->f_type);
8877             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8878             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8879             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8880             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8881             __put_user(stfs.f_files, &target_stfs->f_files);
8882             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8883             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8884             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8885             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8886             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8887             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8888             unlock_user_struct(target_stfs, arg3, 1);
8889         }
8890         return ret;
8891     case TARGET_NR_fstatfs64:
8892         ret = get_errno(fstatfs(arg1, &stfs));
8893         goto convert_statfs64;
8894 #endif
8895 #ifdef TARGET_NR_socketcall
8896     case TARGET_NR_socketcall:
8897         return do_socketcall(arg1, arg2);
8898 #endif
8899 #ifdef TARGET_NR_accept
8900     case TARGET_NR_accept:
8901         return do_accept4(arg1, arg2, arg3, 0);
8902 #endif
8903 #ifdef TARGET_NR_accept4
8904     case TARGET_NR_accept4:
8905         return do_accept4(arg1, arg2, arg3, arg4);
8906 #endif
8907 #ifdef TARGET_NR_bind
8908     case TARGET_NR_bind:
8909         return do_bind(arg1, arg2, arg3);
8910 #endif
8911 #ifdef TARGET_NR_connect
8912     case TARGET_NR_connect:
8913         return do_connect(arg1, arg2, arg3);
8914 #endif
8915 #ifdef TARGET_NR_getpeername
8916     case TARGET_NR_getpeername:
8917         return do_getpeername(arg1, arg2, arg3);
8918 #endif
8919 #ifdef TARGET_NR_getsockname
8920     case TARGET_NR_getsockname:
8921         return do_getsockname(arg1, arg2, arg3);
8922 #endif
8923 #ifdef TARGET_NR_getsockopt
8924     case TARGET_NR_getsockopt:
8925         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8926 #endif
8927 #ifdef TARGET_NR_listen
8928     case TARGET_NR_listen:
8929         return get_errno(listen(arg1, arg2));
8930 #endif
8931 #ifdef TARGET_NR_recv
8932     case TARGET_NR_recv:
8933         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8934 #endif
8935 #ifdef TARGET_NR_recvfrom
8936     case TARGET_NR_recvfrom:
8937         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8938 #endif
8939 #ifdef TARGET_NR_recvmsg
8940     case TARGET_NR_recvmsg:
8941         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8942 #endif
8943 #ifdef TARGET_NR_send
8944     case TARGET_NR_send:
8945         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8946 #endif
8947 #ifdef TARGET_NR_sendmsg
8948     case TARGET_NR_sendmsg:
8949         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8950 #endif
8951 #ifdef TARGET_NR_sendmmsg
8952     case TARGET_NR_sendmmsg:
8953         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8954     case TARGET_NR_recvmmsg:
8955         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8956 #endif
8957 #ifdef TARGET_NR_sendto
8958     case TARGET_NR_sendto:
8959         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8960 #endif
8961 #ifdef TARGET_NR_shutdown
8962     case TARGET_NR_shutdown:
8963         return get_errno(shutdown(arg1, arg2));
8964 #endif
8965 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8966     case TARGET_NR_getrandom:
8967         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8968         if (!p) {
8969             return -TARGET_EFAULT;
8970         }
8971         ret = get_errno(getrandom(p, arg2, arg3));
8972         unlock_user(p, arg1, ret);
8973         return ret;
8974 #endif
8975 #ifdef TARGET_NR_socket
8976     case TARGET_NR_socket:
8977         return do_socket(arg1, arg2, arg3);
8978 #endif
8979 #ifdef TARGET_NR_socketpair
8980     case TARGET_NR_socketpair:
8981         return do_socketpair(arg1, arg2, arg3, arg4);
8982 #endif
8983 #ifdef TARGET_NR_setsockopt
8984     case TARGET_NR_setsockopt:
8985         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8986 #endif
8987 #if defined(TARGET_NR_syslog)
8988     case TARGET_NR_syslog:
8989         {
8990             int len = arg2;
8991 
8992             switch (arg1) {
8993             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8994             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8995             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8996             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8997             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8998             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8999             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9000             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9001                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9002             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9003             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9004             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9005                 {
9006                     if (len < 0) {
9007                         return -TARGET_EINVAL;
9008                     }
9009                     if (len == 0) {
9010                         return 0;
9011                     }
9012                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9013                     if (!p) {
9014                         return -TARGET_EFAULT;
9015                     }
9016                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9017                     unlock_user(p, arg2, arg3);
9018                 }
9019                 return ret;
9020             default:
9021                 return -TARGET_EINVAL;
9022             }
9023         }
9024         break;
9025 #endif
9026     case TARGET_NR_setitimer:
9027         {
9028             struct itimerval value, ovalue, *pvalue;
9029 
9030             if (arg2) {
9031                 pvalue = &value;
9032                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9033                     || copy_from_user_timeval(&pvalue->it_value,
9034                                               arg2 + sizeof(struct target_timeval)))
9035                     return -TARGET_EFAULT;
9036             } else {
9037                 pvalue = NULL;
9038             }
9039             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9040             if (!is_error(ret) && arg3) {
9041                 if (copy_to_user_timeval(arg3,
9042                                          &ovalue.it_interval)
9043                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9044                                             &ovalue.it_value))
9045                     return -TARGET_EFAULT;
9046             }
9047         }
9048         return ret;
9049     case TARGET_NR_getitimer:
9050         {
9051             struct itimerval value;
9052 
9053             ret = get_errno(getitimer(arg1, &value));
9054             if (!is_error(ret) && arg2) {
9055                 if (copy_to_user_timeval(arg2,
9056                                          &value.it_interval)
9057                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9058                                             &value.it_value))
9059                     return -TARGET_EFAULT;
9060             }
9061         }
9062         return ret;
9063 #ifdef TARGET_NR_stat
9064     case TARGET_NR_stat:
9065         if (!(p = lock_user_string(arg1))) {
9066             return -TARGET_EFAULT;
9067         }
9068         ret = get_errno(stat(path(p), &st));
9069         unlock_user(p, arg1, 0);
9070         goto do_stat;
9071 #endif
9072 #ifdef TARGET_NR_lstat
9073     case TARGET_NR_lstat:
9074         if (!(p = lock_user_string(arg1))) {
9075             return -TARGET_EFAULT;
9076         }
9077         ret = get_errno(lstat(path(p), &st));
9078         unlock_user(p, arg1, 0);
9079         goto do_stat;
9080 #endif
9081 #ifdef TARGET_NR_fstat
9082     case TARGET_NR_fstat:
9083         {
9084             ret = get_errno(fstat(arg1, &st));
9085 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9086         do_stat:
9087 #endif
9088             if (!is_error(ret)) {
9089                 struct target_stat *target_st;
9090 
9091                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9092                     return -TARGET_EFAULT;
9093                 memset(target_st, 0, sizeof(*target_st));
9094                 __put_user(st.st_dev, &target_st->st_dev);
9095                 __put_user(st.st_ino, &target_st->st_ino);
9096                 __put_user(st.st_mode, &target_st->st_mode);
9097                 __put_user(st.st_uid, &target_st->st_uid);
9098                 __put_user(st.st_gid, &target_st->st_gid);
9099                 __put_user(st.st_nlink, &target_st->st_nlink);
9100                 __put_user(st.st_rdev, &target_st->st_rdev);
9101                 __put_user(st.st_size, &target_st->st_size);
9102                 __put_user(st.st_blksize, &target_st->st_blksize);
9103                 __put_user(st.st_blocks, &target_st->st_blocks);
9104                 __put_user(st.st_atime, &target_st->target_st_atime);
9105                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9106                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9107 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9108     defined(TARGET_STAT_HAVE_NSEC)
9109                 __put_user(st.st_atim.tv_nsec,
9110                            &target_st->target_st_atime_nsec);
9111                 __put_user(st.st_mtim.tv_nsec,
9112                            &target_st->target_st_mtime_nsec);
9113                 __put_user(st.st_ctim.tv_nsec,
9114                            &target_st->target_st_ctime_nsec);
9115 #endif
9116                 unlock_user_struct(target_st, arg2, 1);
9117             }
9118         }
9119         return ret;
9120 #endif
9121     case TARGET_NR_vhangup:
9122         return get_errno(vhangup());
9123 #ifdef TARGET_NR_syscall
9124     case TARGET_NR_syscall:
9125         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9126                           arg6, arg7, arg8, 0);
9127 #endif
9128     case TARGET_NR_wait4:
9129         {
9130             int status;
9131             abi_long status_ptr = arg2;
9132             struct rusage rusage, *rusage_ptr;
9133             abi_ulong target_rusage = arg4;
9134             abi_long rusage_err;
9135             if (target_rusage)
9136                 rusage_ptr = &rusage;
9137             else
9138                 rusage_ptr = NULL;
9139             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9140             if (!is_error(ret)) {
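                     /* A return of 0 (e.g. WNOHANG with no child state change)
                      * means there is no status to report, so leave the guest
                      * status word untouched in that case.
                      */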
9141                 if (status_ptr && ret) {
9142                     status = host_to_target_waitstatus(status);
9143                     if (put_user_s32(status, status_ptr))
9144                         return -TARGET_EFAULT;
9145                 }
9146                 if (target_rusage) {
9147                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9148                     if (rusage_err) {
9149                         ret = rusage_err;
9150                     }
9151                 }
9152             }
9153         }
9154         return ret;
9155 #ifdef TARGET_NR_swapoff
9156     case TARGET_NR_swapoff:
9157         if (!(p = lock_user_string(arg1)))
9158             return -TARGET_EFAULT;
9159         ret = get_errno(swapoff(p));
9160         unlock_user(p, arg1, 0);
9161         return ret;
9162 #endif
9163     case TARGET_NR_sysinfo:
9164         {
9165             struct target_sysinfo *target_value;
9166             struct sysinfo value;
9167             ret = get_errno(sysinfo(&value));
9168             if (!is_error(ret) && arg1)
9169             {
9170                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9171                     return -TARGET_EFAULT;
9172                 __put_user(value.uptime, &target_value->uptime);
9173                 __put_user(value.loads[0], &target_value->loads[0]);
9174                 __put_user(value.loads[1], &target_value->loads[1]);
9175                 __put_user(value.loads[2], &target_value->loads[2]);
9176                 __put_user(value.totalram, &target_value->totalram);
9177                 __put_user(value.freeram, &target_value->freeram);
9178                 __put_user(value.sharedram, &target_value->sharedram);
9179                 __put_user(value.bufferram, &target_value->bufferram);
9180                 __put_user(value.totalswap, &target_value->totalswap);
9181                 __put_user(value.freeswap, &target_value->freeswap);
9182                 __put_user(value.procs, &target_value->procs);
9183                 __put_user(value.totalhigh, &target_value->totalhigh);
9184                 __put_user(value.freehigh, &target_value->freehigh);
9185                 __put_user(value.mem_unit, &target_value->mem_unit);
9186                 unlock_user_struct(target_value, arg1, 1);
9187             }
9188         }
9189         return ret;
9190 #ifdef TARGET_NR_ipc
9191     case TARGET_NR_ipc:
9192         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9193 #endif
9194 #ifdef TARGET_NR_semget
9195     case TARGET_NR_semget:
9196         return get_errno(semget(arg1, arg2, arg3));
9197 #endif
9198 #ifdef TARGET_NR_semop
9199     case TARGET_NR_semop:
9200         return do_semop(arg1, arg2, arg3);
9201 #endif
9202 #ifdef TARGET_NR_semctl
9203     case TARGET_NR_semctl:
9204         return do_semctl(arg1, arg2, arg3, arg4);
9205 #endif
9206 #ifdef TARGET_NR_msgctl
9207     case TARGET_NR_msgctl:
9208         return do_msgctl(arg1, arg2, arg3);
9209 #endif
9210 #ifdef TARGET_NR_msgget
9211     case TARGET_NR_msgget:
9212         return get_errno(msgget(arg1, arg2));
9213 #endif
9214 #ifdef TARGET_NR_msgrcv
9215     case TARGET_NR_msgrcv:
9216         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9217 #endif
9218 #ifdef TARGET_NR_msgsnd
9219     case TARGET_NR_msgsnd:
9220         return do_msgsnd(arg1, arg2, arg3, arg4);
9221 #endif
9222 #ifdef TARGET_NR_shmget
9223     case TARGET_NR_shmget:
9224         return get_errno(shmget(arg1, arg2, arg3));
9225 #endif
9226 #ifdef TARGET_NR_shmctl
9227     case TARGET_NR_shmctl:
9228         return do_shmctl(arg1, arg2, arg3);
9229 #endif
9230 #ifdef TARGET_NR_shmat
9231     case TARGET_NR_shmat:
9232         return do_shmat(cpu_env, arg1, arg2, arg3);
9233 #endif
9234 #ifdef TARGET_NR_shmdt
9235     case TARGET_NR_shmdt:
9236         return do_shmdt(arg1);
9237 #endif
9238     case TARGET_NR_fsync:
9239         return get_errno(fsync(arg1));
9240     case TARGET_NR_clone:
9241         /* Linux manages to have three different orderings for its
9242          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9243          * match the kernel's CONFIG_CLONE_* settings.
9244          * Microblaze is further special in that it uses a sixth
9245          * implicit argument to clone for the TLS pointer.
9246          */
9247 #if defined(TARGET_MICROBLAZE)
9248         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9249 #elif defined(TARGET_CLONE_BACKWARDS)
9250         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9251 #elif defined(TARGET_CLONE_BACKWARDS2)
9252         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9253 #else
9254         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9255 #endif
9256         return ret;
9257 #ifdef __NR_exit_group
9258         /* new thread calls */
9259     case TARGET_NR_exit_group:
9260         preexit_cleanup(cpu_env, arg1);
9261         return get_errno(exit_group(arg1));
9262 #endif
9263     case TARGET_NR_setdomainname:
9264         if (!(p = lock_user_string(arg1)))
9265             return -TARGET_EFAULT;
9266         ret = get_errno(setdomainname(p, arg2));
9267         unlock_user(p, arg1, 0);
9268         return ret;
9269     case TARGET_NR_uname:
9270         /* no need to transcode because we use the linux syscall */
9271         {
9272             struct new_utsname *buf;
9273 
9274             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9275                 return -TARGET_EFAULT;
9276             ret = get_errno(sys_uname(buf));
9277             if (!is_error(ret)) {
9278                 /* Overwrite the native machine name with whatever is being
9279                    emulated. */
9280                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9281                           sizeof(buf->machine));
9282                 /* Allow the user to override the reported release.  */
9283                 if (qemu_uname_release && *qemu_uname_release) {
9284                     g_strlcpy(buf->release, qemu_uname_release,
9285                               sizeof(buf->release));
9286                 }
9287             }
9288             unlock_user_struct(buf, arg1, 1);
9289         }
9290         return ret;
9291 #ifdef TARGET_I386
9292     case TARGET_NR_modify_ldt:
9293         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9294 #if !defined(TARGET_X86_64)
9295     case TARGET_NR_vm86:
9296         return do_vm86(cpu_env, arg1, arg2);
9297 #endif
9298 #endif
9299     case TARGET_NR_adjtimex:
9300         {
9301             struct timex host_buf;
9302 
9303             if (target_to_host_timex(&host_buf, arg1) != 0) {
9304                 return -TARGET_EFAULT;
9305             }
9306             ret = get_errno(adjtimex(&host_buf));
9307             if (!is_error(ret)) {
9308                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9309                     return -TARGET_EFAULT;
9310                 }
9311             }
9312         }
9313         return ret;
9314 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9315     case TARGET_NR_clock_adjtime:
9316         {
9317             struct timex htx, *phtx = &htx;
9318 
9319             if (target_to_host_timex(phtx, arg2) != 0) {
9320                 return -TARGET_EFAULT;
9321             }
9322             ret = get_errno(clock_adjtime(arg1, phtx));
9323             if (!is_error(ret)) {
9324                 if (host_to_target_timex(arg2, phtx) != 0) {
9325                     return -TARGET_EFAULT;
9326                 }
9327             }
9328         }
9329         return ret;
9330 #endif
9331     case TARGET_NR_getpgid:
9332         return get_errno(getpgid(arg1));
9333     case TARGET_NR_fchdir:
9334         return get_errno(fchdir(arg1));
9335     case TARGET_NR_personality:
9336         return get_errno(personality(arg1));
9337 #ifdef TARGET_NR__llseek /* Not on alpha */
9338     case TARGET_NR__llseek:
9339         {
9340             int64_t res;
9341 #if !defined(__NR_llseek)
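                 /*
                  * Hosts without an llseek syscall (64-bit hosts) can seek
                  * directly with lseek(); reassemble the 64-bit offset from
                  * the guest's high (arg2) and low (arg3) halves.
                  */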
9342             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9343             if (res == -1) {
9344                 ret = get_errno(res);
9345             } else {
9346                 ret = 0;
9347             }
9348 #else
9349             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9350 #endif
9351             if ((ret == 0) && put_user_s64(res, arg4)) {
9352                 return -TARGET_EFAULT;
9353             }
9354         }
9355         return ret;
9356 #endif
9357 #ifdef TARGET_NR_getdents
9358     case TARGET_NR_getdents:
9359 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9360 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9361         {
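                 /*
                  * 32-bit guest on a 64-bit host: the host linux_dirent has
                  * 64-bit d_ino/d_off, so read into a bounce buffer and
                  * repack each record into the smaller target_dirent layout.
                  */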
9362             struct target_dirent *target_dirp;
9363             struct linux_dirent *dirp;
9364             abi_long count = arg3;
9365 
9366             dirp = g_try_malloc(count);
9367             if (!dirp) {
9368                 return -TARGET_ENOMEM;
9369             }
9370 
9371             ret = get_errno(sys_getdents(arg1, dirp, count));
9372             if (!is_error(ret)) {
9373                 struct linux_dirent *de;
9374                 struct target_dirent *tde;
9375                 int len = ret;
9376                 int reclen, treclen;
9377                 int count1, tnamelen;
9378 
9379                 count1 = 0;
9380                 de = dirp;
9381                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                         g_free(dirp);
                         return -TARGET_EFAULT;
                     }
9383                 tde = target_dirp;
9384                 while (len > 0) {
9385                     reclen = de->d_reclen;
9386                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9387                     assert(tnamelen >= 0);
9388                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9389                     assert(count1 + treclen <= count);
9390                     tde->d_reclen = tswap16(treclen);
9391                     tde->d_ino = tswapal(de->d_ino);
9392                     tde->d_off = tswapal(de->d_off);
9393                     memcpy(tde->d_name, de->d_name, tnamelen);
9394                     de = (struct linux_dirent *)((char *)de + reclen);
9395                     len -= reclen;
9396                     tde = (struct target_dirent *)((char *)tde + treclen);
9397                     count1 += treclen;
9398                 }
9399                 ret = count1;
9400                 unlock_user(target_dirp, arg2, ret);
9401             }
9402             g_free(dirp);
9403         }
9404 #else
9405         {
9406             struct linux_dirent *dirp;
9407             abi_long count = arg3;
9408 
9409             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9410                 return -TARGET_EFAULT;
9411             ret = get_errno(sys_getdents(arg1, dirp, count));
9412             if (!is_error(ret)) {
9413                 struct linux_dirent *de;
9414                 int len = ret;
9415                 int reclen;
9416                 de = dirp;
9417                 while (len > 0) {
9418                     reclen = de->d_reclen;
9419                     if (reclen > len)
9420                         break;
9421                     de->d_reclen = tswap16(reclen);
9422                     tswapls(&de->d_ino);
9423                     tswapls(&de->d_off);
9424                     de = (struct linux_dirent *)((char *)de + reclen);
9425                     len -= reclen;
9426                 }
9427             }
9428             unlock_user(dirp, arg2, ret);
9429         }
9430 #endif
9431 #else
9432         /* Implement getdents in terms of getdents64 */
9433         {
9434             struct linux_dirent64 *dirp;
9435             abi_long count = arg3;
9436 
9437             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9438             if (!dirp) {
9439                 return -TARGET_EFAULT;
9440             }
9441             ret = get_errno(sys_getdents64(arg1, dirp, count));
9442             if (!is_error(ret)) {
9443                 /* Convert the dirent64 structs to target dirent.  We do this
9444                  * in-place, since we can guarantee that a target_dirent is no
9445                  * larger than a dirent64; however this means we have to be
9446                  * careful to read everything before writing in the new format.
9447                  */
9448                 struct linux_dirent64 *de;
9449                 struct target_dirent *tde;
9450                 int len = ret;
9451                 int tlen = 0;
9452 
9453                 de = dirp;
9454                 tde = (struct target_dirent *)dirp;
9455                 while (len > 0) {
9456                     int namelen, treclen;
9457                     int reclen = de->d_reclen;
9458                     uint64_t ino = de->d_ino;
9459                     int64_t off = de->d_off;
9460                     uint8_t type = de->d_type;
9461 
9462                     namelen = strlen(de->d_name);
9463                     treclen = offsetof(struct target_dirent, d_name)
9464                         + namelen + 2;
9465                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9466 
9467                     memmove(tde->d_name, de->d_name, namelen + 1);
9468                     tde->d_ino = tswapal(ino);
9469                     tde->d_off = tswapal(off);
9470                     tde->d_reclen = tswap16(treclen);
9471                     /* The target_dirent type is in what was formerly a padding
9472                      * byte at the end of the structure:
9473                      */
9474                     *(((char *)tde) + treclen - 1) = type;
9475 
9476                     de = (struct linux_dirent64 *)((char *)de + reclen);
9477                     tde = (struct target_dirent *)((char *)tde + treclen);
9478                     len -= reclen;
9479                     tlen += treclen;
9480                 }
9481                 ret = tlen;
9482             }
9483             unlock_user(dirp, arg2, ret);
9484         }
9485 #endif
9486         return ret;
9487 #endif /* TARGET_NR_getdents */
9488 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9489     case TARGET_NR_getdents64:
9490         {
9491             struct linux_dirent64 *dirp;
9492             abi_long count = arg3;
9493             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9494                 return -TARGET_EFAULT;
9495             ret = get_errno(sys_getdents64(arg1, dirp, count));
9496             if (!is_error(ret)) {
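                     /*
                      * Host and target linux_dirent64 layouts match, so only
                      * the multi-byte fields need byte-swapping, in place.
                      */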
9497                 struct linux_dirent64 *de;
9498                 int len = ret;
9499                 int reclen;
9500                 de = dirp;
9501                 while (len > 0) {
9502                     reclen = de->d_reclen;
9503                     if (reclen > len)
9504                         break;
9505                     de->d_reclen = tswap16(reclen);
9506                     tswap64s((uint64_t *)&de->d_ino);
9507                     tswap64s((uint64_t *)&de->d_off);
9508                     de = (struct linux_dirent64 *)((char *)de + reclen);
9509                     len -= reclen;
9510                 }
9511             }
9512             unlock_user(dirp, arg2, ret);
9513         }
9514         return ret;
9515 #endif /* TARGET_NR_getdents64 */
9516 #if defined(TARGET_NR__newselect)
9517     case TARGET_NR__newselect:
9518         return do_select(arg1, arg2, arg3, arg4, arg5);
9519 #endif
9520 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9521 # ifdef TARGET_NR_poll
9522     case TARGET_NR_poll:
9523 # endif
9524 # ifdef TARGET_NR_ppoll
9525     case TARGET_NR_ppoll:
9526 # endif
9527         {
9528             struct target_pollfd *target_pfd;
9529             unsigned int nfds = arg2;
9530             struct pollfd *pfd;
9531             unsigned int i;
9532 
9533             pfd = NULL;
9534             target_pfd = NULL;
9535             if (nfds) {
9536                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9537                     return -TARGET_EINVAL;
9538                 }
9539 
9540                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9541                                        sizeof(struct target_pollfd) * nfds, 1);
9542                 if (!target_pfd) {
9543                     return -TARGET_EFAULT;
9544                 }
9545 
9546                 pfd = alloca(sizeof(struct pollfd) * nfds);
9547                 for (i = 0; i < nfds; i++) {
9548                     pfd[i].fd = tswap32(target_pfd[i].fd);
9549                     pfd[i].events = tswap16(target_pfd[i].events);
9550                 }
9551             }
9552 
9553             switch (num) {
9554 # ifdef TARGET_NR_ppoll
9555             case TARGET_NR_ppoll:
9556             {
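                     /*
                      * For ppoll both the timeout and the signal mask are
                      * optional: convert whichever the guest supplied, call
                      * the host ppoll, then copy any updated timeout back.
                      */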
9557                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9558                 target_sigset_t *target_set;
9559                 sigset_t _set, *set = &_set;
9560 
9561                 if (arg3) {
9562                     if (target_to_host_timespec(timeout_ts, arg3)) {
9563                         unlock_user(target_pfd, arg1, 0);
9564                         return -TARGET_EFAULT;
9565                     }
9566                 } else {
9567                     timeout_ts = NULL;
9568                 }
9569 
9570                 if (arg4) {
9571                     if (arg5 != sizeof(target_sigset_t)) {
9572                         unlock_user(target_pfd, arg1, 0);
9573                         return -TARGET_EINVAL;
9574                     }
9575 
9576                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9577                     if (!target_set) {
9578                         unlock_user(target_pfd, arg1, 0);
9579                         return -TARGET_EFAULT;
9580                     }
9581                     target_to_host_sigset(set, target_set);
9582                 } else {
9583                     set = NULL;
9584                 }
9585 
9586                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9587                                            set, SIGSET_T_SIZE));
9588 
9589                 if (!is_error(ret) && arg3) {
9590                     host_to_target_timespec(arg3, timeout_ts);
9591                 }
9592                 if (arg4) {
9593                     unlock_user(target_set, arg4, 0);
9594                 }
9595                 break;
9596             }
9597 # endif
9598 # ifdef TARGET_NR_poll
9599             case TARGET_NR_poll:
9600             {
9601                 struct timespec ts, *pts;
9602 
9603                 if (arg3 >= 0) {
9604                     /* Convert ms to secs, ns */
9605                     ts.tv_sec = arg3 / 1000;
9606                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9607                     pts = &ts;
9608                 } else {
9609                     /* A negative poll() timeout means "infinite" */
9610                     pts = NULL;
9611                 }
9612                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9613                 break;
9614             }
9615 # endif
9616             default:
9617                 g_assert_not_reached();
9618             }
9619 
9620             if (!is_error(ret)) {
9621                 for (i = 0; i < nfds; i++) {
9622                     target_pfd[i].revents = tswap16(pfd[i].revents);
9623                 }
9624             }
9625             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9626         }
9627         return ret;
9628 #endif
9629     case TARGET_NR_flock:
9630         /* NOTE: the flock constant seems to be the same for every
9631            Linux platform */
9632         return get_errno(safe_flock(arg1, arg2));
9633     case TARGET_NR_readv:
9634         {
9635             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9636             if (vec != NULL) {
9637                 ret = get_errno(safe_readv(arg1, vec, arg3));
9638                 unlock_iovec(vec, arg2, arg3, 1);
9639             } else {
9640                 ret = -host_to_target_errno(errno);
9641             }
9642         }
9643         return ret;
9644     case TARGET_NR_writev:
9645         {
9646             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9647             if (vec != NULL) {
9648                 ret = get_errno(safe_writev(arg1, vec, arg3));
9649                 unlock_iovec(vec, arg2, arg3, 0);
9650             } else {
9651                 ret = -host_to_target_errno(errno);
9652             }
9653         }
9654         return ret;
9655 #if defined(TARGET_NR_preadv)
9656     case TARGET_NR_preadv:
9657         {
9658             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9659             if (vec != NULL) {
9660                 unsigned long low, high;
9661 
9662                 target_to_host_low_high(arg4, arg5, &low, &high);
9663                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9664                 unlock_iovec(vec, arg2, arg3, 1);
9665             } else {
9666                 ret = -host_to_target_errno(errno);
9667             }
9668         }
9669         return ret;
9670 #endif
9671 #if defined(TARGET_NR_pwritev)
9672     case TARGET_NR_pwritev:
9673         {
9674             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9675             if (vec != NULL) {
9676                 unsigned long low, high;
9677 
9678                 target_to_host_low_high(arg4, arg5, &low, &high);
9679                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9680                 unlock_iovec(vec, arg2, arg3, 0);
9681             } else {
9682                 ret = -host_to_target_errno(errno);
9683             }
9684         }
9685         return ret;
9686 #endif
9687     case TARGET_NR_getsid:
9688         return get_errno(getsid(arg1));
9689 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9690     case TARGET_NR_fdatasync:
9691         return get_errno(fdatasync(arg1));
9692 #endif
9693 #ifdef TARGET_NR__sysctl
9694     case TARGET_NR__sysctl:
9695         /* We don't implement this, but ENOTDIR is always a safe
9696            return value. */
9697         return -TARGET_ENOTDIR;
9698 #endif
9699     case TARGET_NR_sched_getaffinity:
9700         {
9701             unsigned int mask_size;
9702             unsigned long *mask;
9703 
9704             /*
9705              * sched_getaffinity needs multiples of ulong, so we need to take
9706              * care of mismatches between target ulong and host ulong sizes.
9707              */
9708             if (arg2 & (sizeof(abi_ulong) - 1)) {
9709                 return -TARGET_EINVAL;
9710             }
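                 /* Round the guest buffer size up to whole host ulongs. */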
9711             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9712 
9713             mask = alloca(mask_size);
9714             memset(mask, 0, mask_size);
9715             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9716 
9717             if (!is_error(ret)) {
9718                 if (ret > arg2) {
9719                     /* More data returned than the caller's buffer will fit.
9720                      * This only happens if sizeof(abi_long) < sizeof(long)
9721                      * and the caller passed us a buffer holding an odd number
9722                      * of abi_longs. If the host kernel is actually using the
9723                      * extra 4 bytes then fail EINVAL; otherwise we can just
9724                      * ignore them and only copy the interesting part.
9725                      */
9726                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9727                     if (numcpus > arg2 * 8) {
9728                         return -TARGET_EINVAL;
9729                     }
9730                     ret = arg2;
9731                 }
9732 
9733                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9734                     return -TARGET_EFAULT;
9735                 }
9736             }
9737         }
9738         return ret;
9739     case TARGET_NR_sched_setaffinity:
9740         {
9741             unsigned int mask_size;
9742             unsigned long *mask;
9743 
9744             /*
9745              * sched_setaffinity needs multiples of ulong, so we need to take
9746              * care of mismatches between target ulong and host ulong sizes.
9747              */
9748             if (arg2 & (sizeof(abi_ulong) - 1)) {
9749                 return -TARGET_EINVAL;
9750             }
9751             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9752             mask = alloca(mask_size);
9753 
9754             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9755             if (ret) {
9756                 return ret;
9757             }
9758 
9759             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9760         }
9761     case TARGET_NR_getcpu:
9762         {
9763             unsigned cpu, node;
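                 /*
                  * Both output pointers are optional; the third (tcache)
                  * argument is unused by the kernel, so NULL is passed.
                  */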
9764             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9765                                        arg2 ? &node : NULL,
9766                                        NULL));
9767             if (is_error(ret)) {
9768                 return ret;
9769             }
9770             if (arg1 && put_user_u32(cpu, arg1)) {
9771                 return -TARGET_EFAULT;
9772             }
9773             if (arg2 && put_user_u32(node, arg2)) {
9774                 return -TARGET_EFAULT;
9775             }
9776         }
9777         return ret;
9778     case TARGET_NR_sched_setparam:
9779         {
9780             struct sched_param *target_schp;
9781             struct sched_param schp;
9782 
9783             if (arg2 == 0) {
9784                 return -TARGET_EINVAL;
9785             }
9786             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9787                 return -TARGET_EFAULT;
9788             schp.sched_priority = tswap32(target_schp->sched_priority);
9789             unlock_user_struct(target_schp, arg2, 0);
9790             return get_errno(sched_setparam(arg1, &schp));
9791         }
9792     case TARGET_NR_sched_getparam:
9793         {
9794             struct sched_param *target_schp;
9795             struct sched_param schp;
9796 
9797             if (arg2 == 0) {
9798                 return -TARGET_EINVAL;
9799             }
9800             ret = get_errno(sched_getparam(arg1, &schp));
9801             if (!is_error(ret)) {
9802                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9803                     return -TARGET_EFAULT;
9804                 target_schp->sched_priority = tswap32(schp.sched_priority);
9805                 unlock_user_struct(target_schp, arg2, 1);
9806             }
9807         }
9808         return ret;
9809     case TARGET_NR_sched_setscheduler:
9810         {
9811             struct sched_param *target_schp;
9812             struct sched_param schp;
9813             if (arg3 == 0) {
9814                 return -TARGET_EINVAL;
9815             }
9816             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9817                 return -TARGET_EFAULT;
9818             schp.sched_priority = tswap32(target_schp->sched_priority);
9819             unlock_user_struct(target_schp, arg3, 0);
9820             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9821         }
9822     case TARGET_NR_sched_getscheduler:
9823         return get_errno(sched_getscheduler(arg1));
9824     case TARGET_NR_sched_yield:
9825         return get_errno(sched_yield());
9826     case TARGET_NR_sched_get_priority_max:
9827         return get_errno(sched_get_priority_max(arg1));
9828     case TARGET_NR_sched_get_priority_min:
9829         return get_errno(sched_get_priority_min(arg1));
9830     case TARGET_NR_sched_rr_get_interval:
9831         {
9832             struct timespec ts;
9833             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9834             if (!is_error(ret)) {
9835                 ret = host_to_target_timespec(arg2, &ts);
9836             }
9837         }
9838         return ret;
9839     case TARGET_NR_nanosleep:
9840         {
9841             struct timespec req, rem;
9842             if (target_to_host_timespec(&req, arg1)) {
                     return -TARGET_EFAULT;
                 }
9843             ret = get_errno(safe_nanosleep(&req, &rem));
9844             if (is_error(ret) && arg2) {
9845                 host_to_target_timespec(arg2, &rem);
9846             }
9847         }
9848         return ret;
9849     case TARGET_NR_prctl:
9850         switch (arg1) {
9851         case PR_GET_PDEATHSIG:
9852         {
9853             int deathsig;
9854             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9855             if (!is_error(ret) && arg2
9856                 && put_user_ual(deathsig, arg2)) {
9857                 return -TARGET_EFAULT;
9858             }
9859             return ret;
9860         }
9861 #ifdef PR_GET_NAME
9862         case PR_GET_NAME:
9863         {
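                 /* The kernel task name (comm) is at most 16 bytes, NUL included. */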
9864             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9865             if (!name) {
9866                 return -TARGET_EFAULT;
9867             }
9868             ret = get_errno(prctl(arg1, (unsigned long)name,
9869                                   arg3, arg4, arg5));
9870             unlock_user(name, arg2, 16);
9871             return ret;
9872         }
9873         case PR_SET_NAME:
9874         {
9875             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9876             if (!name) {
9877                 return -TARGET_EFAULT;
9878             }
9879             ret = get_errno(prctl(arg1, (unsigned long)name,
9880                                   arg3, arg4, arg5));
9881             unlock_user(name, arg2, 0);
9882             return ret;
9883         }
9884 #endif
9885 #ifdef TARGET_MIPS
9886         case TARGET_PR_GET_FP_MODE:
9887         {
9888             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9889             ret = 0;
9890             if (env->CP0_Status & (1 << CP0St_FR)) {
9891                 ret |= TARGET_PR_FP_MODE_FR;
9892             }
9893             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9894                 ret |= TARGET_PR_FP_MODE_FRE;
9895             }
9896             return ret;
9897         }
9898         case TARGET_PR_SET_FP_MODE:
9899         {
9900             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9901             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9902             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9903             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9904             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9905 
9906             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9907                                             TARGET_PR_FP_MODE_FRE;
9908 
9909             /* If nothing to change, return right away, successfully.  */
9910             if (old_fr == new_fr && old_fre == new_fre) {
9911                 return 0;
9912             }
9913             /* Check the value is valid */
9914             if (arg2 & ~known_bits) {
9915                 return -TARGET_EOPNOTSUPP;
9916             }
9917             /* Setting FRE without FR is not supported.  */
9918             if (new_fre && !new_fr) {
9919                 return -TARGET_EOPNOTSUPP;
9920             }
9921             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9922                 /* FR1 is not supported */
9923                 return -TARGET_EOPNOTSUPP;
9924             }
9925             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9926                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9927                 /* cannot set FR=0 */
9928                 return -TARGET_EOPNOTSUPP;
9929             }
9930             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9931                 /* Cannot set FRE=1 */
9932                 return -TARGET_EOPNOTSUPP;
9933             }
9934 
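                 /*
                  * Toggling FR changes how single-precision values are packed
                  * into the 64-bit FPRs, so repack the odd-numbered registers
                  * to/from the upper halves of the even ones to preserve the
                  * register contents across the mode switch.
                  */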
9935             int i;
9936             fpr_t *fpr = env->active_fpu.fpr;
9937             for (i = 0; i < 32 ; i += 2) {
9938                 if (!old_fr && new_fr) {
9939                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9940                 } else if (old_fr && !new_fr) {
9941                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9942                 }
9943             }
9944 
9945             if (new_fr) {
9946                 env->CP0_Status |= (1 << CP0St_FR);
9947                 env->hflags |= MIPS_HFLAG_F64;
9948             } else {
9949                 env->CP0_Status &= ~(1 << CP0St_FR);
9950                 env->hflags &= ~MIPS_HFLAG_F64;
9951             }
9952             if (new_fre) {
9953                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9954                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9955                     env->hflags |= MIPS_HFLAG_FRE;
9956                 }
9957             } else {
9958                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9959                 env->hflags &= ~MIPS_HFLAG_FRE;
9960             }
9961 
9962             return 0;
9963         }
9964 #endif /* MIPS */
9965 #ifdef TARGET_AARCH64
9966         case TARGET_PR_SVE_SET_VL:
9967             /*
9968              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9969              * PR_SVE_VL_INHERIT.  Note the kernel definition
9970              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9971              * even though the current architectural maximum is VQ=16.
9972              */
9973             ret = -TARGET_EINVAL;
9974             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9975                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9976                 CPUARMState *env = cpu_env;
9977                 ARMCPU *cpu = env_archcpu(env);
9978                 uint32_t vq, old_vq;
9979 
9980                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
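                     /*
                      * arg2 is the requested vector length in bytes; convert
                      * it to quadwords (VQ) and clamp it to what this CPU
                      * supports.
                      */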
9981                 vq = MAX(arg2 / 16, 1);
9982                 vq = MIN(vq, cpu->sve_max_vq);
9983 
9984                 if (vq < old_vq) {
9985                     aarch64_sve_narrow_vq(env, vq);
9986                 }
9987                 env->vfp.zcr_el[1] = vq - 1;
9988                 arm_rebuild_hflags(env);
9989                 ret = vq * 16;
9990             }
9991             return ret;
9992         case TARGET_PR_SVE_GET_VL:
9993             ret = -TARGET_EINVAL;
9994             {
9995                 ARMCPU *cpu = env_archcpu(cpu_env);
9996                 if (cpu_isar_feature(aa64_sve, cpu)) {
9997                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9998                 }
9999             }
10000             return ret;
10001         case TARGET_PR_PAC_RESET_KEYS:
10002             {
10003                 CPUARMState *env = cpu_env;
10004                 ARMCPU *cpu = env_archcpu(env);
10005 
10006                 if (arg3 || arg4 || arg5) {
10007                     return -TARGET_EINVAL;
10008                 }
10009                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10010                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10011                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10012                                TARGET_PR_PAC_APGAKEY);
10013                     int ret = 0;
10014                     Error *err = NULL;
10015 
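                          /* arg2 == 0 means reset every pointer authentication key. */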
10016                     if (arg2 == 0) {
10017                         arg2 = all;
10018                     } else if (arg2 & ~all) {
10019                         return -TARGET_EINVAL;
10020                     }
10021                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10022                         ret |= qemu_guest_getrandom(&env->keys.apia,
10023                                                     sizeof(ARMPACKey), &err);
10024                     }
10025                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10026                         ret |= qemu_guest_getrandom(&env->keys.apib,
10027                                                     sizeof(ARMPACKey), &err);
10028                     }
10029                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10030                         ret |= qemu_guest_getrandom(&env->keys.apda,
10031                                                     sizeof(ARMPACKey), &err);
10032                     }
10033                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10034                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10035                                                     sizeof(ARMPACKey), &err);
10036                     }
10037                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10038                         ret |= qemu_guest_getrandom(&env->keys.apga,
10039                                                     sizeof(ARMPACKey), &err);
10040                     }
10041                     if (ret != 0) {
10042                         /*
10043                          * Some unknown failure in the crypto.  The best
10044                          * we can do is log it and fail the syscall.
10045                          * The real syscall cannot fail this way.
10046                          */
10047                         qemu_log_mask(LOG_UNIMP,
10048                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10049                                       error_get_pretty(err));
10050                         error_free(err);
10051                         return -TARGET_EIO;
10052                     }
10053                     return 0;
10054                 }
10055             }
10056             return -TARGET_EINVAL;
10057 #endif /* AARCH64 */
10058         case PR_GET_SECCOMP:
10059         case PR_SET_SECCOMP:
10060             /* Disable seccomp to prevent the target disabling syscalls we
10061              * need. */
10062             return -TARGET_EINVAL;
10063         default:
10064             /* Most prctl options have no pointer arguments */
10065             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10066         }
10067         break;
10068 #ifdef TARGET_NR_arch_prctl
10069     case TARGET_NR_arch_prctl:
10070 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10071         return do_arch_prctl(cpu_env, arg1, arg2);
10072 #else
10073 #error unreachable
10074 #endif
10075 #endif
10076 #ifdef TARGET_NR_pread64
10077     case TARGET_NR_pread64:
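              /*
               * On ABIs that pass 64-bit values in aligned register pairs a
               * padding argument shifts the offset into arg5/arg6, so realign
               * the arguments first.
               */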
10078         if (regpairs_aligned(cpu_env, num)) {
10079             arg4 = arg5;
10080             arg5 = arg6;
10081         }
10082         if (arg2 == 0 && arg3 == 0) {
10083             /* Special-case NULL buffer and zero length, which should succeed */
10084             p = 0;
10085         } else {
10086             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10087             if (!p) {
10088                 return -TARGET_EFAULT;
10089             }
10090         }
10091         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10092         unlock_user(p, arg2, ret);
10093         return ret;
10094     case TARGET_NR_pwrite64:
10095         if (regpairs_aligned(cpu_env, num)) {
10096             arg4 = arg5;
10097             arg5 = arg6;
10098         }
10099         if (arg2 == 0 && arg3 == 0) {
10100             /* Special-case NULL buffer and zero length, which should succeed */
10101             p = 0;
10102         } else {
10103             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10104             if (!p) {
10105                 return -TARGET_EFAULT;
10106             }
10107         }
10108         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10109         unlock_user(p, arg2, 0);
10110         return ret;
10111 #endif
10112     case TARGET_NR_getcwd:
10113         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10114             return -TARGET_EFAULT;
10115         ret = get_errno(sys_getcwd1(p, arg2));
10116         unlock_user(p, arg1, ret);
10117         return ret;
10118     case TARGET_NR_capget:
10119     case TARGET_NR_capset:
10120     {
10121         struct target_user_cap_header *target_header;
10122         struct target_user_cap_data *target_data = NULL;
10123         struct __user_cap_header_struct header;
10124         struct __user_cap_data_struct data[2];
10125         struct __user_cap_data_struct *dataptr = NULL;
10126         int i, target_datalen;
10127         int data_items = 1;
10128 
10129         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10130             return -TARGET_EFAULT;
10131         }
10132         header.version = tswap32(target_header->version);
10133         header.pid = tswap32(target_header->pid);
10134 
10135         if (header.version != _LINUX_CAPABILITY_VERSION) {
10136             /* Versions 2 and up take a pointer to two user_data structs */
10137             data_items = 2;
10138         }
10139 
10140         target_datalen = sizeof(*target_data) * data_items;
10141 
10142         if (arg2) {
10143             if (num == TARGET_NR_capget) {
10144                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10145             } else {
10146                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10147             }
10148             if (!target_data) {
10149                 unlock_user_struct(target_header, arg1, 0);
10150                 return -TARGET_EFAULT;
10151             }
10152 
10153             if (num == TARGET_NR_capset) {
10154                 for (i = 0; i < data_items; i++) {
10155                     data[i].effective = tswap32(target_data[i].effective);
10156                     data[i].permitted = tswap32(target_data[i].permitted);
10157                     data[i].inheritable = tswap32(target_data[i].inheritable);
10158                 }
10159             }
10160 
10161             dataptr = data;
10162         }
10163 
10164         if (num == TARGET_NR_capget) {
10165             ret = get_errno(capget(&header, dataptr));
10166         } else {
10167             ret = get_errno(capset(&header, dataptr));
10168         }
10169 
10170         /* The kernel always updates version for both capget and capset */
10171         target_header->version = tswap32(header.version);
10172         unlock_user_struct(target_header, arg1, 1);
10173 
10174         if (arg2) {
10175             if (num == TARGET_NR_capget) {
10176                 for (i = 0; i < data_items; i++) {
10177                     target_data[i].effective = tswap32(data[i].effective);
10178                     target_data[i].permitted = tswap32(data[i].permitted);
10179                     target_data[i].inheritable = tswap32(data[i].inheritable);
10180                 }
10181                 unlock_user(target_data, arg2, target_datalen);
10182             } else {
10183                 unlock_user(target_data, arg2, 0);
10184             }
10185         }
10186         return ret;
10187     }
10188     case TARGET_NR_sigaltstack:
10189         return do_sigaltstack(arg1, arg2,
10190                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10191 
10192 #ifdef CONFIG_SENDFILE
10193 #ifdef TARGET_NR_sendfile
10194     case TARGET_NR_sendfile:
10195     {
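              /*
               * The offset argument is optional: read it from guest memory
               * when present, let the host update it, then write the new
               * value back.
               */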
10196         off_t *offp = NULL;
10197         off_t off;
10198         if (arg3) {
10199             ret = get_user_sal(off, arg3);
10200             if (is_error(ret)) {
10201                 return ret;
10202             }
10203             offp = &off;
10204         }
10205         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10206         if (!is_error(ret) && arg3) {
10207             abi_long ret2 = put_user_sal(off, arg3);
10208             if (is_error(ret2)) {
10209                 ret = ret2;
10210             }
10211         }
10212         return ret;
10213     }
10214 #endif
10215 #ifdef TARGET_NR_sendfile64
10216     case TARGET_NR_sendfile64:
10217     {
10218         off_t *offp = NULL;
10219         off_t off;
10220         if (arg3) {
10221             ret = get_user_s64(off, arg3);
10222             if (is_error(ret)) {
10223                 return ret;
10224             }
10225             offp = &off;
10226         }
10227         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10228         if (!is_error(ret) && arg3) {
10229             abi_long ret2 = put_user_s64(off, arg3);
10230             if (is_error(ret2)) {
10231                 ret = ret2;
10232             }
10233         }
10234         return ret;
10235     }
10236 #endif
10237 #endif
10238 #ifdef TARGET_NR_vfork
10239     case TARGET_NR_vfork:
10240         return get_errno(do_fork(cpu_env,
10241                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10242                          0, 0, 0, 0));
10243 #endif
10244 #ifdef TARGET_NR_ugetrlimit
10245     case TARGET_NR_ugetrlimit:
10246     {
10247         struct rlimit rlim;
10248         int resource = target_to_host_resource(arg1);
10249         ret = get_errno(getrlimit(resource, &rlim));
10250         if (!is_error(ret)) {
10251             struct target_rlimit *target_rlim;
10252             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10253                 return -TARGET_EFAULT;
10254             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10255             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10256             unlock_user_struct(target_rlim, arg2, 1);
10257         }
10258         return ret;
10259     }
10260 #endif
10261 #ifdef TARGET_NR_truncate64
10262     case TARGET_NR_truncate64:
10263         if (!(p = lock_user_string(arg1)))
10264             return -TARGET_EFAULT;
10265         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10266         unlock_user(p, arg1, 0);
10267         return ret;
10268 #endif
10269 #ifdef TARGET_NR_ftruncate64
10270     case TARGET_NR_ftruncate64:
10271         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10272 #endif
10273 #ifdef TARGET_NR_stat64
10274     case TARGET_NR_stat64:
10275         if (!(p = lock_user_string(arg1))) {
10276             return -TARGET_EFAULT;
10277         }
10278         ret = get_errno(stat(path(p), &st));
10279         unlock_user(p, arg1, 0);
10280         if (!is_error(ret))
10281             ret = host_to_target_stat64(cpu_env, arg2, &st);
10282         return ret;
10283 #endif
10284 #ifdef TARGET_NR_lstat64
10285     case TARGET_NR_lstat64:
10286         if (!(p = lock_user_string(arg1))) {
10287             return -TARGET_EFAULT;
10288         }
10289         ret = get_errno(lstat(path(p), &st));
10290         unlock_user(p, arg1, 0);
10291         if (!is_error(ret))
10292             ret = host_to_target_stat64(cpu_env, arg2, &st);
10293         return ret;
10294 #endif
10295 #ifdef TARGET_NR_fstat64
10296     case TARGET_NR_fstat64:
10297         ret = get_errno(fstat(arg1, &st));
10298         if (!is_error(ret))
10299             ret = host_to_target_stat64(cpu_env, arg2, &st);
10300         return ret;
10301 #endif
10302 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10303 #ifdef TARGET_NR_fstatat64
10304     case TARGET_NR_fstatat64:
10305 #endif
10306 #ifdef TARGET_NR_newfstatat
10307     case TARGET_NR_newfstatat:
10308 #endif
10309         if (!(p = lock_user_string(arg2))) {
10310             return -TARGET_EFAULT;
10311         }
10312         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10313         unlock_user(p, arg2, 0);
10314         if (!is_error(ret))
10315             ret = host_to_target_stat64(cpu_env, arg3, &st);
10316         return ret;
10317 #endif
10318 #if defined(TARGET_NR_statx)
10319     case TARGET_NR_statx:
10320         {
10321             struct target_statx *target_stx;
10322             int dirfd = arg1;
10323             int flags = arg3;
10324 
10325             p = lock_user_string(arg2);
10326             if (p == NULL) {
10327                 return -TARGET_EFAULT;
10328             }
10329 #if defined(__NR_statx)
10330             {
10331                 /*
10332                  * It is assumed that struct statx is architecture independent.
10333                  */
10334                 struct target_statx host_stx;
10335                 int mask = arg4;
10336 
10337                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10338                 if (!is_error(ret)) {
10339                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10340                         unlock_user(p, arg2, 0);
10341                         return -TARGET_EFAULT;
10342                     }
10343                 }
10344 
10345                 if (ret != -TARGET_ENOSYS) {
10346                     unlock_user(p, arg2, 0);
10347                     return ret;
10348                 }
10349             }
10350 #endif
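                  /*
                   * The host has no statx() or it returned ENOSYS: emulate it
                   * with fstatat() and fill in the statx fields that a plain
                   * struct stat can provide.
                   */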
10351             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10352             unlock_user(p, arg2, 0);
10353 
10354             if (!is_error(ret)) {
10355                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10356                     return -TARGET_EFAULT;
10357                 }
10358                 memset(target_stx, 0, sizeof(*target_stx));
10359                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10360                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10361                 __put_user(st.st_ino, &target_stx->stx_ino);
10362                 __put_user(st.st_mode, &target_stx->stx_mode);
10363                 __put_user(st.st_uid, &target_stx->stx_uid);
10364                 __put_user(st.st_gid, &target_stx->stx_gid);
10365                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10366                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10367                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10368                 __put_user(st.st_size, &target_stx->stx_size);
10369                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10370                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10371                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10372                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10373                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10374                 unlock_user_struct(target_stx, arg5, 1);
10375             }
10376         }
10377         return ret;
10378 #endif
10379 #ifdef TARGET_NR_lchown
10380     case TARGET_NR_lchown:
10381         if (!(p = lock_user_string(arg1)))
10382             return -TARGET_EFAULT;
10383         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10384         unlock_user(p, arg1, 0);
10385         return ret;
10386 #endif
10387 #ifdef TARGET_NR_getuid
10388     case TARGET_NR_getuid:
10389         return get_errno(high2lowuid(getuid()));
10390 #endif
10391 #ifdef TARGET_NR_getgid
10392     case TARGET_NR_getgid:
10393         return get_errno(high2lowgid(getgid()));
10394 #endif
10395 #ifdef TARGET_NR_geteuid
10396     case TARGET_NR_geteuid:
10397         return get_errno(high2lowuid(geteuid()));
10398 #endif
10399 #ifdef TARGET_NR_getegid
10400     case TARGET_NR_getegid:
10401         return get_errno(high2lowgid(getegid()));
10402 #endif
10403     case TARGET_NR_setreuid:
10404         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10405     case TARGET_NR_setregid:
10406         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10407     case TARGET_NR_getgroups:
10408         {
10409             int gidsetsize = arg1;
10410             target_id *target_grouplist;
10411             gid_t *grouplist;
10412             int i;
10413 
10414             grouplist = alloca(gidsetsize * sizeof(gid_t));
10415             ret = get_errno(getgroups(gidsetsize, grouplist));
10416             if (gidsetsize == 0)
10417                 return ret;
10418             if (!is_error(ret)) {
10419                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10420                 if (!target_grouplist)
10421                     return -TARGET_EFAULT;
10422                 for(i = 0;i < ret; i++)
10423                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10424                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10425             }
10426         }
10427         return ret;
10428     case TARGET_NR_setgroups:
10429         {
10430             int gidsetsize = arg1;
10431             target_id *target_grouplist;
10432             gid_t *grouplist = NULL;
10433             int i;
10434             if (gidsetsize) {
10435                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10436                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10437                 if (!target_grouplist) {
10438                     return -TARGET_EFAULT;
10439                 }
10440                 for (i = 0; i < gidsetsize; i++) {
10441                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10442                 }
10443                 unlock_user(target_grouplist, arg2, 0);
10444             }
10445             return get_errno(setgroups(gidsetsize, grouplist));
10446         }
10447     case TARGET_NR_fchown:
10448         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10449 #if defined(TARGET_NR_fchownat)
10450     case TARGET_NR_fchownat:
10451         if (!(p = lock_user_string(arg2)))
10452             return -TARGET_EFAULT;
10453         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10454                                  low2highgid(arg4), arg5));
10455         unlock_user(p, arg2, 0);
10456         return ret;
10457 #endif
10458 #ifdef TARGET_NR_setresuid
10459     case TARGET_NR_setresuid:
10460         return get_errno(sys_setresuid(low2highuid(arg1),
10461                                        low2highuid(arg2),
10462                                        low2highuid(arg3)));
10463 #endif
10464 #ifdef TARGET_NR_getresuid
10465     case TARGET_NR_getresuid:
10466         {
10467             uid_t ruid, euid, suid;
10468             ret = get_errno(getresuid(&ruid, &euid, &suid));
10469             if (!is_error(ret)) {
10470                 if (put_user_id(high2lowuid(ruid), arg1)
10471                     || put_user_id(high2lowuid(euid), arg2)
10472                     || put_user_id(high2lowuid(suid), arg3))
10473                     return -TARGET_EFAULT;
10474             }
10475         }
10476         return ret;
10477 #endif
10478 #ifdef TARGET_NR_getresgid
10479     case TARGET_NR_setresgid:
10480         return get_errno(sys_setresgid(low2highgid(arg1),
10481                                        low2highgid(arg2),
10482                                        low2highgid(arg3)));
10483 #endif
10484 #ifdef TARGET_NR_getresgid
10485     case TARGET_NR_getresgid:
10486         {
10487             gid_t rgid, egid, sgid;
10488             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10489             if (!is_error(ret)) {
10490                 if (put_user_id(high2lowgid(rgid), arg1)
10491                     || put_user_id(high2lowgid(egid), arg2)
10492                     || put_user_id(high2lowgid(sgid), arg3))
10493                     return -TARGET_EFAULT;
10494             }
10495         }
10496         return ret;
10497 #endif
10498 #ifdef TARGET_NR_chown
10499     case TARGET_NR_chown:
10500         if (!(p = lock_user_string(arg1)))
10501             return -TARGET_EFAULT;
10502         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10503         unlock_user(p, arg1, 0);
10504         return ret;
10505 #endif
10506     case TARGET_NR_setuid:
10507         return get_errno(sys_setuid(low2highuid(arg1)));
10508     case TARGET_NR_setgid:
10509         return get_errno(sys_setgid(low2highgid(arg1)));
10510     case TARGET_NR_setfsuid:
10511         return get_errno(setfsuid(arg1));
10512     case TARGET_NR_setfsgid:
10513         return get_errno(setfsgid(arg1));
10514 
10515 #ifdef TARGET_NR_lchown32
10516     case TARGET_NR_lchown32:
10517         if (!(p = lock_user_string(arg1)))
10518             return -TARGET_EFAULT;
10519         ret = get_errno(lchown(p, arg2, arg3));
10520         unlock_user(p, arg1, 0);
10521         return ret;
10522 #endif
10523 #ifdef TARGET_NR_getuid32
10524     case TARGET_NR_getuid32:
10525         return get_errno(getuid());
10526 #endif
10527 
10528 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10529     /* Alpha specific */
10530     case TARGET_NR_getxuid:
10531         {
10532             uid_t euid;
10533             euid = geteuid();
10534             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10535         }
10536         return get_errno(getuid());
10537 #endif
10538 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10539     /* Alpha specific */
10540     case TARGET_NR_getxgid:
10541         {
10542             gid_t egid;
10543             egid = getegid();
10544             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10545         }
10546         return get_errno(getgid());
10547 #endif
10548 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10549     /* Alpha specific */
10550     case TARGET_NR_osf_getsysinfo:
10551         ret = -TARGET_EOPNOTSUPP;
10552         switch (arg1) {
10553           case TARGET_GSI_IEEE_FP_CONTROL:
10554             {
10555                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10556                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10557 
10558                 swcr &= ~SWCR_STATUS_MASK;
10559                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10560 
10561                 if (put_user_u64(swcr, arg2))
10562                     return -TARGET_EFAULT;
10563                 ret = 0;
10564             }
10565             break;
10566 
10567           /* case GSI_IEEE_STATE_AT_SIGNAL:
10568              -- Not implemented in linux kernel.
10569              case GSI_UACPROC:
10570              -- Retrieves current unaligned access state; not much used.
10571              case GSI_PROC_TYPE:
10572              -- Retrieves implver information; surely not used.
10573              case GSI_GET_HWRPB:
10574              -- Grabs a copy of the HWRPB; surely not used.
10575           */
10576         }
10577         return ret;
10578 #endif
10579 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10580     /* Alpha specific */
10581     case TARGET_NR_osf_setsysinfo:
10582         ret = -TARGET_EOPNOTSUPP;
10583         switch (arg1) {
10584           case TARGET_SSI_IEEE_FP_CONTROL:
10585             {
10586                 uint64_t swcr, fpcr;
10587 
10588                 if (get_user_u64 (swcr, arg2)) {
10589                     return -TARGET_EFAULT;
10590                 }
10591 
10592                 /*
10593                  * The kernel calls swcr_update_status to update the
10594                  * status bits from the fpcr at every point that it
10595                  * could be queried.  Therefore, we store the status
10596                  * bits only in FPCR.
10597                  */
10598                 ((CPUAlphaState *)cpu_env)->swcr
10599                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10600 
10601                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10602                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10603                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10604                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10605                 ret = 0;
10606             }
10607             break;
10608 
10609           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10610             {
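                /*
                 * The guest requests that a set of IEEE exceptions be
                 * raised.  Fold them into the FPCR status bits, and if any
                 * newly-raised exception also has its trap enable bit set
                 * in env->swcr, deliver SIGFPE with a matching si_code.
                 */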
10611                 uint64_t exc, fpcr, fex;
10612 
10613                 if (get_user_u64(exc, arg2)) {
10614                     return -TARGET_EFAULT;
10615                 }
10616                 exc &= SWCR_STATUS_MASK;
10617                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10618 
10619                 /* Old exceptions are not signaled.  */
10620                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10621                 fex = exc & ~fex;
10622                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10623                 fex &= ((CPUArchState *)cpu_env)->swcr;
10624 
10625                 /* Update the hardware fpcr.  */
10626                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10627                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10628 
10629                 if (fex) {
10630                     int si_code = TARGET_FPE_FLTUNK;
10631                     target_siginfo_t info;
10632 
10633                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10634                         si_code = TARGET_FPE_FLTUND;
10635                     }
10636                     if (fex & SWCR_TRAP_ENABLE_INE) {
10637                         si_code = TARGET_FPE_FLTRES;
10638                     }
10639                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10640                         si_code = TARGET_FPE_FLTUND;
10641                     }
10642                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10643                         si_code = TARGET_FPE_FLTOVF;
10644                     }
10645                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10646                         si_code = TARGET_FPE_FLTDIV;
10647                     }
10648                     if (fex & SWCR_TRAP_ENABLE_INV) {
10649                         si_code = TARGET_FPE_FLTINV;
10650                     }
10651 
10652                     info.si_signo = SIGFPE;
10653                     info.si_errno = 0;
10654                     info.si_code = si_code;
10655                     info._sifields._sigfault._addr
10656                         = ((CPUArchState *)cpu_env)->pc;
10657                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10658                                  QEMU_SI_FAULT, &info);
10659                 }
10660                 ret = 0;
10661             }
10662             break;
10663 
10664           /* case SSI_NVPAIRS:
10665              -- Used with SSIN_UACPROC to enable unaligned accesses.
10666              case SSI_IEEE_STATE_AT_SIGNAL:
10667              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10668              -- Not implemented in linux kernel
10669           */
10670         }
10671         return ret;
10672 #endif
10673 #ifdef TARGET_NR_osf_sigprocmask
10674     /* Alpha specific.  */
10675     case TARGET_NR_osf_sigprocmask:
10676         {
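            /*
             * Old-style OSF/1 sigprocmask: the mask is passed by value in
             * arg2 and the previous mask is handed back as the syscall
             * result instead of through a user-memory pointer.
             */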
10677             abi_ulong mask;
10678             int how;
10679             sigset_t set, oldset;
10680 
10681             switch(arg1) {
10682             case TARGET_SIG_BLOCK:
10683                 how = SIG_BLOCK;
10684                 break;
10685             case TARGET_SIG_UNBLOCK:
10686                 how = SIG_UNBLOCK;
10687                 break;
10688             case TARGET_SIG_SETMASK:
10689                 how = SIG_SETMASK;
10690                 break;
10691             default:
10692                 return -TARGET_EINVAL;
10693             }
10694             mask = arg2;
10695             target_to_host_old_sigset(&set, &mask);
10696             ret = do_sigprocmask(how, &set, &oldset);
10697             if (!ret) {
10698                 host_to_target_old_sigset(&mask, &oldset);
10699                 ret = mask;
10700             }
10701         }
10702         return ret;
10703 #endif
10704 
10705 #ifdef TARGET_NR_getgid32
10706     case TARGET_NR_getgid32:
10707         return get_errno(getgid());
10708 #endif
10709 #ifdef TARGET_NR_geteuid32
10710     case TARGET_NR_geteuid32:
10711         return get_errno(geteuid());
10712 #endif
10713 #ifdef TARGET_NR_getegid32
10714     case TARGET_NR_getegid32:
10715         return get_errno(getegid());
10716 #endif
10717 #ifdef TARGET_NR_setreuid32
10718     case TARGET_NR_setreuid32:
10719         return get_errno(setreuid(arg1, arg2));
10720 #endif
10721 #ifdef TARGET_NR_setregid32
10722     case TARGET_NR_setregid32:
10723         return get_errno(setregid(arg1, arg2));
10724 #endif
10725 #ifdef TARGET_NR_getgroups32
10726     case TARGET_NR_getgroups32:
10727         {
10728             int gidsetsize = arg1;
10729             uint32_t *target_grouplist;
10730             gid_t *grouplist;
10731             int i;
10732 
10733             grouplist = alloca(gidsetsize * sizeof(gid_t));
10734             ret = get_errno(getgroups(gidsetsize, grouplist));
10735             if (gidsetsize == 0)
10736                 return ret;
10737             if (!is_error(ret)) {
10738                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10739                 if (!target_grouplist) {
10740                     return -TARGET_EFAULT;
10741                 }
10742                 for (i = 0; i < ret; i++)
10743                     target_grouplist[i] = tswap32(grouplist[i]);
10744                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10745             }
10746         }
10747         return ret;
10748 #endif
10749 #ifdef TARGET_NR_setgroups32
10750     case TARGET_NR_setgroups32:
10751         {
10752             int gidsetsize = arg1;
10753             uint32_t *target_grouplist;
10754             gid_t *grouplist;
10755             int i;
10756 
10757             grouplist = alloca(gidsetsize * sizeof(gid_t));
10758             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10759             if (!target_grouplist) {
10760                 return -TARGET_EFAULT;
10761             }
10762             for (i = 0; i < gidsetsize; i++)
10763                 grouplist[i] = tswap32(target_grouplist[i]);
10764             unlock_user(target_grouplist, arg2, 0);
10765             return get_errno(setgroups(gidsetsize, grouplist));
10766         }
10767 #endif
10768 #ifdef TARGET_NR_fchown32
10769     case TARGET_NR_fchown32:
10770         return get_errno(fchown(arg1, arg2, arg3));
10771 #endif
10772 #ifdef TARGET_NR_setresuid32
10773     case TARGET_NR_setresuid32:
10774         return get_errno(sys_setresuid(arg1, arg2, arg3));
10775 #endif
10776 #ifdef TARGET_NR_getresuid32
10777     case TARGET_NR_getresuid32:
10778         {
10779             uid_t ruid, euid, suid;
10780             ret = get_errno(getresuid(&ruid, &euid, &suid));
10781             if (!is_error(ret)) {
10782                 if (put_user_u32(ruid, arg1)
10783                     || put_user_u32(euid, arg2)
10784                     || put_user_u32(suid, arg3))
10785                     return -TARGET_EFAULT;
10786             }
10787         }
10788         return ret;
10789 #endif
10790 #ifdef TARGET_NR_setresgid32
10791     case TARGET_NR_setresgid32:
10792         return get_errno(sys_setresgid(arg1, arg2, arg3));
10793 #endif
10794 #ifdef TARGET_NR_getresgid32
10795     case TARGET_NR_getresgid32:
10796         {
10797             gid_t rgid, egid, sgid;
10798             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10799             if (!is_error(ret)) {
10800                 if (put_user_u32(rgid, arg1)
10801                     || put_user_u32(egid, arg2)
10802                     || put_user_u32(sgid, arg3))
10803                     return -TARGET_EFAULT;
10804             }
10805         }
10806         return ret;
10807 #endif
10808 #ifdef TARGET_NR_chown32
10809     case TARGET_NR_chown32:
10810         if (!(p = lock_user_string(arg1)))
10811             return -TARGET_EFAULT;
10812         ret = get_errno(chown(p, arg2, arg3));
10813         unlock_user(p, arg1, 0);
10814         return ret;
10815 #endif
10816 #ifdef TARGET_NR_setuid32
10817     case TARGET_NR_setuid32:
10818         return get_errno(sys_setuid(arg1));
10819 #endif
10820 #ifdef TARGET_NR_setgid32
10821     case TARGET_NR_setgid32:
10822         return get_errno(sys_setgid(arg1));
10823 #endif
10824 #ifdef TARGET_NR_setfsuid32
10825     case TARGET_NR_setfsuid32:
10826         return get_errno(setfsuid(arg1));
10827 #endif
10828 #ifdef TARGET_NR_setfsgid32
10829     case TARGET_NR_setfsgid32:
10830         return get_errno(setfsgid(arg1));
10831 #endif
10832 #ifdef TARGET_NR_mincore
10833     case TARGET_NR_mincore:
10834         {
10835             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10836             if (!a) {
10837                 return -TARGET_ENOMEM;
10838             }
10839             p = lock_user_string(arg3);
10840             if (!p) {
10841                 ret = -TARGET_EFAULT;
10842             } else {
10843                 ret = get_errno(mincore(a, arg2, p));
10844                 unlock_user(p, arg3, ret);
10845             }
10846             unlock_user(a, arg1, 0);
10847         }
10848         return ret;
10849 #endif
10850 #ifdef TARGET_NR_arm_fadvise64_64
10851     case TARGET_NR_arm_fadvise64_64:
10852         /* arm_fadvise64_64 looks like fadvise64_64 but
10853          * with different argument order: fd, advice, offset, len
10854          * rather than the usual fd, offset, len, advice.
10855          * Note that offset and len are both 64-bit so appear as
10856          * pairs of 32-bit registers.
10857          */
10858         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10859                             target_offset64(arg5, arg6), arg2);
10860         return -host_to_target_errno(ret);
10861 #endif
10862 
10863 #if TARGET_ABI_BITS == 32
10864 
10865 #ifdef TARGET_NR_fadvise64_64
10866     case TARGET_NR_fadvise64_64:
10867 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10868         /* 6 args: fd, advice, offset (high, low), len (high, low) */
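        /* Rotate the arguments so that the common posix_fadvise() call
         * below sees the usual fd, offset, len, advice layout ('ret' is
         * only borrowed as scratch space for the swap).
         */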
10869         ret = arg2;
10870         arg2 = arg3;
10871         arg3 = arg4;
10872         arg4 = arg5;
10873         arg5 = arg6;
10874         arg6 = ret;
10875 #else
10876         /* 6 args: fd, offset (high, low), len (high, low), advice */
10877         if (regpairs_aligned(cpu_env, num)) {
10878             /* offset is in (3,4), len in (5,6) and advice in 7 */
10879             arg2 = arg3;
10880             arg3 = arg4;
10881             arg4 = arg5;
10882             arg5 = arg6;
10883             arg6 = arg7;
10884         }
10885 #endif
10886         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10887                             target_offset64(arg4, arg5), arg6);
10888         return -host_to_target_errno(ret);
10889 #endif
10890 
10891 #ifdef TARGET_NR_fadvise64
10892     case TARGET_NR_fadvise64:
10893         /* 5 args: fd, offset (high, low), len, advice */
10894         if (regpairs_aligned(cpu_env, num)) {
10895             /* offset is in (3,4), len in 5 and advice in 6 */
10896             arg2 = arg3;
10897             arg3 = arg4;
10898             arg4 = arg5;
10899             arg5 = arg6;
10900         }
10901         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10902         return -host_to_target_errno(ret);
10903 #endif
10904 
10905 #else /* not a 32-bit ABI */
10906 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10907 #ifdef TARGET_NR_fadvise64_64
10908     case TARGET_NR_fadvise64_64:
10909 #endif
10910 #ifdef TARGET_NR_fadvise64
10911     case TARGET_NR_fadvise64:
10912 #endif
10913 #ifdef TARGET_S390X
10914         switch (arg4) {
10915         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10916         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10917         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10918         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10919         default: break;
10920         }
10921 #endif
10922         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10923 #endif
10924 #endif /* end of 64-bit ABI fadvise handling */
10925 
10926 #ifdef TARGET_NR_madvise
10927     case TARGET_NR_madvise:
10928         /* A straight passthrough may not be safe because qemu sometimes
10929            turns private file-backed mappings into anonymous mappings.
10930            This will break MADV_DONTNEED.
10931            This is a hint, so ignoring and returning success is ok.  */
10932         return 0;
10933 #endif
10934 #if TARGET_ABI_BITS == 32
10935     case TARGET_NR_fcntl64:
10936     {
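        /*
         * fcntl64 on 32-bit guests: the F_*LK64 commands operate on a
         * struct flock64, whose layout differs for old-ABI (OABI) Arm,
         * so the copy helpers are chosen accordingly; every other command
         * goes through the common do_fcntl() path.
         */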
10937         int cmd;
10938         struct flock64 fl;
10939         from_flock64_fn *copyfrom = copy_from_user_flock64;
10940         to_flock64_fn *copyto = copy_to_user_flock64;
10941 
10942 #ifdef TARGET_ARM
10943         if (!((CPUARMState *)cpu_env)->eabi) {
10944             copyfrom = copy_from_user_oabi_flock64;
10945             copyto = copy_to_user_oabi_flock64;
10946         }
10947 #endif
10948 
10949         cmd = target_to_host_fcntl_cmd(arg2);
10950         if (cmd == -TARGET_EINVAL) {
10951             return cmd;
10952         }
10953 
10954         switch(arg2) {
10955         case TARGET_F_GETLK64:
10956             ret = copyfrom(&fl, arg3);
10957             if (ret) {
10958                 break;
10959             }
10960             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10961             if (ret == 0) {
10962                 ret = copyto(arg3, &fl);
10963             }
10964             break;
10965 
10966         case TARGET_F_SETLK64:
10967         case TARGET_F_SETLKW64:
10968             ret = copyfrom(&fl, arg3);
10969             if (ret) {
10970                 break;
10971             }
10972             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10973             break;
10974         default:
10975             ret = do_fcntl(arg1, arg2, arg3);
10976             break;
10977         }
10978         return ret;
10979     }
10980 #endif
10981 #ifdef TARGET_NR_cacheflush
10982     case TARGET_NR_cacheflush:
10983         /* self-modifying code is handled automatically, so nothing needed */
10984         return 0;
10985 #endif
10986 #ifdef TARGET_NR_getpagesize
10987     case TARGET_NR_getpagesize:
10988         return TARGET_PAGE_SIZE;
10989 #endif
10990     case TARGET_NR_gettid:
10991         return get_errno(sys_gettid());
10992 #ifdef TARGET_NR_readahead
10993     case TARGET_NR_readahead:
10994 #if TARGET_ABI_BITS == 32
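        /* On 32-bit ABIs the 64-bit offset arrives as a register pair,
         * shifted up by one slot on ABIs that align such pairs.
         */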
10995         if (regpairs_aligned(cpu_env, num)) {
10996             arg2 = arg3;
10997             arg3 = arg4;
10998             arg4 = arg5;
10999         }
11000         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11001 #else
11002         ret = get_errno(readahead(arg1, arg2, arg3));
11003 #endif
11004         return ret;
11005 #endif
11006 #ifdef CONFIG_ATTR
11007 #ifdef TARGET_NR_setxattr
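    /*
     * The *xattr family passes names and values straight through.  A zero
     * buffer pointer/size is the "query required size" form, which is why
     * the list/value buffers are only locked when the guest actually
     * supplied them.
     */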
11008     case TARGET_NR_listxattr:
11009     case TARGET_NR_llistxattr:
11010     {
11011         void *p, *b = 0;
11012         if (arg2) {
11013             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11014             if (!b) {
11015                 return -TARGET_EFAULT;
11016             }
11017         }
11018         p = lock_user_string(arg1);
11019         if (p) {
11020             if (num == TARGET_NR_listxattr) {
11021                 ret = get_errno(listxattr(p, b, arg3));
11022             } else {
11023                 ret = get_errno(llistxattr(p, b, arg3));
11024             }
11025         } else {
11026             ret = -TARGET_EFAULT;
11027         }
11028         unlock_user(p, arg1, 0);
11029         unlock_user(b, arg2, arg3);
11030         return ret;
11031     }
11032     case TARGET_NR_flistxattr:
11033     {
11034         void *b = 0;
11035         if (arg2) {
11036             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11037             if (!b) {
11038                 return -TARGET_EFAULT;
11039             }
11040         }
11041         ret = get_errno(flistxattr(arg1, b, arg3));
11042         unlock_user(b, arg2, arg3);
11043         return ret;
11044     }
11045     case TARGET_NR_setxattr:
11046     case TARGET_NR_lsetxattr:
11047         {
11048             void *p, *n, *v = 0;
11049             if (arg3) {
11050                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11051                 if (!v) {
11052                     return -TARGET_EFAULT;
11053                 }
11054             }
11055             p = lock_user_string(arg1);
11056             n = lock_user_string(arg2);
11057             if (p && n) {
11058                 if (num == TARGET_NR_setxattr) {
11059                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11060                 } else {
11061                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11062                 }
11063             } else {
11064                 ret = -TARGET_EFAULT;
11065             }
11066             unlock_user(p, arg1, 0);
11067             unlock_user(n, arg2, 0);
11068             unlock_user(v, arg3, 0);
11069         }
11070         return ret;
11071     case TARGET_NR_fsetxattr:
11072         {
11073             void *n, *v = 0;
11074             if (arg3) {
11075                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11076                 if (!v) {
11077                     return -TARGET_EFAULT;
11078                 }
11079             }
11080             n = lock_user_string(arg2);
11081             if (n) {
11082                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11083             } else {
11084                 ret = -TARGET_EFAULT;
11085             }
11086             unlock_user(n, arg2, 0);
11087             unlock_user(v, arg3, 0);
11088         }
11089         return ret;
11090     case TARGET_NR_getxattr:
11091     case TARGET_NR_lgetxattr:
11092         {
11093             void *p, *n, *v = 0;
11094             if (arg3) {
11095                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11096                 if (!v) {
11097                     return -TARGET_EFAULT;
11098                 }
11099             }
11100             p = lock_user_string(arg1);
11101             n = lock_user_string(arg2);
11102             if (p && n) {
11103                 if (num == TARGET_NR_getxattr) {
11104                     ret = get_errno(getxattr(p, n, v, arg4));
11105                 } else {
11106                     ret = get_errno(lgetxattr(p, n, v, arg4));
11107                 }
11108             } else {
11109                 ret = -TARGET_EFAULT;
11110             }
11111             unlock_user(p, arg1, 0);
11112             unlock_user(n, arg2, 0);
11113             unlock_user(v, arg3, arg4);
11114         }
11115         return ret;
11116     case TARGET_NR_fgetxattr:
11117         {
11118             void *n, *v = 0;
11119             if (arg3) {
11120                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11121                 if (!v) {
11122                     return -TARGET_EFAULT;
11123                 }
11124             }
11125             n = lock_user_string(arg2);
11126             if (n) {
11127                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11128             } else {
11129                 ret = -TARGET_EFAULT;
11130             }
11131             unlock_user(n, arg2, 0);
11132             unlock_user(v, arg3, arg4);
11133         }
11134         return ret;
11135     case TARGET_NR_removexattr:
11136     case TARGET_NR_lremovexattr:
11137         {
11138             void *p, *n;
11139             p = lock_user_string(arg1);
11140             n = lock_user_string(arg2);
11141             if (p && n) {
11142                 if (num == TARGET_NR_removexattr) {
11143                     ret = get_errno(removexattr(p, n));
11144                 } else {
11145                     ret = get_errno(lremovexattr(p, n));
11146                 }
11147             } else {
11148                 ret = -TARGET_EFAULT;
11149             }
11150             unlock_user(p, arg1, 0);
11151             unlock_user(n, arg2, 0);
11152         }
11153         return ret;
11154     case TARGET_NR_fremovexattr:
11155         {
11156             void *n;
11157             n = lock_user_string(arg2);
11158             if (n) {
11159                 ret = get_errno(fremovexattr(arg1, n));
11160             } else {
11161                 ret = -TARGET_EFAULT;
11162             }
11163             unlock_user(n, arg2, 0);
11164         }
11165         return ret;
11166 #endif
11167 #endif /* CONFIG_ATTR */
11168 #ifdef TARGET_NR_set_thread_area
11169     case TARGET_NR_set_thread_area:
11170 #if defined(TARGET_MIPS)
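      /* On MIPS the TLS pointer is simply cached in CP0 UserLocal, from
       * where the guest reads it back (typically via rdhwr).
       */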
11171       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11172       return 0;
11173 #elif defined(TARGET_CRIS)
11174       if (arg1 & 0xff)
11175           ret = -TARGET_EINVAL;
11176       else {
11177           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11178           ret = 0;
11179       }
11180       return ret;
11181 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11182       return do_set_thread_area(cpu_env, arg1);
11183 #elif defined(TARGET_M68K)
11184       {
11185           TaskState *ts = cpu->opaque;
11186           ts->tp_value = arg1;
11187           return 0;
11188       }
11189 #else
11190       return -TARGET_ENOSYS;
11191 #endif
11192 #endif
11193 #ifdef TARGET_NR_get_thread_area
11194     case TARGET_NR_get_thread_area:
11195 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11196         return do_get_thread_area(cpu_env, arg1);
11197 #elif defined(TARGET_M68K)
11198         {
11199             TaskState *ts = cpu->opaque;
11200             return ts->tp_value;
11201         }
11202 #else
11203         return -TARGET_ENOSYS;
11204 #endif
11205 #endif
11206 #ifdef TARGET_NR_getdomainname
11207     case TARGET_NR_getdomainname:
11208         return -TARGET_ENOSYS;
11209 #endif
11210 
11211 #ifdef TARGET_NR_clock_settime
11212     case TARGET_NR_clock_settime:
11213     {
11214         struct timespec ts;
11215 
11216         ret = target_to_host_timespec(&ts, arg2);
11217         if (!is_error(ret)) {
11218             ret = get_errno(clock_settime(arg1, &ts));
11219         }
11220         return ret;
11221     }
11222 #endif
11223 #ifdef TARGET_NR_clock_gettime
11224     case TARGET_NR_clock_gettime:
11225     {
11226         struct timespec ts;
11227         ret = get_errno(clock_gettime(arg1, &ts));
11228         if (!is_error(ret)) {
11229             ret = host_to_target_timespec(arg2, &ts);
11230         }
11231         return ret;
11232     }
11233 #endif
11234 #ifdef TARGET_NR_clock_getres
11235     case TARGET_NR_clock_getres:
11236     {
11237         struct timespec ts;
11238         ret = get_errno(clock_getres(arg1, &ts));
11239         if (!is_error(ret)) {
11240             host_to_target_timespec(arg2, &ts);
11241         }
11242         return ret;
11243     }
11244 #endif
11245 #ifdef TARGET_NR_clock_nanosleep
11246     case TARGET_NR_clock_nanosleep:
11247     {
11248         struct timespec ts;
11249         if (target_to_host_timespec(&ts, arg3)) {
                  return -TARGET_EFAULT;
              }
11250         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11251                                              &ts, arg4 ? &ts : NULL));
11252         if (arg4)
11253             host_to_target_timespec(arg4, &ts);
11254 
11255 #if defined(TARGET_PPC)
11256         /* clock_nanosleep is odd in that it returns positive errno values.
11257          * On PPC, CR0 bit 3 should be set in such a situation. */
11258         if (ret && ret != -TARGET_ERESTARTSYS) {
11259             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11260         }
11261 #endif
11262         return ret;
11263     }
11264 #endif
11265 
11266 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11267     case TARGET_NR_set_tid_address:
11268         return get_errno(set_tid_address((int *)g2h(arg1)));
11269 #endif
11270 
11271     case TARGET_NR_tkill:
11272         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11273 
11274     case TARGET_NR_tgkill:
11275         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11276                          target_to_host_signal(arg3)));
11277 
11278 #ifdef TARGET_NR_set_robust_list
11279     case TARGET_NR_set_robust_list:
11280     case TARGET_NR_get_robust_list:
11281         /* The ABI for supporting robust futexes has userspace pass
11282          * the kernel a pointer to a linked list which is updated by
11283          * userspace after the syscall; the list is walked by the kernel
11284          * when the thread exits. Since the linked list in QEMU guest
11285          * memory isn't a valid linked list for the host and we have
11286          * no way to reliably intercept the thread-death event, we can't
11287          * support these. Silently return ENOSYS so that guest userspace
11288          * falls back to a non-robust futex implementation (which should
11289          * be OK except in the corner case of the guest crashing while
11290          * holding a mutex that is shared with another process via
11291          * shared memory).
11292          */
11293         return -TARGET_ENOSYS;
11294 #endif
11295 
11296 #if defined(TARGET_NR_utimensat)
11297     case TARGET_NR_utimensat:
11298         {
11299             struct timespec *tsp, ts[2];
11300             if (!arg3) {
11301                 tsp = NULL;
11302             } else {
11303                 if (target_to_host_timespec(ts, arg3) ||
11304                     target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
11305                 tsp = ts;
11306             }
11307             if (!arg2)
11308                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11309             else {
11310                 if (!(p = lock_user_string(arg2))) {
11311                     return -TARGET_EFAULT;
11312                 }
11313                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11314                 unlock_user(p, arg2, 0);
11315             }
11316         }
11317         return ret;
11318 #endif
11319     case TARGET_NR_futex:
11320         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11321 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11322     case TARGET_NR_inotify_init:
11323         ret = get_errno(sys_inotify_init());
11324         if (ret >= 0) {
11325             fd_trans_register(ret, &target_inotify_trans);
11326         }
11327         return ret;
11328 #endif
11329 #ifdef CONFIG_INOTIFY1
11330 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11331     case TARGET_NR_inotify_init1:
11332         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11333                                           fcntl_flags_tbl)));
11334         if (ret >= 0) {
11335             fd_trans_register(ret, &target_inotify_trans);
11336         }
11337         return ret;
11338 #endif
11339 #endif
11340 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11341     case TARGET_NR_inotify_add_watch:
11342         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
11343         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11344         unlock_user(p, arg2, 0);
11345         return ret;
11346 #endif
11347 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11348     case TARGET_NR_inotify_rm_watch:
11349         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11350 #endif
11351 
11352 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11353     case TARGET_NR_mq_open:
11354         {
11355             struct mq_attr posix_mq_attr;
11356             struct mq_attr *pposix_mq_attr;
11357             int host_flags;
11358 
11359             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11360             pposix_mq_attr = NULL;
11361             if (arg4) {
11362                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11363                     return -TARGET_EFAULT;
11364                 }
11365                 pposix_mq_attr = &posix_mq_attr;
11366             }
11367             p = lock_user_string(arg1 - 1);
11368             if (!p) {
11369                 return -TARGET_EFAULT;
11370             }
11371             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11372             unlock_user (p, arg1, 0);
11373         }
11374         return ret;
11375 
11376     case TARGET_NR_mq_unlink:
11377         p = lock_user_string(arg1 - 1);
11378         if (!p) {
11379             return -TARGET_EFAULT;
11380         }
11381         ret = get_errno(mq_unlink(p));
11382         unlock_user (p, arg1, 0);
11383         return ret;
11384 
11385     case TARGET_NR_mq_timedsend:
11386         {
11387             struct timespec ts;
11388 
11389             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11390             if (arg5 != 0) {
11391                 target_to_host_timespec(&ts, arg5);
11392                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11393                 host_to_target_timespec(arg5, &ts);
11394             } else {
11395                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11396             }
11397             unlock_user (p, arg2, arg3);
11398         }
11399         return ret;
11400 
11401     case TARGET_NR_mq_timedreceive:
11402         {
11403             struct timespec ts;
11404             unsigned int prio;
11405 
11406             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11407             if (arg5 != 0) {
11408                 target_to_host_timespec(&ts, arg5);
11409                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11410                                                      &prio, &ts));
11411                 host_to_target_timespec(arg5, &ts);
11412             } else {
11413                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11414                                                      &prio, NULL));
11415             }
11416             unlock_user (p, arg2, arg3);
11417             if (arg4 != 0)
11418                 put_user_u32(prio, arg4);
11419         }
11420         return ret;
11421 
11422     /* Not implemented for now... */
11423 /*     case TARGET_NR_mq_notify: */
11424 /*         break; */
11425 
11426     case TARGET_NR_mq_getsetattr:
11427         {
11428             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11429             ret = 0;
11430             if (arg2 != 0) {
11431                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
11432                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11433                                            &posix_mq_attr_out));
11434             } else if (arg3 != 0) {
11435                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11436             }
11437             if (ret == 0 && arg3 != 0) {
11438                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11439             }
11440         }
11441         return ret;
11442 #endif
11443 
11444 #ifdef CONFIG_SPLICE
11445 #ifdef TARGET_NR_tee
11446     case TARGET_NR_tee:
11447         {
11448             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11449         }
11450         return ret;
11451 #endif
11452 #ifdef TARGET_NR_splice
11453     case TARGET_NR_splice:
11454         {
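            /* The in/out offsets are optional.  When the guest supplies
             * them they are bounced through host loff_t variables and the
             * values that splice() advanced are copied back afterwards.
             */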
11455             loff_t loff_in, loff_out;
11456             loff_t *ploff_in = NULL, *ploff_out = NULL;
11457             if (arg2) {
11458                 if (get_user_u64(loff_in, arg2)) {
11459                     return -TARGET_EFAULT;
11460                 }
11461                 ploff_in = &loff_in;
11462             }
11463             if (arg4) {
11464                 if (get_user_u64(loff_out, arg4)) {
11465                     return -TARGET_EFAULT;
11466                 }
11467                 ploff_out = &loff_out;
11468             }
11469             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11470             if (arg2) {
11471                 if (put_user_u64(loff_in, arg2)) {
11472                     return -TARGET_EFAULT;
11473                 }
11474             }
11475             if (arg4) {
11476                 if (put_user_u64(loff_out, arg4)) {
11477                     return -TARGET_EFAULT;
11478                 }
11479             }
11480         }
11481         return ret;
11482 #endif
11483 #ifdef TARGET_NR_vmsplice
11484     case TARGET_NR_vmsplice:
11485         {
11486             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11487             if (vec != NULL) {
11488                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11489                 unlock_iovec(vec, arg2, arg3, 0);
11490             } else {
11491                 ret = -host_to_target_errno(errno);
11492             }
11493         }
11494         return ret;
11495 #endif
11496 #endif /* CONFIG_SPLICE */
11497 #ifdef CONFIG_EVENTFD
11498 #if defined(TARGET_NR_eventfd)
11499     case TARGET_NR_eventfd:
11500         ret = get_errno(eventfd(arg1, 0));
11501         if (ret >= 0) {
11502             fd_trans_register(ret, &target_eventfd_trans);
11503         }
11504         return ret;
11505 #endif
11506 #if defined(TARGET_NR_eventfd2)
11507     case TARGET_NR_eventfd2:
11508     {
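        /* NONBLOCK and CLOEXEC are translated explicitly because the
         * guest's O_* values need not match the host's; any other bits
         * (e.g. EFD_SEMAPHORE) are passed through unchanged.
         */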
11509         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11510         if (arg2 & TARGET_O_NONBLOCK) {
11511             host_flags |= O_NONBLOCK;
11512         }
11513         if (arg2 & TARGET_O_CLOEXEC) {
11514             host_flags |= O_CLOEXEC;
11515         }
11516         ret = get_errno(eventfd(arg1, host_flags));
11517         if (ret >= 0) {
11518             fd_trans_register(ret, &target_eventfd_trans);
11519         }
11520         return ret;
11521     }
11522 #endif
11523 #endif /* CONFIG_EVENTFD  */
11524 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11525     case TARGET_NR_fallocate:
11526 #if TARGET_ABI_BITS == 32
11527         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11528                                   target_offset64(arg5, arg6)));
11529 #else
11530         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11531 #endif
11532         return ret;
11533 #endif
11534 #if defined(CONFIG_SYNC_FILE_RANGE)
11535 #if defined(TARGET_NR_sync_file_range)
11536     case TARGET_NR_sync_file_range:
11537 #if TARGET_ABI_BITS == 32
11538 #if defined(TARGET_MIPS)
11539         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11540                                         target_offset64(arg5, arg6), arg7));
11541 #else
11542         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11543                                         target_offset64(arg4, arg5), arg6));
11544 #endif /* !TARGET_MIPS */
11545 #else
11546         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11547 #endif
11548         return ret;
11549 #endif
11550 #if defined(TARGET_NR_sync_file_range2)
11551     case TARGET_NR_sync_file_range2:
11552         /* This is like sync_file_range but the arguments are reordered */
11553 #if TARGET_ABI_BITS == 32
11554         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11555                                         target_offset64(arg5, arg6), arg2));
11556 #else
11557         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11558 #endif
11559         return ret;
11560 #endif
11561 #endif
11562 #if defined(TARGET_NR_signalfd4)
11563     case TARGET_NR_signalfd4:
11564         return do_signalfd4(arg1, arg2, arg4);
11565 #endif
11566 #if defined(TARGET_NR_signalfd)
11567     case TARGET_NR_signalfd:
11568         return do_signalfd4(arg1, arg2, 0);
11569 #endif
11570 #if defined(CONFIG_EPOLL)
11571 #if defined(TARGET_NR_epoll_create)
11572     case TARGET_NR_epoll_create:
11573         return get_errno(epoll_create(arg1));
11574 #endif
11575 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11576     case TARGET_NR_epoll_create1:
11577         return get_errno(epoll_create1(arg1));
11578 #endif
11579 #if defined(TARGET_NR_epoll_ctl)
11580     case TARGET_NR_epoll_ctl:
11581     {
11582         struct epoll_event ep;
11583         struct epoll_event *epp = 0;
11584         if (arg4) {
11585             struct target_epoll_event *target_ep;
11586             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11587                 return -TARGET_EFAULT;
11588             }
11589             ep.events = tswap32(target_ep->events);
11590             /* The epoll_data_t union is just opaque data to the kernel,
11591              * so we transfer all 64 bits across and need not worry what
11592              * actual data type it is.
11593              */
11594             ep.data.u64 = tswap64(target_ep->data.u64);
11595             unlock_user_struct(target_ep, arg4, 0);
11596             epp = &ep;
11597         }
11598         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11599     }
11600 #endif
11601 
11602 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11603 #if defined(TARGET_NR_epoll_wait)
11604     case TARGET_NR_epoll_wait:
11605 #endif
11606 #if defined(TARGET_NR_epoll_pwait)
11607     case TARGET_NR_epoll_pwait:
11608 #endif
11609     {
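        /*
         * Both epoll_wait and epoll_pwait funnel into safe_epoll_pwait();
         * maxevents is bounded above so the bounce buffer allocated below
         * stays a reasonable size.
         */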
11610         struct target_epoll_event *target_ep;
11611         struct epoll_event *ep;
11612         int epfd = arg1;
11613         int maxevents = arg3;
11614         int timeout = arg4;
11615 
11616         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11617             return -TARGET_EINVAL;
11618         }
11619 
11620         target_ep = lock_user(VERIFY_WRITE, arg2,
11621                               maxevents * sizeof(struct target_epoll_event), 1);
11622         if (!target_ep) {
11623             return -TARGET_EFAULT;
11624         }
11625 
11626         ep = g_try_new(struct epoll_event, maxevents);
11627         if (!ep) {
11628             unlock_user(target_ep, arg2, 0);
11629             return -TARGET_ENOMEM;
11630         }
11631 
11632         switch (num) {
11633 #if defined(TARGET_NR_epoll_pwait)
11634         case TARGET_NR_epoll_pwait:
11635         {
11636             target_sigset_t *target_set;
11637             sigset_t _set, *set = &_set;
11638 
11639             if (arg5) {
11640                 if (arg6 != sizeof(target_sigset_t)) {
11641                     ret = -TARGET_EINVAL;
11642                     break;
11643                 }
11644 
11645                 target_set = lock_user(VERIFY_READ, arg5,
11646                                        sizeof(target_sigset_t), 1);
11647                 if (!target_set) {
11648                     ret = -TARGET_EFAULT;
11649                     break;
11650                 }
11651                 target_to_host_sigset(set, target_set);
11652                 unlock_user(target_set, arg5, 0);
11653             } else {
11654                 set = NULL;
11655             }
11656 
11657             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11658                                              set, SIGSET_T_SIZE));
11659             break;
11660         }
11661 #endif
11662 #if defined(TARGET_NR_epoll_wait)
11663         case TARGET_NR_epoll_wait:
11664             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11665                                              NULL, 0));
11666             break;
11667 #endif
11668         default:
11669             ret = -TARGET_ENOSYS;
11670         }
11671         if (!is_error(ret)) {
11672             int i;
11673             for (i = 0; i < ret; i++) {
11674                 target_ep[i].events = tswap32(ep[i].events);
11675                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11676             }
11677             unlock_user(target_ep, arg2,
11678                         ret * sizeof(struct target_epoll_event));
11679         } else {
11680             unlock_user(target_ep, arg2, 0);
11681         }
11682         g_free(ep);
11683         return ret;
11684     }
11685 #endif
11686 #endif
11687 #ifdef TARGET_NR_prlimit64
11688     case TARGET_NR_prlimit64:
11689     {
11690         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11691         struct target_rlimit64 *target_rnew, *target_rold;
11692         struct host_rlimit64 rnew, rold, *rnewp = 0;
11693         int resource = target_to_host_resource(arg2);
11694         if (arg3) {
11695             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11696                 return -TARGET_EFAULT;
11697             }
11698             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11699             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11700             unlock_user_struct(target_rnew, arg3, 0);
11701             rnewp = &rnew;
11702         }
11703 
11704         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11705         if (!is_error(ret) && arg4) {
11706             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11707                 return -TARGET_EFAULT;
11708             }
11709             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11710             target_rold->rlim_max = tswap64(rold.rlim_max);
11711             unlock_user_struct(target_rold, arg4, 1);
11712         }
11713         return ret;
11714     }
11715 #endif
11716 #ifdef TARGET_NR_gethostname
11717     case TARGET_NR_gethostname:
11718     {
11719         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11720         if (name) {
11721             ret = get_errno(gethostname(name, arg2));
11722             unlock_user(name, arg1, arg2);
11723         } else {
11724             ret = -TARGET_EFAULT;
11725         }
11726         return ret;
11727     }
11728 #endif
11729 #ifdef TARGET_NR_atomic_cmpxchg_32
11730     case TARGET_NR_atomic_cmpxchg_32:
11731     {
11732         /* should use start_exclusive from main.c */
11733         abi_ulong mem_value;
11734         if (get_user_u32(mem_value, arg6)) {
11735             target_siginfo_t info;
11736             info.si_signo = SIGSEGV;
11737             info.si_errno = 0;
11738             info.si_code = TARGET_SEGV_MAPERR;
11739             info._sifields._sigfault._addr = arg6;
11740             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11741                          QEMU_SI_FAULT, &info);
11742             return 0xdeadbeef;
11744         }
11745         if (mem_value == arg2)
11746             put_user_u32(arg1, arg6);
11747         return mem_value;
11748     }
11749 #endif
11750 #ifdef TARGET_NR_atomic_barrier
11751     case TARGET_NR_atomic_barrier:
11752         /* Like the kernel implementation and the
11753            qemu arm barrier, no-op this? */
11754         return 0;
11755 #endif
11756 
11757 #ifdef TARGET_NR_timer_create
11758     case TARGET_NR_timer_create:
11759     {
11760         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
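        /* The timer id handed back to the guest encodes TIMER_MAGIC plus
         * an index into g_posix_timers; the other timer_* cases recover
         * the host timer from it via get_timer_id().
         */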
11761 
11762         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11763 
11764         int clkid = arg1;
11765         int timer_index = next_free_host_timer();
11766 
11767         if (timer_index < 0) {
11768             ret = -TARGET_EAGAIN;
11769         } else {
11770             timer_t *phtimer = g_posix_timers + timer_index;
11771 
11772             if (arg2) {
11773                 phost_sevp = &host_sevp;
11774                 ret = target_to_host_sigevent(phost_sevp, arg2);
11775                 if (ret != 0) {
11776                     return ret;
11777                 }
11778             }
11779 
11780             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11781             if (ret) {
11782                 phtimer = NULL;
11783             } else {
11784                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11785                     return -TARGET_EFAULT;
11786                 }
11787             }
11788         }
11789         return ret;
11790     }
11791 #endif
11792 
11793 #ifdef TARGET_NR_timer_settime
11794     case TARGET_NR_timer_settime:
11795     {
11796         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11797          * struct itimerspec * old_value */
11798         target_timer_t timerid = get_timer_id(arg1);
11799 
11800         if (timerid < 0) {
11801             ret = timerid;
11802         } else if (arg3 == 0) {
11803             ret = -TARGET_EINVAL;
11804         } else {
11805             timer_t htimer = g_posix_timers[timerid];
11806             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11807 
11808             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11809                 return -TARGET_EFAULT;
11810             }
11811             ret = get_errno(
11812                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11813             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11814                 return -TARGET_EFAULT;
11815             }
11816         }
11817         return ret;
11818     }
11819 #endif
11820 
11821 #ifdef TARGET_NR_timer_gettime
11822     case TARGET_NR_timer_gettime:
11823     {
11824         /* args: timer_t timerid, struct itimerspec *curr_value */
11825         target_timer_t timerid = get_timer_id(arg1);
11826 
11827         if (timerid < 0) {
11828             ret = timerid;
11829         } else if (!arg2) {
11830             ret = -TARGET_EFAULT;
11831         } else {
11832             timer_t htimer = g_posix_timers[timerid];
11833             struct itimerspec hspec;
11834             ret = get_errno(timer_gettime(htimer, &hspec));
11835 
11836             if (host_to_target_itimerspec(arg2, &hspec)) {
11837                 ret = -TARGET_EFAULT;
11838             }
11839         }
11840         return ret;
11841     }
11842 #endif
11843 
11844 #ifdef TARGET_NR_timer_getoverrun
11845     case TARGET_NR_timer_getoverrun:
11846     {
11847         /* args: timer_t timerid */
11848         target_timer_t timerid = get_timer_id(arg1);
11849 
11850         if (timerid < 0) {
11851             ret = timerid;
11852         } else {
11853             timer_t htimer = g_posix_timers[timerid];
11854             ret = get_errno(timer_getoverrun(htimer));
11855         }
11856         return ret;
11857     }
11858 #endif
11859 
11860 #ifdef TARGET_NR_timer_delete
11861     case TARGET_NR_timer_delete:
11862     {
11863         /* args: timer_t timerid */
11864         target_timer_t timerid = get_timer_id(arg1);
11865 
11866         if (timerid < 0) {
11867             ret = timerid;
11868         } else {
11869             timer_t htimer = g_posix_timers[timerid];
11870             ret = get_errno(timer_delete(htimer));
11871             g_posix_timers[timerid] = 0;
11872         }
11873         return ret;
11874     }
11875 #endif
11876 
11877 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11878     case TARGET_NR_timerfd_create:
11879         return get_errno(timerfd_create(arg1,
11880                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11881 #endif
11882 
11883 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11884     case TARGET_NR_timerfd_gettime:
11885         {
11886             struct itimerspec its_curr;
11887 
11888             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11889 
11890             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11891                 return -TARGET_EFAULT;
11892             }
11893         }
11894         return ret;
11895 #endif
11896 
11897 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11898     case TARGET_NR_timerfd_settime:
11899         {
11900             struct itimerspec its_new, its_old, *p_new;
11901 
11902             if (arg3) {
11903                 if (target_to_host_itimerspec(&its_new, arg3)) {
11904                     return -TARGET_EFAULT;
11905                 }
11906                 p_new = &its_new;
11907             } else {
11908                 p_new = NULL;
11909             }
11910 
11911             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11912 
11913             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11914                 return -TARGET_EFAULT;
11915             }
11916         }
11917         return ret;
11918 #endif
11919 
11920 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11921     case TARGET_NR_ioprio_get:
11922         return get_errno(ioprio_get(arg1, arg2));
11923 #endif
11924 
11925 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11926     case TARGET_NR_ioprio_set:
11927         return get_errno(ioprio_set(arg1, arg2, arg3));
11928 #endif
11929 
11930 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11931     case TARGET_NR_setns:
11932         return get_errno(setns(arg1, arg2));
11933 #endif
11934 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11935     case TARGET_NR_unshare:
11936         return get_errno(unshare(arg1));
11937 #endif
11938 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11939     case TARGET_NR_kcmp:
11940         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11941 #endif
11942 #ifdef TARGET_NR_swapcontext
11943     case TARGET_NR_swapcontext:
11944         /* PowerPC specific.  */
11945         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11946 #endif
11947 #ifdef TARGET_NR_memfd_create
11948     case TARGET_NR_memfd_create:
11949         p = lock_user_string(arg1);
11950         if (!p) {
11951             return -TARGET_EFAULT;
11952         }
11953         ret = get_errno(memfd_create(p, arg2));
11954         fd_trans_unregister(ret);
11955         unlock_user(p, arg1, 0);
11956         return ret;
11957 #endif
11958 
11959     default:
11960         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11961         return -TARGET_ENOSYS;
11962     }
11963     return ret;
11964 }
11965 
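/* Thin wrapper around do_syscall1(): records the syscall for the trace
 * backends, optionally emits strace-style logging, and (when built with
 * DEBUG_ERESTARTSYS) exercises the syscall-restart paths.
 */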
11966 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11967                     abi_long arg2, abi_long arg3, abi_long arg4,
11968                     abi_long arg5, abi_long arg6, abi_long arg7,
11969                     abi_long arg8)
11970 {
11971     CPUState *cpu = env_cpu(cpu_env);
11972     abi_long ret;
11973 
11974 #ifdef DEBUG_ERESTARTSYS
11975     /* Debug-only code for exercising the syscall-restart code paths
11976      * in the per-architecture cpu main loops: restart every syscall
11977      * the guest makes once before letting it through.
11978      */
11979     {
11980         static bool flag;
11981         flag = !flag;
11982         if (flag) {
11983             return -TARGET_ERESTARTSYS;
11984         }
11985     }
11986 #endif
11987 
11988     record_syscall_start(cpu, num, arg1,
11989                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
11990 
11991     if (unlikely(do_strace)) {
11992         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11993         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11994                           arg5, arg6, arg7, arg8);
11995         print_syscall_ret(num, ret);
11996     } else {
11997         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11998                           arg5, arg6, arg7, arg8);
11999     }
12000 
12001     record_syscall_return(cpu, num, ret);
12002     return ret;
12003 }
12004