xref: /openbmc/qemu/linux-user/syscall.c (revision e6e03dcf)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #include <linux/fd.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 #include "qemu/guest-random.h"
115 #include "qapi/error.h"
116 #include "fd-trans.h"
117 
118 #ifndef CLONE_IO
119 #define CLONE_IO                0x80000000      /* Clone io context */
120 #endif
121 
122 /* We can't directly call the host clone syscall, because this will
123  * badly confuse libc (breaking mutexes, for example). So we must
124  * divide clone flags into:
125  *  * flag combinations that look like pthread_create()
126  *  * flag combinations that look like fork()
127  *  * flags we can implement within QEMU itself
128  *  * flags we can't support and will return an error for
129  */
130 /* For thread creation, all these flags must be present; for
131  * fork, none must be present.
132  */
133 #define CLONE_THREAD_FLAGS                              \
134     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
135      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 
137 /* These flags are ignored:
138  * CLONE_DETACHED is now ignored by the kernel;
139  * CLONE_IO is just an optimisation hint to the I/O scheduler
140  */
141 #define CLONE_IGNORED_FLAGS                     \
142     (CLONE_DETACHED | CLONE_IO)
143 
144 /* Flags for fork which we can implement within QEMU itself */
145 #define CLONE_OPTIONAL_FORK_FLAGS               \
146     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
147      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 
149 /* Flags for thread creation which we can implement within QEMU itself */
150 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
151     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
152      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 
154 #define CLONE_INVALID_FORK_FLAGS                                        \
155     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 
157 #define CLONE_INVALID_THREAD_FLAGS                                      \
158     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
159        CLONE_IGNORED_FLAGS))
160 
161 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
162  * have almost all been allocated. We cannot support any of
163  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
164  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
165  * The checks against the invalid thread masks above will catch these.
166  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167  */
168 
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170  * once. This exercises the codepaths for restart.
171  */
172 //#define DEBUG_ERESTARTSYS
173 
174 //#include <linux/msdos_fs.h>
175 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
176 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
177 
178 #undef _syscall0
179 #undef _syscall1
180 #undef _syscall2
181 #undef _syscall3
182 #undef _syscall4
183 #undef _syscall5
184 #undef _syscall6
185 
186 #define _syscall0(type,name)		\
187 static type name (void)			\
188 {					\
189 	return syscall(__NR_##name);	\
190 }
191 
192 #define _syscall1(type,name,type1,arg1)		\
193 static type name (type1 arg1)			\
194 {						\
195 	return syscall(__NR_##name, arg1);	\
196 }
197 
198 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
199 static type name (type1 arg1,type2 arg2)		\
200 {							\
201 	return syscall(__NR_##name, arg1, arg2);	\
202 }
203 
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
205 static type name (type1 arg1,type2 arg2,type3 arg3)		\
206 {								\
207 	return syscall(__NR_##name, arg1, arg2, arg3);		\
208 }
209 
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
212 {										\
213 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
214 }
215 
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
217 		  type5,arg5)							\
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
219 {										\
220 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
221 }
222 
223 
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
225 		  type5,arg5,type6,arg6)					\
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
227                   type6 arg6)							\
228 {										\
229 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
230 }
231 
232 
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
240 #define __NR_sys_syslog __NR_syslog
241 #define __NR_sys_futex __NR_futex
242 #define __NR_sys_inotify_init __NR_inotify_init
243 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
244 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #define __NR_sys_statx __NR_statx
246 
247 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
248 #define __NR__llseek __NR_lseek
249 #endif
250 
251 /* Newer kernel ports have llseek() instead of _llseek() */
252 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
253 #define TARGET_NR__llseek TARGET_NR_llseek
254 #endif
255 
256 #define __NR_sys_gettid __NR_gettid
257 _syscall0(int, sys_gettid)
258 
259 /* For the 64-bit guest on 32-bit host case we must emulate
260  * getdents using getdents64, because otherwise the host
261  * might hand us back more dirent records than we can fit
262  * into the guest buffer after structure format conversion.
263  * Otherwise we emulate getdents with getdents if the host has it.
264  */
265 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
266 #define EMULATE_GETDENTS_WITH_GETDENTS
267 #endif
268 
269 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
270 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
271 #endif
272 #if (defined(TARGET_NR_getdents) && \
273       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
274     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
275 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
276 #endif
277 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
278 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
279           loff_t *, res, uint, wh);
280 #endif
281 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
282 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
283           siginfo_t *, uinfo)
284 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
285 #ifdef __NR_exit_group
286 _syscall1(int,exit_group,int,error_code)
287 #endif
288 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
289 _syscall1(int,set_tid_address,int *,tidptr)
290 #endif
291 #if defined(TARGET_NR_futex) && defined(__NR_futex)
292 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
293           const struct timespec *,timeout,int *,uaddr2,int,val3)
294 #endif
295 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
296 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
297           unsigned long *, user_mask_ptr);
298 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
299 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_getcpu __NR_getcpu
302 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
303 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
304           void *, arg);
305 _syscall2(int, capget, struct __user_cap_header_struct *, header,
306           struct __user_cap_data_struct *, data);
307 _syscall2(int, capset, struct __user_cap_header_struct *, header,
308           struct __user_cap_data_struct *, data);
309 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
310 _syscall2(int, ioprio_get, int, which, int, who)
311 #endif
312 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
313 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
314 #endif
315 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
316 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
317 #endif
318 
319 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
320 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
321           unsigned long, idx1, unsigned long, idx2)
322 #endif
323 
324 /*
325  * It is assumed that struct statx is architecture independent.
326  */
327 #if defined(TARGET_NR_statx) && defined(__NR_statx)
328 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
329           unsigned int, mask, struct target_statx *, statxbuf)
330 #endif
331 
332 static bitmask_transtbl fcntl_flags_tbl[] = {
333   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
334   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
335   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
336   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
337   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
338   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
339   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
340   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
341   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
342   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
343   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
344   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
345   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
346 #if defined(O_DIRECT)
347   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
348 #endif
349 #if defined(O_NOATIME)
350   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
351 #endif
352 #if defined(O_CLOEXEC)
353   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
354 #endif
355 #if defined(O_PATH)
356   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
357 #endif
358 #if defined(O_TMPFILE)
359   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
360 #endif
361   /* Don't terminate the list prematurely on 64-bit host+guest.  */
362 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
363   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
364 #endif
365   { 0, 0, 0, 0 }
366 };
367 
368 static int sys_getcwd1(char *buf, size_t size)
369 {
370   if (getcwd(buf, size) == NULL) {
371       /* getcwd() sets errno */
372       return (-1);
373   }
374   return strlen(buf)+1;
375 }
376 
377 #ifdef TARGET_NR_utimensat
378 #if defined(__NR_utimensat)
379 #define __NR_sys_utimensat __NR_utimensat
380 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
381           const struct timespec *,tsp,int,flags)
382 #else
383 static int sys_utimensat(int dirfd, const char *pathname,
384                          const struct timespec times[2], int flags)
385 {
386     errno = ENOSYS;
387     return -1;
388 }
389 #endif
390 #endif /* TARGET_NR_utimensat */
391 
392 #ifdef TARGET_NR_renameat2
393 #if defined(__NR_renameat2)
394 #define __NR_sys_renameat2 __NR_renameat2
395 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
396           const char *, new, unsigned int, flags)
397 #else
398 static int sys_renameat2(int oldfd, const char *old,
399                          int newfd, const char *new, int flags)
400 {
401     if (flags == 0) {
402         return renameat(oldfd, old, newfd, new);
403     }
404     errno = ENOSYS;
405     return -1;
406 }
407 #endif
408 #endif /* TARGET_NR_renameat2 */
409 
410 #ifdef CONFIG_INOTIFY
411 #include <sys/inotify.h>
412 
413 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
414 static int sys_inotify_init(void)
415 {
416   return (inotify_init());
417 }
418 #endif
419 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
420 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
421 {
422   return (inotify_add_watch(fd, pathname, mask));
423 }
424 #endif
425 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
426 static int sys_inotify_rm_watch(int fd, int32_t wd)
427 {
428   return (inotify_rm_watch(fd, wd));
429 }
430 #endif
431 #ifdef CONFIG_INOTIFY1
432 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
433 static int sys_inotify_init1(int flags)
434 {
435   return (inotify_init1(flags));
436 }
437 #endif
438 #endif
439 #else
440 /* Userspace can usually survive runtime without inotify */
441 #undef TARGET_NR_inotify_init
442 #undef TARGET_NR_inotify_init1
443 #undef TARGET_NR_inotify_add_watch
444 #undef TARGET_NR_inotify_rm_watch
445 #endif /* CONFIG_INOTIFY  */
446 
447 #if defined(TARGET_NR_prlimit64)
448 #ifndef __NR_prlimit64
449 # define __NR_prlimit64 -1
450 #endif
451 #define __NR_sys_prlimit64 __NR_prlimit64
452 /* The glibc rlimit structure may not be that used by the underlying syscall */
453 struct host_rlimit64 {
454     uint64_t rlim_cur;
455     uint64_t rlim_max;
456 };
457 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
458           const struct host_rlimit64 *, new_limit,
459           struct host_rlimit64 *, old_limit)
460 #endif
461 
462 
463 #if defined(TARGET_NR_timer_create)
464 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
465 static timer_t g_posix_timers[32] = { 0, } ;
466 
467 static inline int next_free_host_timer(void)
468 {
469     int k ;
470     /* FIXME: Does finding the next free slot require a lock? */
471     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
472         if (g_posix_timers[k] == 0) {
473             g_posix_timers[k] = (timer_t) 1;
474             return k;
475         }
476     }
477     return -1;
478 }
479 #endif
480 
481 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
482 #ifdef TARGET_ARM
483 static inline int regpairs_aligned(void *cpu_env, int num)
484 {
485     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
486 }
487 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
490 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
491  * of registers which translates to the same as ARM/MIPS, because we start with
492  * r3 as arg1 */
493 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
494 #elif defined(TARGET_SH4)
495 /* SH4 doesn't align register pairs, except for p{read,write}64 */
496 static inline int regpairs_aligned(void *cpu_env, int num)
497 {
498     switch (num) {
499     case TARGET_NR_pread64:
500     case TARGET_NR_pwrite64:
501         return 1;
502 
503     default:
504         return 0;
505     }
506 }
507 #elif defined(TARGET_XTENSA)
508 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
509 #else
510 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
511 #endif
512 
513 #define ERRNO_TABLE_SIZE 1200
514 
515 /* target_to_host_errno_table[] is initialized from
516  * host_to_target_errno_table[] in syscall_init(). */
517 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
518 };
519 
520 /*
521  * This list is the union of errno values overridden in asm-<arch>/errno.h
522  * minus the errnos that are not actually generic to all archs.
523  */
524 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
525     [EAGAIN]		= TARGET_EAGAIN,
526     [EIDRM]		= TARGET_EIDRM,
527     [ECHRNG]		= TARGET_ECHRNG,
528     [EL2NSYNC]		= TARGET_EL2NSYNC,
529     [EL3HLT]		= TARGET_EL3HLT,
530     [EL3RST]		= TARGET_EL3RST,
531     [ELNRNG]		= TARGET_ELNRNG,
532     [EUNATCH]		= TARGET_EUNATCH,
533     [ENOCSI]		= TARGET_ENOCSI,
534     [EL2HLT]		= TARGET_EL2HLT,
535     [EDEADLK]		= TARGET_EDEADLK,
536     [ENOLCK]		= TARGET_ENOLCK,
537     [EBADE]		= TARGET_EBADE,
538     [EBADR]		= TARGET_EBADR,
539     [EXFULL]		= TARGET_EXFULL,
540     [ENOANO]		= TARGET_ENOANO,
541     [EBADRQC]		= TARGET_EBADRQC,
542     [EBADSLT]		= TARGET_EBADSLT,
543     [EBFONT]		= TARGET_EBFONT,
544     [ENOSTR]		= TARGET_ENOSTR,
545     [ENODATA]		= TARGET_ENODATA,
546     [ETIME]		= TARGET_ETIME,
547     [ENOSR]		= TARGET_ENOSR,
548     [ENONET]		= TARGET_ENONET,
549     [ENOPKG]		= TARGET_ENOPKG,
550     [EREMOTE]		= TARGET_EREMOTE,
551     [ENOLINK]		= TARGET_ENOLINK,
552     [EADV]		= TARGET_EADV,
553     [ESRMNT]		= TARGET_ESRMNT,
554     [ECOMM]		= TARGET_ECOMM,
555     [EPROTO]		= TARGET_EPROTO,
556     [EDOTDOT]		= TARGET_EDOTDOT,
557     [EMULTIHOP]		= TARGET_EMULTIHOP,
558     [EBADMSG]		= TARGET_EBADMSG,
559     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
560     [EOVERFLOW]		= TARGET_EOVERFLOW,
561     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
562     [EBADFD]		= TARGET_EBADFD,
563     [EREMCHG]		= TARGET_EREMCHG,
564     [ELIBACC]		= TARGET_ELIBACC,
565     [ELIBBAD]		= TARGET_ELIBBAD,
566     [ELIBSCN]		= TARGET_ELIBSCN,
567     [ELIBMAX]		= TARGET_ELIBMAX,
568     [ELIBEXEC]		= TARGET_ELIBEXEC,
569     [EILSEQ]		= TARGET_EILSEQ,
570     [ENOSYS]		= TARGET_ENOSYS,
571     [ELOOP]		= TARGET_ELOOP,
572     [ERESTART]		= TARGET_ERESTART,
573     [ESTRPIPE]		= TARGET_ESTRPIPE,
574     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
575     [EUSERS]		= TARGET_EUSERS,
576     [ENOTSOCK]		= TARGET_ENOTSOCK,
577     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
578     [EMSGSIZE]		= TARGET_EMSGSIZE,
579     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
580     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
581     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
582     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
583     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
584     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
585     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
586     [EADDRINUSE]	= TARGET_EADDRINUSE,
587     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
588     [ENETDOWN]		= TARGET_ENETDOWN,
589     [ENETUNREACH]	= TARGET_ENETUNREACH,
590     [ENETRESET]		= TARGET_ENETRESET,
591     [ECONNABORTED]	= TARGET_ECONNABORTED,
592     [ECONNRESET]	= TARGET_ECONNRESET,
593     [ENOBUFS]		= TARGET_ENOBUFS,
594     [EISCONN]		= TARGET_EISCONN,
595     [ENOTCONN]		= TARGET_ENOTCONN,
596     [EUCLEAN]		= TARGET_EUCLEAN,
597     [ENOTNAM]		= TARGET_ENOTNAM,
598     [ENAVAIL]		= TARGET_ENAVAIL,
599     [EISNAM]		= TARGET_EISNAM,
600     [EREMOTEIO]		= TARGET_EREMOTEIO,
601     [EDQUOT]            = TARGET_EDQUOT,
602     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
603     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
604     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
605     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
606     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
607     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
608     [EALREADY]		= TARGET_EALREADY,
609     [EINPROGRESS]	= TARGET_EINPROGRESS,
610     [ESTALE]		= TARGET_ESTALE,
611     [ECANCELED]		= TARGET_ECANCELED,
612     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
613     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
614 #ifdef ENOKEY
615     [ENOKEY]		= TARGET_ENOKEY,
616 #endif
617 #ifdef EKEYEXPIRED
618     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
619 #endif
620 #ifdef EKEYREVOKED
621     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
622 #endif
623 #ifdef EKEYREJECTED
624     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
625 #endif
626 #ifdef EOWNERDEAD
627     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
628 #endif
629 #ifdef ENOTRECOVERABLE
630     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
631 #endif
632 #ifdef ENOMSG
633     [ENOMSG]            = TARGET_ENOMSG,
634 #endif
635 #ifdef ERKFILL
636     [ERFKILL]           = TARGET_ERFKILL,
637 #endif
638 #ifdef EHWPOISON
639     [EHWPOISON]         = TARGET_EHWPOISON,
640 #endif
641 };
642 
643 static inline int host_to_target_errno(int err)
644 {
645     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
646         host_to_target_errno_table[err]) {
647         return host_to_target_errno_table[err];
648     }
649     return err;
650 }
651 
652 static inline int target_to_host_errno(int err)
653 {
654     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
655         target_to_host_errno_table[err]) {
656         return target_to_host_errno_table[err];
657     }
658     return err;
659 }
660 
661 static inline abi_long get_errno(abi_long ret)
662 {
663     if (ret == -1)
664         return -host_to_target_errno(errno);
665     else
666         return ret;
667 }
668 
669 const char *target_strerror(int err)
670 {
671     if (err == TARGET_ERESTARTSYS) {
672         return "To be restarted";
673     }
674     if (err == TARGET_QEMU_ESIGRETURN) {
675         return "Successful exit from sigreturn";
676     }
677 
678     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
679         return NULL;
680     }
681     return strerror(target_to_host_errno(err));
682 }
683 
684 #define safe_syscall0(type, name) \
685 static type safe_##name(void) \
686 { \
687     return safe_syscall(__NR_##name); \
688 }
689 
690 #define safe_syscall1(type, name, type1, arg1) \
691 static type safe_##name(type1 arg1) \
692 { \
693     return safe_syscall(__NR_##name, arg1); \
694 }
695 
696 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
697 static type safe_##name(type1 arg1, type2 arg2) \
698 { \
699     return safe_syscall(__NR_##name, arg1, arg2); \
700 }
701 
702 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
703 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
706 }
707 
708 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
711 { \
712     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
713 }
714 
715 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
716     type4, arg4, type5, arg5) \
717 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
718     type5 arg5) \
719 { \
720     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
721 }
722 
723 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
724     type4, arg4, type5, arg5, type6, arg6) \
725 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
726     type5 arg5, type6 arg6) \
727 { \
728     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
729 }
730 
731 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
732 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
733 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
734               int, flags, mode_t, mode)
735 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
736               struct rusage *, rusage)
737 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
738               int, options, struct rusage *, rusage)
739 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
740 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
741               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
742 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
743               struct timespec *, tsp, const sigset_t *, sigmask,
744               size_t, sigsetsize)
745 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
746               int, maxevents, int, timeout, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
749               const struct timespec *,timeout,int *,uaddr2,int,val3)
750 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
751 safe_syscall2(int, kill, pid_t, pid, int, sig)
752 safe_syscall2(int, tkill, int, tid, int, sig)
753 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
754 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
755 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
757               unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
759               unsigned long, pos_l, unsigned long, pos_h)
760 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
761               socklen_t, addrlen)
762 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
763               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
764 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
765               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
766 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
767 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
768 safe_syscall2(int, flock, int, fd, int, operation)
769 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
770               const struct timespec *, uts, size_t, sigsetsize)
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772               int, flags)
773 safe_syscall2(int, nanosleep, const struct timespec *, req,
774               struct timespec *, rem)
775 #ifdef TARGET_NR_clock_nanosleep
776 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
777               const struct timespec *, req, struct timespec *, rem)
778 #endif
779 #ifdef __NR_ipc
780 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
781               void *, ptr, long, fifth)
782 #endif
783 #ifdef __NR_msgsnd
784 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
785               int, flags)
786 #endif
787 #ifdef __NR_msgrcv
788 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
789               long, msgtype, int, flags)
790 #endif
791 #ifdef __NR_semtimedop
792 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
793               unsigned, nsops, const struct timespec *, timeout)
794 #endif
795 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
796 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
797               size_t, len, unsigned, prio, const struct timespec *, timeout)
798 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
799               size_t, len, unsigned *, prio, const struct timespec *, timeout)
800 #endif
801 /* We do ioctl like this rather than via safe_syscall3 to preserve the
802  * "third argument might be integer or pointer or not present" behaviour of
803  * the libc function.
804  */
805 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
806 /* Similarly for fcntl. Note that callers must always:
807  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
808  *  use the flock64 struct rather than unsuffixed flock
809  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
810  */
811 #ifdef __NR_fcntl64
812 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
813 #else
814 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
815 #endif
816 
817 static inline int host_to_target_sock_type(int host_type)
818 {
819     int target_type;
820 
821     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
822     case SOCK_DGRAM:
823         target_type = TARGET_SOCK_DGRAM;
824         break;
825     case SOCK_STREAM:
826         target_type = TARGET_SOCK_STREAM;
827         break;
828     default:
829         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
830         break;
831     }
832 
833 #if defined(SOCK_CLOEXEC)
834     if (host_type & SOCK_CLOEXEC) {
835         target_type |= TARGET_SOCK_CLOEXEC;
836     }
837 #endif
838 
839 #if defined(SOCK_NONBLOCK)
840     if (host_type & SOCK_NONBLOCK) {
841         target_type |= TARGET_SOCK_NONBLOCK;
842     }
843 #endif
844 
845     return target_type;
846 }
847 
848 static abi_ulong target_brk;
849 static abi_ulong target_original_brk;
850 static abi_ulong brk_page;
851 
852 void target_set_brk(abi_ulong new_brk)
853 {
854     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
855     brk_page = HOST_PAGE_ALIGN(target_brk);
856 }
857 
858 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
859 #define DEBUGF_BRK(message, args...)
860 
861 /* do_brk() must return target values and target errnos. */
862 abi_long do_brk(abi_ulong new_brk)
863 {
864     abi_long mapped_addr;
865     abi_ulong new_alloc_size;
866 
867     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
868 
869     if (!new_brk) {
870         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
871         return target_brk;
872     }
873     if (new_brk < target_original_brk) {
874         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
875                    target_brk);
876         return target_brk;
877     }
878 
879     /* If the new brk is less than the highest page reserved to the
880      * target heap allocation, set it and we're almost done...  */
881     if (new_brk <= brk_page) {
882         /* Heap contents are initialized to zero, as for anonymous
883          * mapped pages.  */
884         if (new_brk > target_brk) {
885             memset(g2h(target_brk), 0, new_brk - target_brk);
886         }
887 	target_brk = new_brk;
888         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
889 	return target_brk;
890     }
891 
892     /* We need to allocate more memory after the brk... Note that
893      * we don't use MAP_FIXED because that will map over the top of
894      * any existing mapping (like the one with the host libc or qemu
895      * itself); instead we treat "mapped but at wrong address" as
896      * a failure and unmap again.
897      */
898     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
899     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
900                                         PROT_READ|PROT_WRITE,
901                                         MAP_ANON|MAP_PRIVATE, 0, 0));
902 
903     if (mapped_addr == brk_page) {
904         /* Heap contents are initialized to zero, as for anonymous
905          * mapped pages.  Technically the new pages are already
906          * initialized to zero since they *are* anonymous mapped
907          * pages, however we have to take care with the contents that
908          * come from the remaining part of the previous page: it may
909          * contains garbage data due to a previous heap usage (grown
910          * then shrunken).  */
911         memset(g2h(target_brk), 0, brk_page - target_brk);
912 
913         target_brk = new_brk;
914         brk_page = HOST_PAGE_ALIGN(target_brk);
915         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
916             target_brk);
917         return target_brk;
918     } else if (mapped_addr != -1) {
919         /* Mapped but at wrong address, meaning there wasn't actually
920          * enough space for this brk.
921          */
922         target_munmap(mapped_addr, new_alloc_size);
923         mapped_addr = -1;
924         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
925     }
926     else {
927         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
928     }
929 
930 #if defined(TARGET_ALPHA)
931     /* We (partially) emulate OSF/1 on Alpha, which requires we
932        return a proper errno, not an unchanged brk value.  */
933     return -TARGET_ENOMEM;
934 #endif
935     /* For everything else, return the previous break. */
936     return target_brk;
937 }
938 
939 static inline abi_long copy_from_user_fdset(fd_set *fds,
940                                             abi_ulong target_fds_addr,
941                                             int n)
942 {
943     int i, nw, j, k;
944     abi_ulong b, *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_READ,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  1)))
951         return -TARGET_EFAULT;
952 
953     FD_ZERO(fds);
954     k = 0;
955     for (i = 0; i < nw; i++) {
956         /* grab the abi_ulong */
957         __get_user(b, &target_fds[i]);
958         for (j = 0; j < TARGET_ABI_BITS; j++) {
959             /* check the bit inside the abi_ulong */
960             if ((b >> j) & 1)
961                 FD_SET(k, fds);
962             k++;
963         }
964     }
965 
966     unlock_user(target_fds, target_fds_addr, 0);
967 
968     return 0;
969 }
970 
971 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
972                                                  abi_ulong target_fds_addr,
973                                                  int n)
974 {
975     if (target_fds_addr) {
976         if (copy_from_user_fdset(fds, target_fds_addr, n))
977             return -TARGET_EFAULT;
978         *fds_ptr = fds;
979     } else {
980         *fds_ptr = NULL;
981     }
982     return 0;
983 }
984 
985 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
986                                           const fd_set *fds,
987                                           int n)
988 {
989     int i, nw, j, k;
990     abi_long v;
991     abi_ulong *target_fds;
992 
993     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
994     if (!(target_fds = lock_user(VERIFY_WRITE,
995                                  target_fds_addr,
996                                  sizeof(abi_ulong) * nw,
997                                  0)))
998         return -TARGET_EFAULT;
999 
1000     k = 0;
1001     for (i = 0; i < nw; i++) {
1002         v = 0;
1003         for (j = 0; j < TARGET_ABI_BITS; j++) {
1004             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1005             k++;
1006         }
1007         __put_user(v, &target_fds[i]);
1008     }
1009 
1010     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1011 
1012     return 0;
1013 }
1014 
1015 #if defined(__alpha__)
1016 #define HOST_HZ 1024
1017 #else
1018 #define HOST_HZ 100
1019 #endif
1020 
1021 static inline abi_long host_to_target_clock_t(long ticks)
1022 {
1023 #if HOST_HZ == TARGET_HZ
1024     return ticks;
1025 #else
1026     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1027 #endif
1028 }
1029 
1030 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1031                                              const struct rusage *rusage)
1032 {
1033     struct target_rusage *target_rusage;
1034 
1035     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1036         return -TARGET_EFAULT;
1037     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1038     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1039     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1040     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1041     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1042     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1043     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1044     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1045     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1046     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1047     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1048     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1049     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1050     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1051     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1052     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1053     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1054     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1055     unlock_user_struct(target_rusage, target_addr, 1);
1056 
1057     return 0;
1058 }
1059 
1060 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1061 {
1062     abi_ulong target_rlim_swap;
1063     rlim_t result;
1064 
1065     target_rlim_swap = tswapal(target_rlim);
1066     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1067         return RLIM_INFINITY;
1068 
1069     result = target_rlim_swap;
1070     if (target_rlim_swap != (rlim_t)result)
1071         return RLIM_INFINITY;
1072 
1073     return result;
1074 }
1075 
1076 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     abi_ulong result;
1080 
1081     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1082         target_rlim_swap = TARGET_RLIM_INFINITY;
1083     else
1084         target_rlim_swap = rlim;
1085     result = tswapal(target_rlim_swap);
1086 
1087     return result;
1088 }
1089 
1090 static inline int target_to_host_resource(int code)
1091 {
1092     switch (code) {
1093     case TARGET_RLIMIT_AS:
1094         return RLIMIT_AS;
1095     case TARGET_RLIMIT_CORE:
1096         return RLIMIT_CORE;
1097     case TARGET_RLIMIT_CPU:
1098         return RLIMIT_CPU;
1099     case TARGET_RLIMIT_DATA:
1100         return RLIMIT_DATA;
1101     case TARGET_RLIMIT_FSIZE:
1102         return RLIMIT_FSIZE;
1103     case TARGET_RLIMIT_LOCKS:
1104         return RLIMIT_LOCKS;
1105     case TARGET_RLIMIT_MEMLOCK:
1106         return RLIMIT_MEMLOCK;
1107     case TARGET_RLIMIT_MSGQUEUE:
1108         return RLIMIT_MSGQUEUE;
1109     case TARGET_RLIMIT_NICE:
1110         return RLIMIT_NICE;
1111     case TARGET_RLIMIT_NOFILE:
1112         return RLIMIT_NOFILE;
1113     case TARGET_RLIMIT_NPROC:
1114         return RLIMIT_NPROC;
1115     case TARGET_RLIMIT_RSS:
1116         return RLIMIT_RSS;
1117     case TARGET_RLIMIT_RTPRIO:
1118         return RLIMIT_RTPRIO;
1119     case TARGET_RLIMIT_SIGPENDING:
1120         return RLIMIT_SIGPENDING;
1121     case TARGET_RLIMIT_STACK:
1122         return RLIMIT_STACK;
1123     default:
1124         return code;
1125     }
1126 }
1127 
1128 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1129                                               abi_ulong target_tv_addr)
1130 {
1131     struct target_timeval *target_tv;
1132 
1133     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1134         return -TARGET_EFAULT;
1135     }
1136 
1137     __get_user(tv->tv_sec, &target_tv->tv_sec);
1138     __get_user(tv->tv_usec, &target_tv->tv_usec);
1139 
1140     unlock_user_struct(target_tv, target_tv_addr, 0);
1141 
1142     return 0;
1143 }
1144 
1145 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1146                                             const struct timeval *tv)
1147 {
1148     struct target_timeval *target_tv;
1149 
1150     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151         return -TARGET_EFAULT;
1152     }
1153 
1154     __put_user(tv->tv_sec, &target_tv->tv_sec);
1155     __put_user(tv->tv_usec, &target_tv->tv_usec);
1156 
1157     unlock_user_struct(target_tv, target_tv_addr, 1);
1158 
1159     return 0;
1160 }
1161 
1162 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1163                                              const struct timeval *tv)
1164 {
1165     struct target__kernel_sock_timeval *target_tv;
1166 
1167     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1168         return -TARGET_EFAULT;
1169     }
1170 
1171     __put_user(tv->tv_sec, &target_tv->tv_sec);
1172     __put_user(tv->tv_usec, &target_tv->tv_usec);
1173 
1174     unlock_user_struct(target_tv, target_tv_addr, 1);
1175 
1176     return 0;
1177 }
1178 
1179 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1180                                                abi_ulong target_addr)
1181 {
1182     struct target_timespec *target_ts;
1183 
1184     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1185         return -TARGET_EFAULT;
1186     }
1187     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1188     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1189     unlock_user_struct(target_ts, target_addr, 0);
1190     return 0;
1191 }
1192 
1193 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1194                                                struct timespec *host_ts)
1195 {
1196     struct target_timespec *target_ts;
1197 
1198     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1199         return -TARGET_EFAULT;
1200     }
1201     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1202     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1203     unlock_user_struct(target_ts, target_addr, 1);
1204     return 0;
1205 }
1206 
1207 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1208                                                  struct timespec *host_ts)
1209 {
1210     struct target__kernel_timespec *target_ts;
1211 
1212     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1213         return -TARGET_EFAULT;
1214     }
1215     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1216     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1217     unlock_user_struct(target_ts, target_addr, 1);
1218     return 0;
1219 }
1220 
1221 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1222                                                abi_ulong target_tz_addr)
1223 {
1224     struct target_timezone *target_tz;
1225 
1226     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1227         return -TARGET_EFAULT;
1228     }
1229 
1230     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1231     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1232 
1233     unlock_user_struct(target_tz, target_tz_addr, 0);
1234 
1235     return 0;
1236 }
1237 
1238 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1239 #include <mqueue.h>
1240 
1241 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1242                                               abi_ulong target_mq_attr_addr)
1243 {
1244     struct target_mq_attr *target_mq_attr;
1245 
1246     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1247                           target_mq_attr_addr, 1))
1248         return -TARGET_EFAULT;
1249 
1250     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1251     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1252     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1253     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1254 
1255     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1256 
1257     return 0;
1258 }
1259 
1260 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1261                                             const struct mq_attr *attr)
1262 {
1263     struct target_mq_attr *target_mq_attr;
1264 
1265     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1266                           target_mq_attr_addr, 0))
1267         return -TARGET_EFAULT;
1268 
1269     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1270     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1271     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1272     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1273 
1274     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1275 
1276     return 0;
1277 }
1278 #endif
1279 
1280 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1281 /* do_select() must return target values and target errnos. */
1282 static abi_long do_select(int n,
1283                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1284                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1285 {
1286     fd_set rfds, wfds, efds;
1287     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1288     struct timeval tv;
1289     struct timespec ts, *ts_ptr;
1290     abi_long ret;
1291 
1292     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1293     if (ret) {
1294         return ret;
1295     }
1296     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1297     if (ret) {
1298         return ret;
1299     }
1300     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1301     if (ret) {
1302         return ret;
1303     }
1304 
1305     if (target_tv_addr) {
1306         if (copy_from_user_timeval(&tv, target_tv_addr))
1307             return -TARGET_EFAULT;
1308         ts.tv_sec = tv.tv_sec;
1309         ts.tv_nsec = tv.tv_usec * 1000;
1310         ts_ptr = &ts;
1311     } else {
1312         ts_ptr = NULL;
1313     }
1314 
1315     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1316                                   ts_ptr, NULL));
1317 
1318     if (!is_error(ret)) {
1319         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1320             return -TARGET_EFAULT;
1321         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1322             return -TARGET_EFAULT;
1323         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1324             return -TARGET_EFAULT;
1325 
1326         if (target_tv_addr) {
1327             tv.tv_sec = ts.tv_sec;
1328             tv.tv_usec = ts.tv_nsec / 1000;
1329             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1330                 return -TARGET_EFAULT;
1331             }
1332         }
1333     }
1334 
1335     return ret;
1336 }
1337 
1338 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1339 static abi_long do_old_select(abi_ulong arg1)
1340 {
1341     struct target_sel_arg_struct *sel;
1342     abi_ulong inp, outp, exp, tvp;
1343     long nsel;
1344 
1345     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1346         return -TARGET_EFAULT;
1347     }
1348 
1349     nsel = tswapal(sel->n);
1350     inp = tswapal(sel->inp);
1351     outp = tswapal(sel->outp);
1352     exp = tswapal(sel->exp);
1353     tvp = tswapal(sel->tvp);
1354 
1355     unlock_user_struct(sel, arg1, 0);
1356 
1357     return do_select(nsel, inp, outp, exp, tvp);
1358 }
1359 #endif
1360 #endif
1361 
1362 static abi_long do_pipe2(int host_pipe[], int flags)
1363 {
1364 #ifdef CONFIG_PIPE2
1365     return pipe2(host_pipe, flags);
1366 #else
1367     return -ENOSYS;
1368 #endif
1369 }
1370 
1371 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1372                         int flags, int is_pipe2)
1373 {
1374     int host_pipe[2];
1375     abi_long ret;
1376     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1377 
1378     if (is_error(ret))
1379         return get_errno(ret);
1380 
1381     /* Several targets have special calling conventions for the original
1382        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1383     if (!is_pipe2) {
1384 #if defined(TARGET_ALPHA)
1385         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1386         return host_pipe[0];
1387 #elif defined(TARGET_MIPS)
1388         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1389         return host_pipe[0];
1390 #elif defined(TARGET_SH4)
1391         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1392         return host_pipe[0];
1393 #elif defined(TARGET_SPARC)
1394         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1395         return host_pipe[0];
1396 #endif
1397     }
1398 
1399     if (put_user_s32(host_pipe[0], pipedes)
1400         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1401         return -TARGET_EFAULT;
1402     return get_errno(ret);
1403 }
1404 
1405 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1406                                               abi_ulong target_addr,
1407                                               socklen_t len)
1408 {
1409     struct target_ip_mreqn *target_smreqn;
1410 
1411     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1412     if (!target_smreqn)
1413         return -TARGET_EFAULT;
1414     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1415     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1416     if (len == sizeof(struct target_ip_mreqn))
1417         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1418     unlock_user(target_smreqn, target_addr, 0);
1419 
1420     return 0;
1421 }
1422 
1423 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1424                                                abi_ulong target_addr,
1425                                                socklen_t len)
1426 {
1427     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1428     sa_family_t sa_family;
1429     struct target_sockaddr *target_saddr;
1430 
1431     if (fd_trans_target_to_host_addr(fd)) {
1432         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1433     }
1434 
1435     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1436     if (!target_saddr)
1437         return -TARGET_EFAULT;
1438 
1439     sa_family = tswap16(target_saddr->sa_family);
1440 
1441     /* Oops. The caller might send a incomplete sun_path; sun_path
1442      * must be terminated by \0 (see the manual page), but
1443      * unfortunately it is quite common to specify sockaddr_un
1444      * length as "strlen(x->sun_path)" while it should be
1445      * "strlen(...) + 1". We'll fix that here if needed.
1446      * Linux kernel has a similar feature.
1447      */
1448 
1449     if (sa_family == AF_UNIX) {
1450         if (len < unix_maxlen && len > 0) {
1451             char *cp = (char*)target_saddr;
1452 
1453             if ( cp[len-1] && !cp[len] )
1454                 len++;
1455         }
1456         if (len > unix_maxlen)
1457             len = unix_maxlen;
1458     }
1459 
1460     memcpy(addr, target_saddr, len);
1461     addr->sa_family = sa_family;
1462     if (sa_family == AF_NETLINK) {
1463         struct sockaddr_nl *nladdr;
1464 
1465         nladdr = (struct sockaddr_nl *)addr;
1466         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1467         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1468     } else if (sa_family == AF_PACKET) {
1469 	struct target_sockaddr_ll *lladdr;
1470 
1471 	lladdr = (struct target_sockaddr_ll *)addr;
1472 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1473 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1474     }
1475     unlock_user(target_saddr, target_addr, 0);
1476 
1477     return 0;
1478 }
1479 
1480 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1481                                                struct sockaddr *addr,
1482                                                socklen_t len)
1483 {
1484     struct target_sockaddr *target_saddr;
1485 
1486     if (len == 0) {
1487         return 0;
1488     }
1489     assert(addr);
1490 
1491     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1492     if (!target_saddr)
1493         return -TARGET_EFAULT;
1494     memcpy(target_saddr, addr, len);
1495     if (len >= offsetof(struct target_sockaddr, sa_family) +
1496         sizeof(target_saddr->sa_family)) {
1497         target_saddr->sa_family = tswap16(addr->sa_family);
1498     }
1499     if (addr->sa_family == AF_NETLINK &&
1500         len >= sizeof(struct target_sockaddr_nl)) {
1501         struct target_sockaddr_nl *target_nl =
1502                (struct target_sockaddr_nl *)target_saddr;
1503         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1504         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1505     } else if (addr->sa_family == AF_PACKET) {
1506         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1507         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1508         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1509     } else if (addr->sa_family == AF_INET6 &&
1510                len >= sizeof(struct target_sockaddr_in6)) {
1511         struct target_sockaddr_in6 *target_in6 =
1512                (struct target_sockaddr_in6 *)target_saddr;
1513         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1514     }
1515     unlock_user(target_saddr, target_addr, len);
1516 
1517     return 0;
1518 }
1519 
1520 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1521                                            struct target_msghdr *target_msgh)
1522 {
1523     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1524     abi_long msg_controllen;
1525     abi_ulong target_cmsg_addr;
1526     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1527     socklen_t space = 0;
1528 
1529     msg_controllen = tswapal(target_msgh->msg_controllen);
1530     if (msg_controllen < sizeof (struct target_cmsghdr))
1531         goto the_end;
1532     target_cmsg_addr = tswapal(target_msgh->msg_control);
1533     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1534     target_cmsg_start = target_cmsg;
1535     if (!target_cmsg)
1536         return -TARGET_EFAULT;
1537 
1538     while (cmsg && target_cmsg) {
1539         void *data = CMSG_DATA(cmsg);
1540         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1541 
1542         int len = tswapal(target_cmsg->cmsg_len)
1543             - sizeof(struct target_cmsghdr);
1544 
1545         space += CMSG_SPACE(len);
1546         if (space > msgh->msg_controllen) {
1547             space -= CMSG_SPACE(len);
1548             /* This is a QEMU bug, since we allocated the payload
1549              * area ourselves (unlike overflow in host-to-target
1550              * conversion, which is just the guest giving us a buffer
1551              * that's too small). It can't happen for the payload types
1552              * we currently support; if it becomes an issue in future
1553              * we would need to improve our allocation strategy to
1554              * something more intelligent than "twice the size of the
1555              * target buffer we're reading from".
1556              */
1557             gemu_log("Host cmsg overflow\n");
1558             break;
1559         }
1560 
1561         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1562             cmsg->cmsg_level = SOL_SOCKET;
1563         } else {
1564             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1565         }
1566         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1567         cmsg->cmsg_len = CMSG_LEN(len);
1568 
1569         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1570             int *fd = (int *)data;
1571             int *target_fd = (int *)target_data;
1572             int i, numfds = len / sizeof(int);
1573 
1574             for (i = 0; i < numfds; i++) {
1575                 __get_user(fd[i], target_fd + i);
1576             }
1577         } else if (cmsg->cmsg_level == SOL_SOCKET
1578                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1579             struct ucred *cred = (struct ucred *)data;
1580             struct target_ucred *target_cred =
1581                 (struct target_ucred *)target_data;
1582 
1583             __get_user(cred->pid, &target_cred->pid);
1584             __get_user(cred->uid, &target_cred->uid);
1585             __get_user(cred->gid, &target_cred->gid);
1586         } else {
1587             gemu_log("Unsupported ancillary data: %d/%d\n",
1588                                         cmsg->cmsg_level, cmsg->cmsg_type);
1589             memcpy(data, target_data, len);
1590         }
1591 
1592         cmsg = CMSG_NXTHDR(msgh, cmsg);
1593         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1594                                          target_cmsg_start);
1595     }
1596     unlock_user(target_cmsg, target_cmsg_addr, 0);
1597  the_end:
1598     msgh->msg_controllen = space;
1599     return 0;
1600 }
1601 
1602 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1603                                            struct msghdr *msgh)
1604 {
1605     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1606     abi_long msg_controllen;
1607     abi_ulong target_cmsg_addr;
1608     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1609     socklen_t space = 0;
1610 
1611     msg_controllen = tswapal(target_msgh->msg_controllen);
1612     if (msg_controllen < sizeof (struct target_cmsghdr))
1613         goto the_end;
1614     target_cmsg_addr = tswapal(target_msgh->msg_control);
1615     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1616     target_cmsg_start = target_cmsg;
1617     if (!target_cmsg)
1618         return -TARGET_EFAULT;
1619 
1620     while (cmsg && target_cmsg) {
1621         void *data = CMSG_DATA(cmsg);
1622         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1623 
1624         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1625         int tgt_len, tgt_space;
1626 
1627         /* We never copy a half-header but may copy half-data;
1628          * this is Linux's behaviour in put_cmsg(). Note that
1629          * truncation here is a guest problem (which we report
1630          * to the guest via the CTRUNC bit), unlike truncation
1631          * in target_to_host_cmsg, which is a QEMU bug.
1632          */
1633         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1634             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1635             break;
1636         }
1637 
1638         if (cmsg->cmsg_level == SOL_SOCKET) {
1639             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1640         } else {
1641             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1642         }
1643         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1644 
1645         /* Payload types which need a different size of payload on
1646          * the target must adjust tgt_len here.
1647          */
1648         tgt_len = len;
1649         switch (cmsg->cmsg_level) {
1650         case SOL_SOCKET:
1651             switch (cmsg->cmsg_type) {
1652             case SO_TIMESTAMP:
1653                 tgt_len = sizeof(struct target_timeval);
1654                 break;
1655             default:
1656                 break;
1657             }
1658             break;
1659         default:
1660             break;
1661         }
1662 
1663         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1664             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1665             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1666         }
1667 
1668         /* We must now copy-and-convert len bytes of payload
1669          * into tgt_len bytes of destination space. Bear in mind
1670          * that in both source and destination we may be dealing
1671          * with a truncated value!
1672          */
1673         switch (cmsg->cmsg_level) {
1674         case SOL_SOCKET:
1675             switch (cmsg->cmsg_type) {
1676             case SCM_RIGHTS:
1677             {
1678                 int *fd = (int *)data;
1679                 int *target_fd = (int *)target_data;
1680                 int i, numfds = tgt_len / sizeof(int);
1681 
1682                 for (i = 0; i < numfds; i++) {
1683                     __put_user(fd[i], target_fd + i);
1684                 }
1685                 break;
1686             }
1687             case SO_TIMESTAMP:
1688             {
1689                 struct timeval *tv = (struct timeval *)data;
1690                 struct target_timeval *target_tv =
1691                     (struct target_timeval *)target_data;
1692 
1693                 if (len != sizeof(struct timeval) ||
1694                     tgt_len != sizeof(struct target_timeval)) {
1695                     goto unimplemented;
1696                 }
1697 
1698                 /* copy struct timeval to target */
1699                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1700                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1701                 break;
1702             }
1703             case SCM_CREDENTIALS:
1704             {
1705                 struct ucred *cred = (struct ucred *)data;
1706                 struct target_ucred *target_cred =
1707                     (struct target_ucred *)target_data;
1708 
1709                 __put_user(cred->pid, &target_cred->pid);
1710                 __put_user(cred->uid, &target_cred->uid);
1711                 __put_user(cred->gid, &target_cred->gid);
1712                 break;
1713             }
1714             default:
1715                 goto unimplemented;
1716             }
1717             break;
1718 
1719         case SOL_IP:
1720             switch (cmsg->cmsg_type) {
1721             case IP_TTL:
1722             {
1723                 uint32_t *v = (uint32_t *)data;
1724                 uint32_t *t_int = (uint32_t *)target_data;
1725 
1726                 if (len != sizeof(uint32_t) ||
1727                     tgt_len != sizeof(uint32_t)) {
1728                     goto unimplemented;
1729                 }
1730                 __put_user(*v, t_int);
1731                 break;
1732             }
1733             case IP_RECVERR:
1734             {
1735                 struct errhdr_t {
1736                    struct sock_extended_err ee;
1737                    struct sockaddr_in offender;
1738                 };
1739                 struct errhdr_t *errh = (struct errhdr_t *)data;
1740                 struct errhdr_t *target_errh =
1741                     (struct errhdr_t *)target_data;
1742 
1743                 if (len != sizeof(struct errhdr_t) ||
1744                     tgt_len != sizeof(struct errhdr_t)) {
1745                     goto unimplemented;
1746                 }
1747                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1748                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1749                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1750                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1751                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1752                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1753                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1754                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1755                     (void *) &errh->offender, sizeof(errh->offender));
1756                 break;
1757             }
1758             default:
1759                 goto unimplemented;
1760             }
1761             break;
1762 
1763         case SOL_IPV6:
1764             switch (cmsg->cmsg_type) {
1765             case IPV6_HOPLIMIT:
1766             {
1767                 uint32_t *v = (uint32_t *)data;
1768                 uint32_t *t_int = (uint32_t *)target_data;
1769 
1770                 if (len != sizeof(uint32_t) ||
1771                     tgt_len != sizeof(uint32_t)) {
1772                     goto unimplemented;
1773                 }
1774                 __put_user(*v, t_int);
1775                 break;
1776             }
1777             case IPV6_RECVERR:
1778             {
1779                 struct errhdr6_t {
1780                    struct sock_extended_err ee;
1781                    struct sockaddr_in6 offender;
1782                 };
1783                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1784                 struct errhdr6_t *target_errh =
1785                     (struct errhdr6_t *)target_data;
1786 
1787                 if (len != sizeof(struct errhdr6_t) ||
1788                     tgt_len != sizeof(struct errhdr6_t)) {
1789                     goto unimplemented;
1790                 }
1791                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1792                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1793                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1794                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1795                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1796                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1797                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1798                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1799                     (void *) &errh->offender, sizeof(errh->offender));
1800                 break;
1801             }
1802             default:
1803                 goto unimplemented;
1804             }
1805             break;
1806 
1807         default:
1808         unimplemented:
1809             gemu_log("Unsupported ancillary data: %d/%d\n",
1810                                         cmsg->cmsg_level, cmsg->cmsg_type);
1811             memcpy(target_data, data, MIN(len, tgt_len));
1812             if (tgt_len > len) {
1813                 memset(target_data + len, 0, tgt_len - len);
1814             }
1815         }
1816 
1817         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1818         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1819         if (msg_controllen < tgt_space) {
1820             tgt_space = msg_controllen;
1821         }
1822         msg_controllen -= tgt_space;
1823         space += tgt_space;
1824         cmsg = CMSG_NXTHDR(msgh, cmsg);
1825         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1826                                          target_cmsg_start);
1827     }
1828     unlock_user(target_cmsg, target_cmsg_addr, space);
1829  the_end:
1830     target_msgh->msg_controllen = tswapal(space);
1831     return 0;
1832 }
1833 
1834 /* do_setsockopt() Must return target values and target errnos. */
1835 static abi_long do_setsockopt(int sockfd, int level, int optname,
1836                               abi_ulong optval_addr, socklen_t optlen)
1837 {
1838     abi_long ret;
1839     int val;
1840     struct ip_mreqn *ip_mreq;
1841     struct ip_mreq_source *ip_mreq_source;
1842 
1843     switch(level) {
1844     case SOL_TCP:
1845         /* TCP options all take an 'int' value.  */
1846         if (optlen < sizeof(uint32_t))
1847             return -TARGET_EINVAL;
1848 
1849         if (get_user_u32(val, optval_addr))
1850             return -TARGET_EFAULT;
1851         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1852         break;
1853     case SOL_IP:
1854         switch(optname) {
1855         case IP_TOS:
1856         case IP_TTL:
1857         case IP_HDRINCL:
1858         case IP_ROUTER_ALERT:
1859         case IP_RECVOPTS:
1860         case IP_RETOPTS:
1861         case IP_PKTINFO:
1862         case IP_MTU_DISCOVER:
1863         case IP_RECVERR:
1864         case IP_RECVTTL:
1865         case IP_RECVTOS:
1866 #ifdef IP_FREEBIND
1867         case IP_FREEBIND:
1868 #endif
1869         case IP_MULTICAST_TTL:
1870         case IP_MULTICAST_LOOP:
1871             val = 0;
1872             if (optlen >= sizeof(uint32_t)) {
1873                 if (get_user_u32(val, optval_addr))
1874                     return -TARGET_EFAULT;
1875             } else if (optlen >= 1) {
1876                 if (get_user_u8(val, optval_addr))
1877                     return -TARGET_EFAULT;
1878             }
1879             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1880             break;
1881         case IP_ADD_MEMBERSHIP:
1882         case IP_DROP_MEMBERSHIP:
1883             if (optlen < sizeof (struct target_ip_mreq) ||
1884                 optlen > sizeof (struct target_ip_mreqn))
1885                 return -TARGET_EINVAL;
1886 
1887             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1888             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1889             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1890             break;
1891 
1892         case IP_BLOCK_SOURCE:
1893         case IP_UNBLOCK_SOURCE:
1894         case IP_ADD_SOURCE_MEMBERSHIP:
1895         case IP_DROP_SOURCE_MEMBERSHIP:
1896             if (optlen != sizeof (struct target_ip_mreq_source))
1897                 return -TARGET_EINVAL;
1898 
1899             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1900             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1901             unlock_user (ip_mreq_source, optval_addr, 0);
1902             break;
1903 
1904         default:
1905             goto unimplemented;
1906         }
1907         break;
1908     case SOL_IPV6:
1909         switch (optname) {
1910         case IPV6_MTU_DISCOVER:
1911         case IPV6_MTU:
1912         case IPV6_V6ONLY:
1913         case IPV6_RECVPKTINFO:
1914         case IPV6_UNICAST_HOPS:
1915         case IPV6_MULTICAST_HOPS:
1916         case IPV6_MULTICAST_LOOP:
1917         case IPV6_RECVERR:
1918         case IPV6_RECVHOPLIMIT:
1919         case IPV6_2292HOPLIMIT:
1920         case IPV6_CHECKSUM:
1921         case IPV6_ADDRFORM:
1922         case IPV6_2292PKTINFO:
1923         case IPV6_RECVTCLASS:
1924         case IPV6_RECVRTHDR:
1925         case IPV6_2292RTHDR:
1926         case IPV6_RECVHOPOPTS:
1927         case IPV6_2292HOPOPTS:
1928         case IPV6_RECVDSTOPTS:
1929         case IPV6_2292DSTOPTS:
1930         case IPV6_TCLASS:
1931 #ifdef IPV6_RECVPATHMTU
1932         case IPV6_RECVPATHMTU:
1933 #endif
1934 #ifdef IPV6_TRANSPARENT
1935         case IPV6_TRANSPARENT:
1936 #endif
1937 #ifdef IPV6_FREEBIND
1938         case IPV6_FREEBIND:
1939 #endif
1940 #ifdef IPV6_RECVORIGDSTADDR
1941         case IPV6_RECVORIGDSTADDR:
1942 #endif
1943             val = 0;
1944             if (optlen < sizeof(uint32_t)) {
1945                 return -TARGET_EINVAL;
1946             }
1947             if (get_user_u32(val, optval_addr)) {
1948                 return -TARGET_EFAULT;
1949             }
1950             ret = get_errno(setsockopt(sockfd, level, optname,
1951                                        &val, sizeof(val)));
1952             break;
1953         case IPV6_PKTINFO:
1954         {
1955             struct in6_pktinfo pki;
1956 
1957             if (optlen < sizeof(pki)) {
1958                 return -TARGET_EINVAL;
1959             }
1960 
1961             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1962                 return -TARGET_EFAULT;
1963             }
1964 
1965             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1966 
1967             ret = get_errno(setsockopt(sockfd, level, optname,
1968                                        &pki, sizeof(pki)));
1969             break;
1970         }
1971         case IPV6_ADD_MEMBERSHIP:
1972         case IPV6_DROP_MEMBERSHIP:
1973         {
1974             struct ipv6_mreq ipv6mreq;
1975 
1976             if (optlen < sizeof(ipv6mreq)) {
1977                 return -TARGET_EINVAL;
1978             }
1979 
1980             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1981                 return -TARGET_EFAULT;
1982             }
1983 
1984             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1985 
1986             ret = get_errno(setsockopt(sockfd, level, optname,
1987                                        &ipv6mreq, sizeof(ipv6mreq)));
1988             break;
1989         }
1990         default:
1991             goto unimplemented;
1992         }
1993         break;
1994     case SOL_ICMPV6:
1995         switch (optname) {
1996         case ICMPV6_FILTER:
1997         {
1998             struct icmp6_filter icmp6f;
1999 
2000             if (optlen > sizeof(icmp6f)) {
2001                 optlen = sizeof(icmp6f);
2002             }
2003 
2004             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2005                 return -TARGET_EFAULT;
2006             }
2007 
2008             for (val = 0; val < 8; val++) {
2009                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2010             }
2011 
2012             ret = get_errno(setsockopt(sockfd, level, optname,
2013                                        &icmp6f, optlen));
2014             break;
2015         }
2016         default:
2017             goto unimplemented;
2018         }
2019         break;
2020     case SOL_RAW:
2021         switch (optname) {
2022         case ICMP_FILTER:
2023         case IPV6_CHECKSUM:
2024             /* those take an u32 value */
2025             if (optlen < sizeof(uint32_t)) {
2026                 return -TARGET_EINVAL;
2027             }
2028 
2029             if (get_user_u32(val, optval_addr)) {
2030                 return -TARGET_EFAULT;
2031             }
2032             ret = get_errno(setsockopt(sockfd, level, optname,
2033                                        &val, sizeof(val)));
2034             break;
2035 
2036         default:
2037             goto unimplemented;
2038         }
2039         break;
2040 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2041     case SOL_ALG:
2042         switch (optname) {
2043         case ALG_SET_KEY:
2044         {
2045             char *alg_key = g_malloc(optlen);
2046 
2047             if (!alg_key) {
2048                 return -TARGET_ENOMEM;
2049             }
2050             if (copy_from_user(alg_key, optval_addr, optlen)) {
2051                 g_free(alg_key);
2052                 return -TARGET_EFAULT;
2053             }
2054             ret = get_errno(setsockopt(sockfd, level, optname,
2055                                        alg_key, optlen));
2056             g_free(alg_key);
2057             break;
2058         }
2059         case ALG_SET_AEAD_AUTHSIZE:
2060         {
2061             ret = get_errno(setsockopt(sockfd, level, optname,
2062                                        NULL, optlen));
2063             break;
2064         }
2065         default:
2066             goto unimplemented;
2067         }
2068         break;
2069 #endif
2070     case TARGET_SOL_SOCKET:
2071         switch (optname) {
2072         case TARGET_SO_RCVTIMEO:
2073         {
2074                 struct timeval tv;
2075 
2076                 optname = SO_RCVTIMEO;
2077 
2078 set_timeout:
2079                 if (optlen != sizeof(struct target_timeval)) {
2080                     return -TARGET_EINVAL;
2081                 }
2082 
2083                 if (copy_from_user_timeval(&tv, optval_addr)) {
2084                     return -TARGET_EFAULT;
2085                 }
2086 
2087                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2088                                 &tv, sizeof(tv)));
2089                 return ret;
2090         }
2091         case TARGET_SO_SNDTIMEO:
2092                 optname = SO_SNDTIMEO;
2093                 goto set_timeout;
2094         case TARGET_SO_ATTACH_FILTER:
2095         {
2096                 struct target_sock_fprog *tfprog;
2097                 struct target_sock_filter *tfilter;
2098                 struct sock_fprog fprog;
2099                 struct sock_filter *filter;
2100                 int i;
2101 
2102                 if (optlen != sizeof(*tfprog)) {
2103                     return -TARGET_EINVAL;
2104                 }
2105                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2106                     return -TARGET_EFAULT;
2107                 }
2108                 if (!lock_user_struct(VERIFY_READ, tfilter,
2109                                       tswapal(tfprog->filter), 0)) {
2110                     unlock_user_struct(tfprog, optval_addr, 1);
2111                     return -TARGET_EFAULT;
2112                 }
2113 
2114                 fprog.len = tswap16(tfprog->len);
2115                 filter = g_try_new(struct sock_filter, fprog.len);
2116                 if (filter == NULL) {
2117                     unlock_user_struct(tfilter, tfprog->filter, 1);
2118                     unlock_user_struct(tfprog, optval_addr, 1);
2119                     return -TARGET_ENOMEM;
2120                 }
2121                 for (i = 0; i < fprog.len; i++) {
2122                     filter[i].code = tswap16(tfilter[i].code);
2123                     filter[i].jt = tfilter[i].jt;
2124                     filter[i].jf = tfilter[i].jf;
2125                     filter[i].k = tswap32(tfilter[i].k);
2126                 }
2127                 fprog.filter = filter;
2128 
2129                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2130                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2131                 g_free(filter);
2132 
2133                 unlock_user_struct(tfilter, tfprog->filter, 1);
2134                 unlock_user_struct(tfprog, optval_addr, 1);
2135                 return ret;
2136         }
2137 	case TARGET_SO_BINDTODEVICE:
2138 	{
2139 		char *dev_ifname, *addr_ifname;
2140 
2141 		if (optlen > IFNAMSIZ - 1) {
2142 		    optlen = IFNAMSIZ - 1;
2143 		}
2144 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2145 		if (!dev_ifname) {
2146 		    return -TARGET_EFAULT;
2147 		}
2148 		optname = SO_BINDTODEVICE;
2149 		addr_ifname = alloca(IFNAMSIZ);
2150 		memcpy(addr_ifname, dev_ifname, optlen);
2151 		addr_ifname[optlen] = 0;
2152 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2153                                            addr_ifname, optlen));
2154 		unlock_user (dev_ifname, optval_addr, 0);
2155 		return ret;
2156 	}
2157         case TARGET_SO_LINGER:
2158         {
2159                 struct linger lg;
2160                 struct target_linger *tlg;
2161 
2162                 if (optlen != sizeof(struct target_linger)) {
2163                     return -TARGET_EINVAL;
2164                 }
2165                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2166                     return -TARGET_EFAULT;
2167                 }
2168                 __get_user(lg.l_onoff, &tlg->l_onoff);
2169                 __get_user(lg.l_linger, &tlg->l_linger);
2170                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2171                                 &lg, sizeof(lg)));
2172                 unlock_user_struct(tlg, optval_addr, 0);
2173                 return ret;
2174         }
2175             /* Options with 'int' argument.  */
2176         case TARGET_SO_DEBUG:
2177 		optname = SO_DEBUG;
2178 		break;
2179         case TARGET_SO_REUSEADDR:
2180 		optname = SO_REUSEADDR;
2181 		break;
2182 #ifdef SO_REUSEPORT
2183         case TARGET_SO_REUSEPORT:
2184                 optname = SO_REUSEPORT;
2185                 break;
2186 #endif
2187         case TARGET_SO_TYPE:
2188 		optname = SO_TYPE;
2189 		break;
2190         case TARGET_SO_ERROR:
2191 		optname = SO_ERROR;
2192 		break;
2193         case TARGET_SO_DONTROUTE:
2194 		optname = SO_DONTROUTE;
2195 		break;
2196         case TARGET_SO_BROADCAST:
2197 		optname = SO_BROADCAST;
2198 		break;
2199         case TARGET_SO_SNDBUF:
2200 		optname = SO_SNDBUF;
2201 		break;
2202         case TARGET_SO_SNDBUFFORCE:
2203                 optname = SO_SNDBUFFORCE;
2204                 break;
2205         case TARGET_SO_RCVBUF:
2206 		optname = SO_RCVBUF;
2207 		break;
2208         case TARGET_SO_RCVBUFFORCE:
2209                 optname = SO_RCVBUFFORCE;
2210                 break;
2211         case TARGET_SO_KEEPALIVE:
2212 		optname = SO_KEEPALIVE;
2213 		break;
2214         case TARGET_SO_OOBINLINE:
2215 		optname = SO_OOBINLINE;
2216 		break;
2217         case TARGET_SO_NO_CHECK:
2218 		optname = SO_NO_CHECK;
2219 		break;
2220         case TARGET_SO_PRIORITY:
2221 		optname = SO_PRIORITY;
2222 		break;
2223 #ifdef SO_BSDCOMPAT
2224         case TARGET_SO_BSDCOMPAT:
2225 		optname = SO_BSDCOMPAT;
2226 		break;
2227 #endif
2228         case TARGET_SO_PASSCRED:
2229 		optname = SO_PASSCRED;
2230 		break;
2231         case TARGET_SO_PASSSEC:
2232                 optname = SO_PASSSEC;
2233                 break;
2234         case TARGET_SO_TIMESTAMP:
2235 		optname = SO_TIMESTAMP;
2236 		break;
2237         case TARGET_SO_RCVLOWAT:
2238 		optname = SO_RCVLOWAT;
2239 		break;
2240         default:
2241             goto unimplemented;
2242         }
2243 	if (optlen < sizeof(uint32_t))
2244             return -TARGET_EINVAL;
2245 
2246 	if (get_user_u32(val, optval_addr))
2247             return -TARGET_EFAULT;
2248 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2249         break;
2250     default:
2251     unimplemented:
2252         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2253         ret = -TARGET_ENOPROTOOPT;
2254     }
2255     return ret;
2256 }
2257 
2258 /* do_getsockopt() Must return target values and target errnos. */
2259 static abi_long do_getsockopt(int sockfd, int level, int optname,
2260                               abi_ulong optval_addr, abi_ulong optlen)
2261 {
2262     abi_long ret;
2263     int len, val;
2264     socklen_t lv;
2265 
2266     switch(level) {
2267     case TARGET_SOL_SOCKET:
2268         level = SOL_SOCKET;
2269         switch (optname) {
2270         /* These don't just return a single integer */
2271         case TARGET_SO_RCVTIMEO:
2272         case TARGET_SO_SNDTIMEO:
2273         case TARGET_SO_PEERNAME:
2274             goto unimplemented;
2275         case TARGET_SO_PEERCRED: {
2276             struct ucred cr;
2277             socklen_t crlen;
2278             struct target_ucred *tcr;
2279 
2280             if (get_user_u32(len, optlen)) {
2281                 return -TARGET_EFAULT;
2282             }
2283             if (len < 0) {
2284                 return -TARGET_EINVAL;
2285             }
2286 
2287             crlen = sizeof(cr);
2288             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2289                                        &cr, &crlen));
2290             if (ret < 0) {
2291                 return ret;
2292             }
2293             if (len > crlen) {
2294                 len = crlen;
2295             }
2296             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2297                 return -TARGET_EFAULT;
2298             }
2299             __put_user(cr.pid, &tcr->pid);
2300             __put_user(cr.uid, &tcr->uid);
2301             __put_user(cr.gid, &tcr->gid);
2302             unlock_user_struct(tcr, optval_addr, 1);
2303             if (put_user_u32(len, optlen)) {
2304                 return -TARGET_EFAULT;
2305             }
2306             break;
2307         }
2308         case TARGET_SO_LINGER:
2309         {
2310             struct linger lg;
2311             socklen_t lglen;
2312             struct target_linger *tlg;
2313 
2314             if (get_user_u32(len, optlen)) {
2315                 return -TARGET_EFAULT;
2316             }
2317             if (len < 0) {
2318                 return -TARGET_EINVAL;
2319             }
2320 
2321             lglen = sizeof(lg);
2322             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2323                                        &lg, &lglen));
2324             if (ret < 0) {
2325                 return ret;
2326             }
2327             if (len > lglen) {
2328                 len = lglen;
2329             }
2330             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2331                 return -TARGET_EFAULT;
2332             }
2333             __put_user(lg.l_onoff, &tlg->l_onoff);
2334             __put_user(lg.l_linger, &tlg->l_linger);
2335             unlock_user_struct(tlg, optval_addr, 1);
2336             if (put_user_u32(len, optlen)) {
2337                 return -TARGET_EFAULT;
2338             }
2339             break;
2340         }
2341         /* Options with 'int' argument.  */
2342         case TARGET_SO_DEBUG:
2343             optname = SO_DEBUG;
2344             goto int_case;
2345         case TARGET_SO_REUSEADDR:
2346             optname = SO_REUSEADDR;
2347             goto int_case;
2348 #ifdef SO_REUSEPORT
2349         case TARGET_SO_REUSEPORT:
2350             optname = SO_REUSEPORT;
2351             goto int_case;
2352 #endif
2353         case TARGET_SO_TYPE:
2354             optname = SO_TYPE;
2355             goto int_case;
2356         case TARGET_SO_ERROR:
2357             optname = SO_ERROR;
2358             goto int_case;
2359         case TARGET_SO_DONTROUTE:
2360             optname = SO_DONTROUTE;
2361             goto int_case;
2362         case TARGET_SO_BROADCAST:
2363             optname = SO_BROADCAST;
2364             goto int_case;
2365         case TARGET_SO_SNDBUF:
2366             optname = SO_SNDBUF;
2367             goto int_case;
2368         case TARGET_SO_RCVBUF:
2369             optname = SO_RCVBUF;
2370             goto int_case;
2371         case TARGET_SO_KEEPALIVE:
2372             optname = SO_KEEPALIVE;
2373             goto int_case;
2374         case TARGET_SO_OOBINLINE:
2375             optname = SO_OOBINLINE;
2376             goto int_case;
2377         case TARGET_SO_NO_CHECK:
2378             optname = SO_NO_CHECK;
2379             goto int_case;
2380         case TARGET_SO_PRIORITY:
2381             optname = SO_PRIORITY;
2382             goto int_case;
2383 #ifdef SO_BSDCOMPAT
2384         case TARGET_SO_BSDCOMPAT:
2385             optname = SO_BSDCOMPAT;
2386             goto int_case;
2387 #endif
2388         case TARGET_SO_PASSCRED:
2389             optname = SO_PASSCRED;
2390             goto int_case;
2391         case TARGET_SO_TIMESTAMP:
2392             optname = SO_TIMESTAMP;
2393             goto int_case;
2394         case TARGET_SO_RCVLOWAT:
2395             optname = SO_RCVLOWAT;
2396             goto int_case;
2397         case TARGET_SO_ACCEPTCONN:
2398             optname = SO_ACCEPTCONN;
2399             goto int_case;
2400         default:
2401             goto int_case;
2402         }
2403         break;
2404     case SOL_TCP:
2405         /* TCP options all take an 'int' value.  */
2406     int_case:
2407         if (get_user_u32(len, optlen))
2408             return -TARGET_EFAULT;
2409         if (len < 0)
2410             return -TARGET_EINVAL;
2411         lv = sizeof(lv);
2412         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2413         if (ret < 0)
2414             return ret;
2415         if (optname == SO_TYPE) {
2416             val = host_to_target_sock_type(val);
2417         }
2418         if (len > lv)
2419             len = lv;
2420         if (len == 4) {
2421             if (put_user_u32(val, optval_addr))
2422                 return -TARGET_EFAULT;
2423         } else {
2424             if (put_user_u8(val, optval_addr))
2425                 return -TARGET_EFAULT;
2426         }
2427         if (put_user_u32(len, optlen))
2428             return -TARGET_EFAULT;
2429         break;
2430     case SOL_IP:
2431         switch(optname) {
2432         case IP_TOS:
2433         case IP_TTL:
2434         case IP_HDRINCL:
2435         case IP_ROUTER_ALERT:
2436         case IP_RECVOPTS:
2437         case IP_RETOPTS:
2438         case IP_PKTINFO:
2439         case IP_MTU_DISCOVER:
2440         case IP_RECVERR:
2441         case IP_RECVTOS:
2442 #ifdef IP_FREEBIND
2443         case IP_FREEBIND:
2444 #endif
2445         case IP_MULTICAST_TTL:
2446         case IP_MULTICAST_LOOP:
2447             if (get_user_u32(len, optlen))
2448                 return -TARGET_EFAULT;
2449             if (len < 0)
2450                 return -TARGET_EINVAL;
2451             lv = sizeof(lv);
2452             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2453             if (ret < 0)
2454                 return ret;
2455             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2456                 len = 1;
2457                 if (put_user_u32(len, optlen)
2458                     || put_user_u8(val, optval_addr))
2459                     return -TARGET_EFAULT;
2460             } else {
2461                 if (len > sizeof(int))
2462                     len = sizeof(int);
2463                 if (put_user_u32(len, optlen)
2464                     || put_user_u32(val, optval_addr))
2465                     return -TARGET_EFAULT;
2466             }
2467             break;
2468         default:
2469             ret = -TARGET_ENOPROTOOPT;
2470             break;
2471         }
2472         break;
2473     case SOL_IPV6:
2474         switch (optname) {
2475         case IPV6_MTU_DISCOVER:
2476         case IPV6_MTU:
2477         case IPV6_V6ONLY:
2478         case IPV6_RECVPKTINFO:
2479         case IPV6_UNICAST_HOPS:
2480         case IPV6_MULTICAST_HOPS:
2481         case IPV6_MULTICAST_LOOP:
2482         case IPV6_RECVERR:
2483         case IPV6_RECVHOPLIMIT:
2484         case IPV6_2292HOPLIMIT:
2485         case IPV6_CHECKSUM:
2486         case IPV6_ADDRFORM:
2487         case IPV6_2292PKTINFO:
2488         case IPV6_RECVTCLASS:
2489         case IPV6_RECVRTHDR:
2490         case IPV6_2292RTHDR:
2491         case IPV6_RECVHOPOPTS:
2492         case IPV6_2292HOPOPTS:
2493         case IPV6_RECVDSTOPTS:
2494         case IPV6_2292DSTOPTS:
2495         case IPV6_TCLASS:
2496 #ifdef IPV6_RECVPATHMTU
2497         case IPV6_RECVPATHMTU:
2498 #endif
2499 #ifdef IPV6_TRANSPARENT
2500         case IPV6_TRANSPARENT:
2501 #endif
2502 #ifdef IPV6_FREEBIND
2503         case IPV6_FREEBIND:
2504 #endif
2505 #ifdef IPV6_RECVORIGDSTADDR
2506         case IPV6_RECVORIGDSTADDR:
2507 #endif
2508             if (get_user_u32(len, optlen))
2509                 return -TARGET_EFAULT;
2510             if (len < 0)
2511                 return -TARGET_EINVAL;
2512             lv = sizeof(lv);
2513             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2514             if (ret < 0)
2515                 return ret;
2516             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2517                 len = 1;
2518                 if (put_user_u32(len, optlen)
2519                     || put_user_u8(val, optval_addr))
2520                     return -TARGET_EFAULT;
2521             } else {
2522                 if (len > sizeof(int))
2523                     len = sizeof(int);
2524                 if (put_user_u32(len, optlen)
2525                     || put_user_u32(val, optval_addr))
2526                     return -TARGET_EFAULT;
2527             }
2528             break;
2529         default:
2530             ret = -TARGET_ENOPROTOOPT;
2531             break;
2532         }
2533         break;
2534     default:
2535     unimplemented:
2536         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2537                  level, optname);
2538         ret = -TARGET_EOPNOTSUPP;
2539         break;
2540     }
2541     return ret;
2542 }
2543 
2544 /* Convert target low/high pair representing file offset into the host
2545  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2546  * as the kernel doesn't handle them either.
2547  */
2548 static void target_to_host_low_high(abi_ulong tlow,
2549                                     abi_ulong thigh,
2550                                     unsigned long *hlow,
2551                                     unsigned long *hhigh)
2552 {
2553     uint64_t off = tlow |
2554         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2555         TARGET_LONG_BITS / 2;
2556 
2557     *hlow = off;
2558     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2559 }
2560 
2561 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2562                                 abi_ulong count, int copy)
2563 {
2564     struct target_iovec *target_vec;
2565     struct iovec *vec;
2566     abi_ulong total_len, max_len;
2567     int i;
2568     int err = 0;
2569     bool bad_address = false;
2570 
2571     if (count == 0) {
2572         errno = 0;
2573         return NULL;
2574     }
2575     if (count > IOV_MAX) {
2576         errno = EINVAL;
2577         return NULL;
2578     }
2579 
2580     vec = g_try_new0(struct iovec, count);
2581     if (vec == NULL) {
2582         errno = ENOMEM;
2583         return NULL;
2584     }
2585 
2586     target_vec = lock_user(VERIFY_READ, target_addr,
2587                            count * sizeof(struct target_iovec), 1);
2588     if (target_vec == NULL) {
2589         err = EFAULT;
2590         goto fail2;
2591     }
2592 
2593     /* ??? If host page size > target page size, this will result in a
2594        value larger than what we can actually support.  */
2595     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2596     total_len = 0;
2597 
2598     for (i = 0; i < count; i++) {
2599         abi_ulong base = tswapal(target_vec[i].iov_base);
2600         abi_long len = tswapal(target_vec[i].iov_len);
2601 
2602         if (len < 0) {
2603             err = EINVAL;
2604             goto fail;
2605         } else if (len == 0) {
2606             /* Zero length pointer is ignored.  */
2607             vec[i].iov_base = 0;
2608         } else {
2609             vec[i].iov_base = lock_user(type, base, len, copy);
2610             /* If the first buffer pointer is bad, this is a fault.  But
2611              * subsequent bad buffers will result in a partial write; this
2612              * is realized by filling the vector with null pointers and
2613              * zero lengths. */
2614             if (!vec[i].iov_base) {
2615                 if (i == 0) {
2616                     err = EFAULT;
2617                     goto fail;
2618                 } else {
2619                     bad_address = true;
2620                 }
2621             }
2622             if (bad_address) {
2623                 len = 0;
2624             }
2625             if (len > max_len - total_len) {
2626                 len = max_len - total_len;
2627             }
2628         }
2629         vec[i].iov_len = len;
2630         total_len += len;
2631     }
2632 
2633     unlock_user(target_vec, target_addr, 0);
2634     return vec;
2635 
2636  fail:
2637     while (--i >= 0) {
2638         if (tswapal(target_vec[i].iov_len) > 0) {
2639             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2640         }
2641     }
2642     unlock_user(target_vec, target_addr, 0);
2643  fail2:
2644     g_free(vec);
2645     errno = err;
2646     return NULL;
2647 }
2648 
2649 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2650                          abi_ulong count, int copy)
2651 {
2652     struct target_iovec *target_vec;
2653     int i;
2654 
2655     target_vec = lock_user(VERIFY_READ, target_addr,
2656                            count * sizeof(struct target_iovec), 1);
2657     if (target_vec) {
2658         for (i = 0; i < count; i++) {
2659             abi_ulong base = tswapal(target_vec[i].iov_base);
2660             abi_long len = tswapal(target_vec[i].iov_len);
2661             if (len < 0) {
2662                 break;
2663             }
2664             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2665         }
2666         unlock_user(target_vec, target_addr, 0);
2667     }
2668 
2669     g_free(vec);
2670 }
2671 
2672 static inline int target_to_host_sock_type(int *type)
2673 {
2674     int host_type = 0;
2675     int target_type = *type;
2676 
2677     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2678     case TARGET_SOCK_DGRAM:
2679         host_type = SOCK_DGRAM;
2680         break;
2681     case TARGET_SOCK_STREAM:
2682         host_type = SOCK_STREAM;
2683         break;
2684     default:
2685         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2686         break;
2687     }
2688     if (target_type & TARGET_SOCK_CLOEXEC) {
2689 #if defined(SOCK_CLOEXEC)
2690         host_type |= SOCK_CLOEXEC;
2691 #else
2692         return -TARGET_EINVAL;
2693 #endif
2694     }
2695     if (target_type & TARGET_SOCK_NONBLOCK) {
2696 #if defined(SOCK_NONBLOCK)
2697         host_type |= SOCK_NONBLOCK;
2698 #elif !defined(O_NONBLOCK)
2699         return -TARGET_EINVAL;
2700 #endif
2701     }
2702     *type = host_type;
2703     return 0;
2704 }
2705 
2706 /* Try to emulate socket type flags after socket creation.  */
2707 static int sock_flags_fixup(int fd, int target_type)
2708 {
2709 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2710     if (target_type & TARGET_SOCK_NONBLOCK) {
2711         int flags = fcntl(fd, F_GETFL);
2712         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2713             close(fd);
2714             return -TARGET_EINVAL;
2715         }
2716     }
2717 #endif
2718     return fd;
2719 }
2720 
2721 /* do_socket() Must return target values and target errnos. */
2722 static abi_long do_socket(int domain, int type, int protocol)
2723 {
2724     int target_type = type;
2725     int ret;
2726 
2727     ret = target_to_host_sock_type(&type);
2728     if (ret) {
2729         return ret;
2730     }
2731 
2732     if (domain == PF_NETLINK && !(
2733 #ifdef CONFIG_RTNETLINK
2734          protocol == NETLINK_ROUTE ||
2735 #endif
2736          protocol == NETLINK_KOBJECT_UEVENT ||
2737          protocol == NETLINK_AUDIT)) {
2738         return -EPFNOSUPPORT;
2739     }
2740 
2741     if (domain == AF_PACKET ||
2742         (domain == AF_INET && type == SOCK_PACKET)) {
2743         protocol = tswap16(protocol);
2744     }
2745 
2746     ret = get_errno(socket(domain, type, protocol));
2747     if (ret >= 0) {
2748         ret = sock_flags_fixup(ret, target_type);
2749         if (type == SOCK_PACKET) {
2750             /* Manage an obsolete case :
2751              * if socket type is SOCK_PACKET, bind by name
2752              */
2753             fd_trans_register(ret, &target_packet_trans);
2754         } else if (domain == PF_NETLINK) {
2755             switch (protocol) {
2756 #ifdef CONFIG_RTNETLINK
2757             case NETLINK_ROUTE:
2758                 fd_trans_register(ret, &target_netlink_route_trans);
2759                 break;
2760 #endif
2761             case NETLINK_KOBJECT_UEVENT:
2762                 /* nothing to do: messages are strings */
2763                 break;
2764             case NETLINK_AUDIT:
2765                 fd_trans_register(ret, &target_netlink_audit_trans);
2766                 break;
2767             default:
2768                 g_assert_not_reached();
2769             }
2770         }
2771     }
2772     return ret;
2773 }
2774 
2775 /* do_bind() Must return target values and target errnos. */
2776 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2777                         socklen_t addrlen)
2778 {
2779     void *addr;
2780     abi_long ret;
2781 
2782     if ((int)addrlen < 0) {
2783         return -TARGET_EINVAL;
2784     }
2785 
2786     addr = alloca(addrlen+1);
2787 
2788     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2789     if (ret)
2790         return ret;
2791 
2792     return get_errno(bind(sockfd, addr, addrlen));
2793 }
2794 
2795 /* do_connect() Must return target values and target errnos. */
2796 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2797                            socklen_t addrlen)
2798 {
2799     void *addr;
2800     abi_long ret;
2801 
2802     if ((int)addrlen < 0) {
2803         return -TARGET_EINVAL;
2804     }
2805 
2806     addr = alloca(addrlen+1);
2807 
2808     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2809     if (ret)
2810         return ret;
2811 
2812     return get_errno(safe_connect(sockfd, addr, addrlen));
2813 }
2814 
2815 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2816 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2817                                       int flags, int send)
2818 {
2819     abi_long ret, len;
2820     struct msghdr msg;
2821     abi_ulong count;
2822     struct iovec *vec;
2823     abi_ulong target_vec;
2824 
2825     if (msgp->msg_name) {
2826         msg.msg_namelen = tswap32(msgp->msg_namelen);
2827         msg.msg_name = alloca(msg.msg_namelen+1);
2828         ret = target_to_host_sockaddr(fd, msg.msg_name,
2829                                       tswapal(msgp->msg_name),
2830                                       msg.msg_namelen);
2831         if (ret == -TARGET_EFAULT) {
2832             /* For connected sockets msg_name and msg_namelen must
2833              * be ignored, so returning EFAULT immediately is wrong.
2834              * Instead, pass a bad msg_name to the host kernel, and
2835              * let it decide whether to return EFAULT or not.
2836              */
2837             msg.msg_name = (void *)-1;
2838         } else if (ret) {
2839             goto out2;
2840         }
2841     } else {
2842         msg.msg_name = NULL;
2843         msg.msg_namelen = 0;
2844     }
2845     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2846     msg.msg_control = alloca(msg.msg_controllen);
2847     memset(msg.msg_control, 0, msg.msg_controllen);
2848 
2849     msg.msg_flags = tswap32(msgp->msg_flags);
2850 
2851     count = tswapal(msgp->msg_iovlen);
2852     target_vec = tswapal(msgp->msg_iov);
2853 
2854     if (count > IOV_MAX) {
2855         /* sendrcvmsg returns a different errno for this condition than
2856          * readv/writev, so we must catch it here before lock_iovec() does.
2857          */
2858         ret = -TARGET_EMSGSIZE;
2859         goto out2;
2860     }
2861 
2862     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2863                      target_vec, count, send);
2864     if (vec == NULL) {
2865         ret = -host_to_target_errno(errno);
2866         goto out2;
2867     }
2868     msg.msg_iovlen = count;
2869     msg.msg_iov = vec;
2870 
2871     if (send) {
2872         if (fd_trans_target_to_host_data(fd)) {
2873             void *host_msg;
2874 
2875             host_msg = g_malloc(msg.msg_iov->iov_len);
2876             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2877             ret = fd_trans_target_to_host_data(fd)(host_msg,
2878                                                    msg.msg_iov->iov_len);
2879             if (ret >= 0) {
2880                 msg.msg_iov->iov_base = host_msg;
2881                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2882             }
2883             g_free(host_msg);
2884         } else {
2885             ret = target_to_host_cmsg(&msg, msgp);
2886             if (ret == 0) {
2887                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2888             }
2889         }
2890     } else {
2891         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2892         if (!is_error(ret)) {
2893             len = ret;
2894             if (fd_trans_host_to_target_data(fd)) {
2895                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2896                                                MIN(msg.msg_iov->iov_len, len));
2897             } else {
2898                 ret = host_to_target_cmsg(msgp, &msg);
2899             }
2900             if (!is_error(ret)) {
2901                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2902                 msgp->msg_flags = tswap32(msg.msg_flags);
2903                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2904                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2905                                     msg.msg_name, msg.msg_namelen);
2906                     if (ret) {
2907                         goto out;
2908                     }
2909                 }
2910 
2911                 ret = len;
2912             }
2913         }
2914     }
2915 
2916 out:
2917     unlock_iovec(vec, target_vec, count, !send);
2918 out2:
2919     return ret;
2920 }
2921 
2922 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2923                                int flags, int send)
2924 {
2925     abi_long ret;
2926     struct target_msghdr *msgp;
2927 
2928     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2929                           msgp,
2930                           target_msg,
2931                           send ? 1 : 0)) {
2932         return -TARGET_EFAULT;
2933     }
2934     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2935     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2936     return ret;
2937 }
2938 
2939 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2940  * so it might not have this *mmsg-specific flag either.
2941  */
2942 #ifndef MSG_WAITFORONE
2943 #define MSG_WAITFORONE 0x10000
2944 #endif
2945 
2946 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2947                                 unsigned int vlen, unsigned int flags,
2948                                 int send)
2949 {
2950     struct target_mmsghdr *mmsgp;
2951     abi_long ret = 0;
2952     int i;
2953 
2954     if (vlen > UIO_MAXIOV) {
2955         vlen = UIO_MAXIOV;
2956     }
2957 
2958     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2959     if (!mmsgp) {
2960         return -TARGET_EFAULT;
2961     }
2962 
2963     for (i = 0; i < vlen; i++) {
2964         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2965         if (is_error(ret)) {
2966             break;
2967         }
2968         mmsgp[i].msg_len = tswap32(ret);
2969         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2970         if (flags & MSG_WAITFORONE) {
2971             flags |= MSG_DONTWAIT;
2972         }
2973     }
2974 
2975     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2976 
2977     /* Return number of datagrams sent if we sent any at all;
2978      * otherwise return the error.
2979      */
2980     if (i) {
2981         return i;
2982     }
2983     return ret;
2984 }
2985 
2986 /* do_accept4() Must return target values and target errnos. */
2987 static abi_long do_accept4(int fd, abi_ulong target_addr,
2988                            abi_ulong target_addrlen_addr, int flags)
2989 {
2990     socklen_t addrlen, ret_addrlen;
2991     void *addr;
2992     abi_long ret;
2993     int host_flags;
2994 
2995     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2996 
2997     if (target_addr == 0) {
2998         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2999     }
3000 
3001     /* linux returns EINVAL if addrlen pointer is invalid */
3002     if (get_user_u32(addrlen, target_addrlen_addr))
3003         return -TARGET_EINVAL;
3004 
3005     if ((int)addrlen < 0) {
3006         return -TARGET_EINVAL;
3007     }
3008 
3009     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3010         return -TARGET_EINVAL;
3011 
3012     addr = alloca(addrlen);
3013 
3014     ret_addrlen = addrlen;
3015     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3016     if (!is_error(ret)) {
3017         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3018         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3019             ret = -TARGET_EFAULT;
3020         }
3021     }
3022     return ret;
3023 }
3024 
3025 /* do_getpeername() Must return target values and target errnos. */
3026 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3027                                abi_ulong target_addrlen_addr)
3028 {
3029     socklen_t addrlen, ret_addrlen;
3030     void *addr;
3031     abi_long ret;
3032 
3033     if (get_user_u32(addrlen, target_addrlen_addr))
3034         return -TARGET_EFAULT;
3035 
3036     if ((int)addrlen < 0) {
3037         return -TARGET_EINVAL;
3038     }
3039 
3040     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3041         return -TARGET_EFAULT;
3042 
3043     addr = alloca(addrlen);
3044 
3045     ret_addrlen = addrlen;
3046     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3047     if (!is_error(ret)) {
3048         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3049         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3050             ret = -TARGET_EFAULT;
3051         }
3052     }
3053     return ret;
3054 }
3055 
3056 /* do_getsockname() Must return target values and target errnos. */
3057 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3058                                abi_ulong target_addrlen_addr)
3059 {
3060     socklen_t addrlen, ret_addrlen;
3061     void *addr;
3062     abi_long ret;
3063 
3064     if (get_user_u32(addrlen, target_addrlen_addr))
3065         return -TARGET_EFAULT;
3066 
3067     if ((int)addrlen < 0) {
3068         return -TARGET_EINVAL;
3069     }
3070 
3071     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3072         return -TARGET_EFAULT;
3073 
3074     addr = alloca(addrlen);
3075 
3076     ret_addrlen = addrlen;
3077     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3078     if (!is_error(ret)) {
3079         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3080         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3081             ret = -TARGET_EFAULT;
3082         }
3083     }
3084     return ret;
3085 }
3086 
3087 /* do_socketpair() Must return target values and target errnos. */
3088 static abi_long do_socketpair(int domain, int type, int protocol,
3089                               abi_ulong target_tab_addr)
3090 {
3091     int tab[2];
3092     abi_long ret;
3093 
3094     target_to_host_sock_type(&type);
3095 
3096     ret = get_errno(socketpair(domain, type, protocol, tab));
3097     if (!is_error(ret)) {
3098         if (put_user_s32(tab[0], target_tab_addr)
3099             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3100             ret = -TARGET_EFAULT;
3101     }
3102     return ret;
3103 }
3104 
3105 /* do_sendto() Must return target values and target errnos. */
3106 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3107                           abi_ulong target_addr, socklen_t addrlen)
3108 {
3109     void *addr;
3110     void *host_msg;
3111     void *copy_msg = NULL;
3112     abi_long ret;
3113 
3114     if ((int)addrlen < 0) {
3115         return -TARGET_EINVAL;
3116     }
3117 
3118     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3119     if (!host_msg)
3120         return -TARGET_EFAULT;
3121     if (fd_trans_target_to_host_data(fd)) {
3122         copy_msg = host_msg;
3123         host_msg = g_malloc(len);
3124         memcpy(host_msg, copy_msg, len);
3125         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3126         if (ret < 0) {
3127             goto fail;
3128         }
3129     }
3130     if (target_addr) {
3131         addr = alloca(addrlen+1);
3132         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3133         if (ret) {
3134             goto fail;
3135         }
3136         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3137     } else {
3138         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3139     }
3140 fail:
3141     if (copy_msg) {
3142         g_free(host_msg);
3143         host_msg = copy_msg;
3144     }
3145     unlock_user(host_msg, msg, 0);
3146     return ret;
3147 }
3148 
3149 /* do_recvfrom() Must return target values and target errnos. */
3150 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3151                             abi_ulong target_addr,
3152                             abi_ulong target_addrlen)
3153 {
3154     socklen_t addrlen, ret_addrlen;
3155     void *addr;
3156     void *host_msg;
3157     abi_long ret;
3158 
3159     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3160     if (!host_msg)
3161         return -TARGET_EFAULT;
3162     if (target_addr) {
3163         if (get_user_u32(addrlen, target_addrlen)) {
3164             ret = -TARGET_EFAULT;
3165             goto fail;
3166         }
3167         if ((int)addrlen < 0) {
3168             ret = -TARGET_EINVAL;
3169             goto fail;
3170         }
3171         addr = alloca(addrlen);
3172         ret_addrlen = addrlen;
3173         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3174                                       addr, &ret_addrlen));
3175     } else {
3176         addr = NULL; /* To keep compiler quiet.  */
3177         addrlen = 0; /* To keep compiler quiet.  */
3178         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3179     }
3180     if (!is_error(ret)) {
3181         if (fd_trans_host_to_target_data(fd)) {
3182             abi_long trans;
3183             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3184             if (is_error(trans)) {
3185                 ret = trans;
3186                 goto fail;
3187             }
3188         }
3189         if (target_addr) {
3190             host_to_target_sockaddr(target_addr, addr,
3191                                     MIN(addrlen, ret_addrlen));
3192             if (put_user_u32(ret_addrlen, target_addrlen)) {
3193                 ret = -TARGET_EFAULT;
3194                 goto fail;
3195             }
3196         }
3197         unlock_user(host_msg, msg, len);
3198     } else {
3199 fail:
3200         unlock_user(host_msg, msg, 0);
3201     }
3202     return ret;
3203 }
3204 
3205 #ifdef TARGET_NR_socketcall
3206 /* do_socketcall() must return target values and target errnos. */
3207 static abi_long do_socketcall(int num, abi_ulong vptr)
3208 {
3209     static const unsigned nargs[] = { /* number of arguments per operation */
3210         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3211         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3212         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3213         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3214         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3215         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3216         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3217         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3218         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3219         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3220         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3221         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3222         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3223         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3224         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3225         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3226         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3227         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3228         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3229         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3230     };
3231     abi_long a[6]; /* max 6 args */
3232     unsigned i;
3233 
3234     /* check the range of the first argument num */
3235     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3236     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3237         return -TARGET_EINVAL;
3238     }
3239     /* ensure we have space for args */
3240     if (nargs[num] > ARRAY_SIZE(a)) {
3241         return -TARGET_EINVAL;
3242     }
3243     /* collect the arguments in a[] according to nargs[] */
3244     for (i = 0; i < nargs[num]; ++i) {
3245         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3246             return -TARGET_EFAULT;
3247         }
3248     }
3249     /* now when we have the args, invoke the appropriate underlying function */
3250     switch (num) {
3251     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3252         return do_socket(a[0], a[1], a[2]);
3253     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3254         return do_bind(a[0], a[1], a[2]);
3255     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3256         return do_connect(a[0], a[1], a[2]);
3257     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3258         return get_errno(listen(a[0], a[1]));
3259     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3260         return do_accept4(a[0], a[1], a[2], 0);
3261     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3262         return do_getsockname(a[0], a[1], a[2]);
3263     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3264         return do_getpeername(a[0], a[1], a[2]);
3265     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3266         return do_socketpair(a[0], a[1], a[2], a[3]);
3267     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3268         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3269     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3270         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3271     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3272         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3273     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3274         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3275     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3276         return get_errno(shutdown(a[0], a[1]));
3277     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3278         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3279     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3280         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3281     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3282         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3283     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3284         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3285     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3286         return do_accept4(a[0], a[1], a[2], a[3]);
3287     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3288         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3289     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3290         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3291     default:
3292         gemu_log("Unsupported socketcall: %d\n", num);
3293         return -TARGET_EINVAL;
3294     }
3295 }
3296 #endif
3297 
3298 #define N_SHM_REGIONS	32
3299 
3300 static struct shm_region {
3301     abi_ulong start;
3302     abi_ulong size;
3303     bool in_use;
3304 } shm_regions[N_SHM_REGIONS];
3305 
3306 #ifndef TARGET_SEMID64_DS
3307 /* asm-generic version of this struct */
3308 struct target_semid64_ds
3309 {
3310   struct target_ipc_perm sem_perm;
3311   abi_ulong sem_otime;
3312 #if TARGET_ABI_BITS == 32
3313   abi_ulong __unused1;
3314 #endif
3315   abi_ulong sem_ctime;
3316 #if TARGET_ABI_BITS == 32
3317   abi_ulong __unused2;
3318 #endif
3319   abi_ulong sem_nsems;
3320   abi_ulong __unused3;
3321   abi_ulong __unused4;
3322 };
3323 #endif
3324 
3325 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3326                                                abi_ulong target_addr)
3327 {
3328     struct target_ipc_perm *target_ip;
3329     struct target_semid64_ds *target_sd;
3330 
3331     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3332         return -TARGET_EFAULT;
3333     target_ip = &(target_sd->sem_perm);
3334     host_ip->__key = tswap32(target_ip->__key);
3335     host_ip->uid = tswap32(target_ip->uid);
3336     host_ip->gid = tswap32(target_ip->gid);
3337     host_ip->cuid = tswap32(target_ip->cuid);
3338     host_ip->cgid = tswap32(target_ip->cgid);
3339 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3340     host_ip->mode = tswap32(target_ip->mode);
3341 #else
3342     host_ip->mode = tswap16(target_ip->mode);
3343 #endif
3344 #if defined(TARGET_PPC)
3345     host_ip->__seq = tswap32(target_ip->__seq);
3346 #else
3347     host_ip->__seq = tswap16(target_ip->__seq);
3348 #endif
3349     unlock_user_struct(target_sd, target_addr, 0);
3350     return 0;
3351 }
3352 
3353 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3354                                                struct ipc_perm *host_ip)
3355 {
3356     struct target_ipc_perm *target_ip;
3357     struct target_semid64_ds *target_sd;
3358 
3359     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3360         return -TARGET_EFAULT;
3361     target_ip = &(target_sd->sem_perm);
3362     target_ip->__key = tswap32(host_ip->__key);
3363     target_ip->uid = tswap32(host_ip->uid);
3364     target_ip->gid = tswap32(host_ip->gid);
3365     target_ip->cuid = tswap32(host_ip->cuid);
3366     target_ip->cgid = tswap32(host_ip->cgid);
3367 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3368     target_ip->mode = tswap32(host_ip->mode);
3369 #else
3370     target_ip->mode = tswap16(host_ip->mode);
3371 #endif
3372 #if defined(TARGET_PPC)
3373     target_ip->__seq = tswap32(host_ip->__seq);
3374 #else
3375     target_ip->__seq = tswap16(host_ip->__seq);
3376 #endif
3377     unlock_user_struct(target_sd, target_addr, 1);
3378     return 0;
3379 }
3380 
3381 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3382                                                abi_ulong target_addr)
3383 {
3384     struct target_semid64_ds *target_sd;
3385 
3386     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3387         return -TARGET_EFAULT;
3388     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3389         return -TARGET_EFAULT;
3390     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3391     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3392     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3393     unlock_user_struct(target_sd, target_addr, 0);
3394     return 0;
3395 }
3396 
3397 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3398                                                struct semid_ds *host_sd)
3399 {
3400     struct target_semid64_ds *target_sd;
3401 
3402     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3403         return -TARGET_EFAULT;
3404     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3405         return -TARGET_EFAULT;
3406     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3407     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3408     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3409     unlock_user_struct(target_sd, target_addr, 1);
3410     return 0;
3411 }
3412 
3413 struct target_seminfo {
3414     int semmap;
3415     int semmni;
3416     int semmns;
3417     int semmnu;
3418     int semmsl;
3419     int semopm;
3420     int semume;
3421     int semusz;
3422     int semvmx;
3423     int semaem;
3424 };
3425 
3426 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3427                                               struct seminfo *host_seminfo)
3428 {
3429     struct target_seminfo *target_seminfo;
3430     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3431         return -TARGET_EFAULT;
3432     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3433     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3434     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3435     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3436     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3437     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3438     __put_user(host_seminfo->semume, &target_seminfo->semume);
3439     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3440     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3441     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3442     unlock_user_struct(target_seminfo, target_addr, 1);
3443     return 0;
3444 }
3445 
3446 union semun {
3447 	int val;
3448 	struct semid_ds *buf;
3449 	unsigned short *array;
3450 	struct seminfo *__buf;
3451 };
3452 
3453 union target_semun {
3454 	int val;
3455 	abi_ulong buf;
3456 	abi_ulong array;
3457 	abi_ulong __buf;
3458 };
3459 
3460 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3461                                                abi_ulong target_addr)
3462 {
3463     int nsems;
3464     unsigned short *array;
3465     union semun semun;
3466     struct semid_ds semid_ds;
3467     int i, ret;
3468 
3469     semun.buf = &semid_ds;
3470 
3471     ret = semctl(semid, 0, IPC_STAT, semun);
3472     if (ret == -1)
3473         return get_errno(ret);
3474 
3475     nsems = semid_ds.sem_nsems;
3476 
3477     *host_array = g_try_new(unsigned short, nsems);
3478     if (!*host_array) {
3479         return -TARGET_ENOMEM;
3480     }
3481     array = lock_user(VERIFY_READ, target_addr,
3482                       nsems*sizeof(unsigned short), 1);
3483     if (!array) {
3484         g_free(*host_array);
3485         return -TARGET_EFAULT;
3486     }
3487 
3488     for(i=0; i<nsems; i++) {
3489         __get_user((*host_array)[i], &array[i]);
3490     }
3491     unlock_user(array, target_addr, 0);
3492 
3493     return 0;
3494 }
3495 
3496 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3497                                                unsigned short **host_array)
3498 {
3499     int nsems;
3500     unsigned short *array;
3501     union semun semun;
3502     struct semid_ds semid_ds;
3503     int i, ret;
3504 
3505     semun.buf = &semid_ds;
3506 
3507     ret = semctl(semid, 0, IPC_STAT, semun);
3508     if (ret == -1)
3509         return get_errno(ret);
3510 
3511     nsems = semid_ds.sem_nsems;
3512 
3513     array = lock_user(VERIFY_WRITE, target_addr,
3514                       nsems*sizeof(unsigned short), 0);
3515     if (!array)
3516         return -TARGET_EFAULT;
3517 
3518     for(i=0; i<nsems; i++) {
3519         __put_user((*host_array)[i], &array[i]);
3520     }
3521     g_free(*host_array);
3522     unlock_user(array, target_addr, 1);
3523 
3524     return 0;
3525 }
3526 
3527 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3528                                  abi_ulong target_arg)
3529 {
3530     union target_semun target_su = { .buf = target_arg };
3531     union semun arg;
3532     struct semid_ds dsarg;
3533     unsigned short *array = NULL;
3534     struct seminfo seminfo;
3535     abi_long ret = -TARGET_EINVAL;
3536     abi_long err;
3537     cmd &= 0xff;
3538 
3539     switch( cmd ) {
3540 	case GETVAL:
3541 	case SETVAL:
3542             /* In 64 bit cross-endian situations, we will erroneously pick up
3543              * the wrong half of the union for the "val" element.  To rectify
3544              * this, the entire 8-byte structure is byteswapped, followed by
3545 	     * a swap of the 4 byte val field. In other cases, the data is
3546 	     * already in proper host byte order. */
3547 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3548 		target_su.buf = tswapal(target_su.buf);
3549 		arg.val = tswap32(target_su.val);
3550 	    } else {
3551 		arg.val = target_su.val;
3552 	    }
3553             ret = get_errno(semctl(semid, semnum, cmd, arg));
3554             break;
3555 	case GETALL:
3556 	case SETALL:
3557             err = target_to_host_semarray(semid, &array, target_su.array);
3558             if (err)
3559                 return err;
3560             arg.array = array;
3561             ret = get_errno(semctl(semid, semnum, cmd, arg));
3562             err = host_to_target_semarray(semid, target_su.array, &array);
3563             if (err)
3564                 return err;
3565             break;
3566 	case IPC_STAT:
3567 	case IPC_SET:
3568 	case SEM_STAT:
3569             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3570             if (err)
3571                 return err;
3572             arg.buf = &dsarg;
3573             ret = get_errno(semctl(semid, semnum, cmd, arg));
3574             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3575             if (err)
3576                 return err;
3577             break;
3578 	case IPC_INFO:
3579 	case SEM_INFO:
3580             arg.__buf = &seminfo;
3581             ret = get_errno(semctl(semid, semnum, cmd, arg));
3582             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3583             if (err)
3584                 return err;
3585             break;
3586 	case IPC_RMID:
3587 	case GETPID:
3588 	case GETNCNT:
3589 	case GETZCNT:
3590             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3591             break;
3592     }
3593 
3594     return ret;
3595 }
3596 
3597 struct target_sembuf {
3598     unsigned short sem_num;
3599     short sem_op;
3600     short sem_flg;
3601 };
3602 
3603 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3604                                              abi_ulong target_addr,
3605                                              unsigned nsops)
3606 {
3607     struct target_sembuf *target_sembuf;
3608     int i;
3609 
3610     target_sembuf = lock_user(VERIFY_READ, target_addr,
3611                               nsops*sizeof(struct target_sembuf), 1);
3612     if (!target_sembuf)
3613         return -TARGET_EFAULT;
3614 
3615     for(i=0; i<nsops; i++) {
3616         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3617         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3618         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3619     }
3620 
3621     unlock_user(target_sembuf, target_addr, 0);
3622 
3623     return 0;
3624 }
3625 
3626 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3627 {
3628     struct sembuf sops[nsops];
3629     abi_long ret;
3630 
3631     if (target_to_host_sembuf(sops, ptr, nsops))
3632         return -TARGET_EFAULT;
3633 
3634     ret = -TARGET_ENOSYS;
3635 #ifdef __NR_semtimedop
3636     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3637 #endif
3638 #ifdef __NR_ipc
3639     if (ret == -TARGET_ENOSYS) {
3640         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3641     }
3642 #endif
3643     return ret;
3644 }
3645 
3646 struct target_msqid_ds
3647 {
3648     struct target_ipc_perm msg_perm;
3649     abi_ulong msg_stime;
3650 #if TARGET_ABI_BITS == 32
3651     abi_ulong __unused1;
3652 #endif
3653     abi_ulong msg_rtime;
3654 #if TARGET_ABI_BITS == 32
3655     abi_ulong __unused2;
3656 #endif
3657     abi_ulong msg_ctime;
3658 #if TARGET_ABI_BITS == 32
3659     abi_ulong __unused3;
3660 #endif
3661     abi_ulong __msg_cbytes;
3662     abi_ulong msg_qnum;
3663     abi_ulong msg_qbytes;
3664     abi_ulong msg_lspid;
3665     abi_ulong msg_lrpid;
3666     abi_ulong __unused4;
3667     abi_ulong __unused5;
3668 };
3669 
3670 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3671                                                abi_ulong target_addr)
3672 {
3673     struct target_msqid_ds *target_md;
3674 
3675     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3676         return -TARGET_EFAULT;
3677     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3678         return -TARGET_EFAULT;
3679     host_md->msg_stime = tswapal(target_md->msg_stime);
3680     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3681     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3682     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3683     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3684     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3685     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3686     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3687     unlock_user_struct(target_md, target_addr, 0);
3688     return 0;
3689 }
3690 
3691 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3692                                                struct msqid_ds *host_md)
3693 {
3694     struct target_msqid_ds *target_md;
3695 
3696     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3697         return -TARGET_EFAULT;
3698     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3699         return -TARGET_EFAULT;
3700     target_md->msg_stime = tswapal(host_md->msg_stime);
3701     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3702     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3703     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3704     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3705     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3706     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3707     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3708     unlock_user_struct(target_md, target_addr, 1);
3709     return 0;
3710 }
3711 
3712 struct target_msginfo {
3713     int msgpool;
3714     int msgmap;
3715     int msgmax;
3716     int msgmnb;
3717     int msgmni;
3718     int msgssz;
3719     int msgtql;
3720     unsigned short int msgseg;
3721 };
3722 
3723 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3724                                               struct msginfo *host_msginfo)
3725 {
3726     struct target_msginfo *target_msginfo;
3727     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3728         return -TARGET_EFAULT;
3729     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3730     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3731     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3732     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3733     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3734     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3735     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3736     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3737     unlock_user_struct(target_msginfo, target_addr, 1);
3738     return 0;
3739 }
3740 
3741 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3742 {
3743     struct msqid_ds dsarg;
3744     struct msginfo msginfo;
3745     abi_long ret = -TARGET_EINVAL;
3746 
3747     cmd &= 0xff;
3748 
3749     switch (cmd) {
3750     case IPC_STAT:
3751     case IPC_SET:
3752     case MSG_STAT:
3753         if (target_to_host_msqid_ds(&dsarg,ptr))
3754             return -TARGET_EFAULT;
3755         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3756         if (host_to_target_msqid_ds(ptr,&dsarg))
3757             return -TARGET_EFAULT;
3758         break;
3759     case IPC_RMID:
3760         ret = get_errno(msgctl(msgid, cmd, NULL));
3761         break;
3762     case IPC_INFO:
3763     case MSG_INFO:
3764         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3765         if (host_to_target_msginfo(ptr, &msginfo))
3766             return -TARGET_EFAULT;
3767         break;
3768     }
3769 
3770     return ret;
3771 }
3772 
3773 struct target_msgbuf {
3774     abi_long mtype;
3775     char	mtext[1];
3776 };
3777 
3778 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3779                                  ssize_t msgsz, int msgflg)
3780 {
3781     struct target_msgbuf *target_mb;
3782     struct msgbuf *host_mb;
3783     abi_long ret = 0;
3784 
3785     if (msgsz < 0) {
3786         return -TARGET_EINVAL;
3787     }
3788 
3789     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3790         return -TARGET_EFAULT;
3791     host_mb = g_try_malloc(msgsz + sizeof(long));
3792     if (!host_mb) {
3793         unlock_user_struct(target_mb, msgp, 0);
3794         return -TARGET_ENOMEM;
3795     }
3796     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3797     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3798     ret = -TARGET_ENOSYS;
3799 #ifdef __NR_msgsnd
3800     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3801 #endif
3802 #ifdef __NR_ipc
3803     if (ret == -TARGET_ENOSYS) {
3804         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3805                                  host_mb, 0));
3806     }
3807 #endif
3808     g_free(host_mb);
3809     unlock_user_struct(target_mb, msgp, 0);
3810 
3811     return ret;
3812 }
3813 
3814 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3815                                  ssize_t msgsz, abi_long msgtyp,
3816                                  int msgflg)
3817 {
3818     struct target_msgbuf *target_mb;
3819     char *target_mtext;
3820     struct msgbuf *host_mb;
3821     abi_long ret = 0;
3822 
3823     if (msgsz < 0) {
3824         return -TARGET_EINVAL;
3825     }
3826 
3827     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3828         return -TARGET_EFAULT;
3829 
3830     host_mb = g_try_malloc(msgsz + sizeof(long));
3831     if (!host_mb) {
3832         ret = -TARGET_ENOMEM;
3833         goto end;
3834     }
3835     ret = -TARGET_ENOSYS;
3836 #ifdef __NR_msgrcv
3837     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3838 #endif
3839 #ifdef __NR_ipc
3840     if (ret == -TARGET_ENOSYS) {
3841         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3842                         msgflg, host_mb, msgtyp));
3843     }
3844 #endif
3845 
3846     if (ret > 0) {
3847         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3848         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3849         if (!target_mtext) {
3850             ret = -TARGET_EFAULT;
3851             goto end;
3852         }
3853         memcpy(target_mb->mtext, host_mb->mtext, ret);
3854         unlock_user(target_mtext, target_mtext_addr, ret);
3855     }
3856 
3857     target_mb->mtype = tswapal(host_mb->mtype);
3858 
3859 end:
3860     if (target_mb)
3861         unlock_user_struct(target_mb, msgp, 1);
3862     g_free(host_mb);
3863     return ret;
3864 }
3865 
3866 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3867                                                abi_ulong target_addr)
3868 {
3869     struct target_shmid_ds *target_sd;
3870 
3871     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3872         return -TARGET_EFAULT;
3873     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3874         return -TARGET_EFAULT;
3875     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3876     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3877     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3878     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3879     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3880     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3881     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3882     unlock_user_struct(target_sd, target_addr, 0);
3883     return 0;
3884 }
3885 
3886 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3887                                                struct shmid_ds *host_sd)
3888 {
3889     struct target_shmid_ds *target_sd;
3890 
3891     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3892         return -TARGET_EFAULT;
3893     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3894         return -TARGET_EFAULT;
3895     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3896     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3897     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3898     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3899     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3900     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3901     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3902     unlock_user_struct(target_sd, target_addr, 1);
3903     return 0;
3904 }
3905 
3906 struct  target_shminfo {
3907     abi_ulong shmmax;
3908     abi_ulong shmmin;
3909     abi_ulong shmmni;
3910     abi_ulong shmseg;
3911     abi_ulong shmall;
3912 };
3913 
3914 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3915                                               struct shminfo *host_shminfo)
3916 {
3917     struct target_shminfo *target_shminfo;
3918     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3919         return -TARGET_EFAULT;
3920     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3921     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3922     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3923     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3924     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3925     unlock_user_struct(target_shminfo, target_addr, 1);
3926     return 0;
3927 }
3928 
3929 struct target_shm_info {
3930     int used_ids;
3931     abi_ulong shm_tot;
3932     abi_ulong shm_rss;
3933     abi_ulong shm_swp;
3934     abi_ulong swap_attempts;
3935     abi_ulong swap_successes;
3936 };
3937 
3938 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3939                                                struct shm_info *host_shm_info)
3940 {
3941     struct target_shm_info *target_shm_info;
3942     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3943         return -TARGET_EFAULT;
3944     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3945     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3946     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3947     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3948     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3949     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3950     unlock_user_struct(target_shm_info, target_addr, 1);
3951     return 0;
3952 }
3953 
3954 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3955 {
3956     struct shmid_ds dsarg;
3957     struct shminfo shminfo;
3958     struct shm_info shm_info;
3959     abi_long ret = -TARGET_EINVAL;
3960 
3961     cmd &= 0xff;
3962 
3963     switch(cmd) {
3964     case IPC_STAT:
3965     case IPC_SET:
3966     case SHM_STAT:
3967         if (target_to_host_shmid_ds(&dsarg, buf))
3968             return -TARGET_EFAULT;
3969         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3970         if (host_to_target_shmid_ds(buf, &dsarg))
3971             return -TARGET_EFAULT;
3972         break;
3973     case IPC_INFO:
3974         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3975         if (host_to_target_shminfo(buf, &shminfo))
3976             return -TARGET_EFAULT;
3977         break;
3978     case SHM_INFO:
3979         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3980         if (host_to_target_shm_info(buf, &shm_info))
3981             return -TARGET_EFAULT;
3982         break;
3983     case IPC_RMID:
3984     case SHM_LOCK:
3985     case SHM_UNLOCK:
3986         ret = get_errno(shmctl(shmid, cmd, NULL));
3987         break;
3988     }
3989 
3990     return ret;
3991 }
3992 
3993 #ifndef TARGET_FORCE_SHMLBA
3994 /* For most architectures, SHMLBA is the same as the page size;
3995  * some architectures have larger values, in which case they should
3996  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3997  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3998  * and defining its own value for SHMLBA.
3999  *
4000  * The kernel also permits SHMLBA to be set by the architecture to a
4001  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4002  * this means that addresses are rounded to the large size if
4003  * SHM_RND is set but addresses not aligned to that size are not rejected
4004  * as long as they are at least page-aligned. Since the only architecture
4005  * which uses this is ia64 this code doesn't provide for that oddity.
4006  */
4007 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4008 {
4009     return TARGET_PAGE_SIZE;
4010 }
4011 #endif
4012 
4013 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4014                                  int shmid, abi_ulong shmaddr, int shmflg)
4015 {
4016     abi_long raddr;
4017     void *host_raddr;
4018     struct shmid_ds shm_info;
4019     int i,ret;
4020     abi_ulong shmlba;
4021 
4022     /* find out the length of the shared memory segment */
4023     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4024     if (is_error(ret)) {
4025         /* can't get length, bail out */
4026         return ret;
4027     }
4028 
4029     shmlba = target_shmlba(cpu_env);
4030 
4031     if (shmaddr & (shmlba - 1)) {
4032         if (shmflg & SHM_RND) {
4033             shmaddr &= ~(shmlba - 1);
4034         } else {
4035             return -TARGET_EINVAL;
4036         }
4037     }
4038     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4039         return -TARGET_EINVAL;
4040     }
4041 
4042     mmap_lock();
4043 
4044     if (shmaddr)
4045         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4046     else {
4047         abi_ulong mmap_start;
4048 
4049         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4050         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4051 
4052         if (mmap_start == -1) {
4053             errno = ENOMEM;
4054             host_raddr = (void *)-1;
4055         } else
4056             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4057     }
4058 
4059     if (host_raddr == (void *)-1) {
4060         mmap_unlock();
4061         return get_errno((long)host_raddr);
4062     }
4063     raddr=h2g((unsigned long)host_raddr);
4064 
4065     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4066                    PAGE_VALID | PAGE_READ |
4067                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4068 
4069     for (i = 0; i < N_SHM_REGIONS; i++) {
4070         if (!shm_regions[i].in_use) {
4071             shm_regions[i].in_use = true;
4072             shm_regions[i].start = raddr;
4073             shm_regions[i].size = shm_info.shm_segsz;
4074             break;
4075         }
4076     }
4077 
4078     mmap_unlock();
4079     return raddr;
4080 
4081 }
4082 
4083 static inline abi_long do_shmdt(abi_ulong shmaddr)
4084 {
4085     int i;
4086     abi_long rv;
4087 
4088     mmap_lock();
4089 
4090     for (i = 0; i < N_SHM_REGIONS; ++i) {
4091         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4092             shm_regions[i].in_use = false;
4093             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4094             break;
4095         }
4096     }
4097     rv = get_errno(shmdt(g2h(shmaddr)));
4098 
4099     mmap_unlock();
4100 
4101     return rv;
4102 }
4103 
4104 #ifdef TARGET_NR_ipc
4105 /* ??? This only works with linear mappings.  */
4106 /* do_ipc() must return target values and target errnos. */
4107 static abi_long do_ipc(CPUArchState *cpu_env,
4108                        unsigned int call, abi_long first,
4109                        abi_long second, abi_long third,
4110                        abi_long ptr, abi_long fifth)
4111 {
4112     int version;
4113     abi_long ret = 0;
4114 
4115     version = call >> 16;
4116     call &= 0xffff;
4117 
4118     switch (call) {
4119     case IPCOP_semop:
4120         ret = do_semop(first, ptr, second);
4121         break;
4122 
4123     case IPCOP_semget:
4124         ret = get_errno(semget(first, second, third));
4125         break;
4126 
4127     case IPCOP_semctl: {
4128         /* The semun argument to semctl is passed by value, so dereference the
4129          * ptr argument. */
4130         abi_ulong atptr;
4131         get_user_ual(atptr, ptr);
4132         ret = do_semctl(first, second, third, atptr);
4133         break;
4134     }
4135 
4136     case IPCOP_msgget:
4137         ret = get_errno(msgget(first, second));
4138         break;
4139 
4140     case IPCOP_msgsnd:
4141         ret = do_msgsnd(first, ptr, second, third);
4142         break;
4143 
4144     case IPCOP_msgctl:
4145         ret = do_msgctl(first, second, ptr);
4146         break;
4147 
4148     case IPCOP_msgrcv:
4149         switch (version) {
4150         case 0:
4151             {
4152                 struct target_ipc_kludge {
4153                     abi_long msgp;
4154                     abi_long msgtyp;
4155                 } *tmp;
4156 
4157                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4158                     ret = -TARGET_EFAULT;
4159                     break;
4160                 }
4161 
4162                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4163 
4164                 unlock_user_struct(tmp, ptr, 0);
4165                 break;
4166             }
4167         default:
4168             ret = do_msgrcv(first, ptr, second, fifth, third);
4169         }
4170         break;
4171 
4172     case IPCOP_shmat:
4173         switch (version) {
4174         default:
4175         {
4176             abi_ulong raddr;
4177             raddr = do_shmat(cpu_env, first, ptr, second);
4178             if (is_error(raddr))
4179                 return get_errno(raddr);
4180             if (put_user_ual(raddr, third))
4181                 return -TARGET_EFAULT;
4182             break;
4183         }
4184         case 1:
4185             ret = -TARGET_EINVAL;
4186             break;
4187         }
4188 	break;
4189     case IPCOP_shmdt:
4190         ret = do_shmdt(ptr);
4191 	break;
4192 
4193     case IPCOP_shmget:
4194 	/* IPC_* flag values are the same on all linux platforms */
4195 	ret = get_errno(shmget(first, second, third));
4196 	break;
4197 
4198 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4199     case IPCOP_shmctl:
4200         ret = do_shmctl(first, second, ptr);
4201         break;
4202     default:
4203 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4204 	ret = -TARGET_ENOSYS;
4205 	break;
4206     }
4207     return ret;
4208 }
4209 #endif
4210 
4211 /* kernel structure types definitions */
4212 
4213 #define STRUCT(name, ...) STRUCT_ ## name,
4214 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4215 enum {
4216 #include "syscall_types.h"
4217 STRUCT_MAX
4218 };
4219 #undef STRUCT
4220 #undef STRUCT_SPECIAL
4221 
4222 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4223 #define STRUCT_SPECIAL(name)
4224 #include "syscall_types.h"
4225 #undef STRUCT
4226 #undef STRUCT_SPECIAL
4227 
4228 typedef struct IOCTLEntry IOCTLEntry;
4229 
4230 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4231                              int fd, int cmd, abi_long arg);
4232 
4233 struct IOCTLEntry {
4234     int target_cmd;
4235     unsigned int host_cmd;
4236     const char *name;
4237     int access;
4238     do_ioctl_fn *do_ioctl;
4239     const argtype arg_type[5];
4240 };
4241 
4242 #define IOC_R 0x0001
4243 #define IOC_W 0x0002
4244 #define IOC_RW (IOC_R | IOC_W)
4245 
4246 #define MAX_STRUCT_SIZE 4096
4247 
4248 #ifdef CONFIG_FIEMAP
4249 /* So fiemap access checks don't overflow on 32 bit systems.
4250  * This is very slightly smaller than the limit imposed by
4251  * the underlying kernel.
4252  */
4253 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4254                             / sizeof(struct fiemap_extent))
4255 
4256 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4257                                        int fd, int cmd, abi_long arg)
4258 {
4259     /* The parameter for this ioctl is a struct fiemap followed
4260      * by an array of struct fiemap_extent whose size is set
4261      * in fiemap->fm_extent_count. The array is filled in by the
4262      * ioctl.
4263      */
4264     int target_size_in, target_size_out;
4265     struct fiemap *fm;
4266     const argtype *arg_type = ie->arg_type;
4267     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4268     void *argptr, *p;
4269     abi_long ret;
4270     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4271     uint32_t outbufsz;
4272     int free_fm = 0;
4273 
4274     assert(arg_type[0] == TYPE_PTR);
4275     assert(ie->access == IOC_RW);
4276     arg_type++;
4277     target_size_in = thunk_type_size(arg_type, 0);
4278     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4279     if (!argptr) {
4280         return -TARGET_EFAULT;
4281     }
4282     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4283     unlock_user(argptr, arg, 0);
4284     fm = (struct fiemap *)buf_temp;
4285     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4286         return -TARGET_EINVAL;
4287     }
4288 
4289     outbufsz = sizeof (*fm) +
4290         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4291 
4292     if (outbufsz > MAX_STRUCT_SIZE) {
4293         /* We can't fit all the extents into the fixed size buffer.
4294          * Allocate one that is large enough and use it instead.
4295          */
4296         fm = g_try_malloc(outbufsz);
4297         if (!fm) {
4298             return -TARGET_ENOMEM;
4299         }
4300         memcpy(fm, buf_temp, sizeof(struct fiemap));
4301         free_fm = 1;
4302     }
4303     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4304     if (!is_error(ret)) {
4305         target_size_out = target_size_in;
4306         /* An extent_count of 0 means we were only counting the extents
4307          * so there are no structs to copy
4308          */
4309         if (fm->fm_extent_count != 0) {
4310             target_size_out += fm->fm_mapped_extents * extent_size;
4311         }
4312         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4313         if (!argptr) {
4314             ret = -TARGET_EFAULT;
4315         } else {
4316             /* Convert the struct fiemap */
4317             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4318             if (fm->fm_extent_count != 0) {
4319                 p = argptr + target_size_in;
4320                 /* ...and then all the struct fiemap_extents */
4321                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4322                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4323                                   THUNK_TARGET);
4324                     p += extent_size;
4325                 }
4326             }
4327             unlock_user(argptr, arg, target_size_out);
4328         }
4329     }
4330     if (free_fm) {
4331         g_free(fm);
4332     }
4333     return ret;
4334 }
4335 #endif
4336 
4337 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4338                                 int fd, int cmd, abi_long arg)
4339 {
4340     const argtype *arg_type = ie->arg_type;
4341     int target_size;
4342     void *argptr;
4343     int ret;
4344     struct ifconf *host_ifconf;
4345     uint32_t outbufsz;
4346     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4347     int target_ifreq_size;
4348     int nb_ifreq;
4349     int free_buf = 0;
4350     int i;
4351     int target_ifc_len;
4352     abi_long target_ifc_buf;
4353     int host_ifc_len;
4354     char *host_ifc_buf;
4355 
4356     assert(arg_type[0] == TYPE_PTR);
4357     assert(ie->access == IOC_RW);
4358 
4359     arg_type++;
4360     target_size = thunk_type_size(arg_type, 0);
4361 
4362     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4363     if (!argptr)
4364         return -TARGET_EFAULT;
4365     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4366     unlock_user(argptr, arg, 0);
4367 
4368     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4369     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4370     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4371 
4372     if (target_ifc_buf != 0) {
4373         target_ifc_len = host_ifconf->ifc_len;
4374         nb_ifreq = target_ifc_len / target_ifreq_size;
4375         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4376 
4377         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4378         if (outbufsz > MAX_STRUCT_SIZE) {
4379             /*
4380              * We can't fit all the extents into the fixed size buffer.
4381              * Allocate one that is large enough and use it instead.
4382              */
4383             host_ifconf = malloc(outbufsz);
4384             if (!host_ifconf) {
4385                 return -TARGET_ENOMEM;
4386             }
4387             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4388             free_buf = 1;
4389         }
4390         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4391 
4392         host_ifconf->ifc_len = host_ifc_len;
4393     } else {
4394       host_ifc_buf = NULL;
4395     }
4396     host_ifconf->ifc_buf = host_ifc_buf;
4397 
4398     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4399     if (!is_error(ret)) {
4400 	/* convert host ifc_len to target ifc_len */
4401 
4402         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4403         target_ifc_len = nb_ifreq * target_ifreq_size;
4404         host_ifconf->ifc_len = target_ifc_len;
4405 
4406 	/* restore target ifc_buf */
4407 
4408         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4409 
4410 	/* copy struct ifconf to target user */
4411 
4412         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4413         if (!argptr)
4414             return -TARGET_EFAULT;
4415         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4416         unlock_user(argptr, arg, target_size);
4417 
4418         if (target_ifc_buf != 0) {
4419             /* copy ifreq[] to target user */
4420             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4421             for (i = 0; i < nb_ifreq ; i++) {
4422                 thunk_convert(argptr + i * target_ifreq_size,
4423                               host_ifc_buf + i * sizeof(struct ifreq),
4424                               ifreq_arg_type, THUNK_TARGET);
4425             }
4426             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4427         }
4428     }
4429 
4430     if (free_buf) {
4431         free(host_ifconf);
4432     }
4433 
4434     return ret;
4435 }
4436 
4437 #if defined(CONFIG_USBFS)
4438 #if HOST_LONG_BITS > 64
4439 #error USBDEVFS thunks do not support >64 bit hosts yet.
4440 #endif
4441 struct live_urb {
4442     uint64_t target_urb_adr;
4443     uint64_t target_buf_adr;
4444     char *target_buf_ptr;
4445     struct usbdevfs_urb host_urb;
4446 };
4447 
4448 static GHashTable *usbdevfs_urb_hashtable(void)
4449 {
4450     static GHashTable *urb_hashtable;
4451 
4452     if (!urb_hashtable) {
4453         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4454     }
4455     return urb_hashtable;
4456 }
4457 
4458 static void urb_hashtable_insert(struct live_urb *urb)
4459 {
4460     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4461     g_hash_table_insert(urb_hashtable, urb, urb);
4462 }
4463 
4464 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4465 {
4466     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4467     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4468 }
4469 
4470 static void urb_hashtable_remove(struct live_urb *urb)
4471 {
4472     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4473     g_hash_table_remove(urb_hashtable, urb);
4474 }
4475 
4476 static abi_long
4477 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4478                           int fd, int cmd, abi_long arg)
4479 {
4480     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4481     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4482     struct live_urb *lurb;
4483     void *argptr;
4484     uint64_t hurb;
4485     int target_size;
4486     uintptr_t target_urb_adr;
4487     abi_long ret;
4488 
4489     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4490 
4491     memset(buf_temp, 0, sizeof(uint64_t));
4492     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4493     if (is_error(ret)) {
4494         return ret;
4495     }
4496 
4497     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4498     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4499     if (!lurb->target_urb_adr) {
4500         return -TARGET_EFAULT;
4501     }
4502     urb_hashtable_remove(lurb);
4503     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4504         lurb->host_urb.buffer_length);
4505     lurb->target_buf_ptr = NULL;
4506 
4507     /* restore the guest buffer pointer */
4508     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4509 
4510     /* update the guest urb struct */
4511     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4512     if (!argptr) {
4513         g_free(lurb);
4514         return -TARGET_EFAULT;
4515     }
4516     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4517     unlock_user(argptr, lurb->target_urb_adr, target_size);
4518 
4519     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4520     /* write back the urb handle */
4521     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4522     if (!argptr) {
4523         g_free(lurb);
4524         return -TARGET_EFAULT;
4525     }
4526 
4527     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4528     target_urb_adr = lurb->target_urb_adr;
4529     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4530     unlock_user(argptr, arg, target_size);
4531 
4532     g_free(lurb);
4533     return ret;
4534 }
4535 
4536 static abi_long
4537 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4538                              uint8_t *buf_temp __attribute__((unused)),
4539                              int fd, int cmd, abi_long arg)
4540 {
4541     struct live_urb *lurb;
4542 
4543     /* map target address back to host URB with metadata. */
4544     lurb = urb_hashtable_lookup(arg);
4545     if (!lurb) {
4546         return -TARGET_EFAULT;
4547     }
4548     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4549 }
4550 
4551 static abi_long
4552 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4553                             int fd, int cmd, abi_long arg)
4554 {
4555     const argtype *arg_type = ie->arg_type;
4556     int target_size;
4557     abi_long ret;
4558     void *argptr;
4559     int rw_dir;
4560     struct live_urb *lurb;
4561 
4562     /*
4563      * each submitted URB needs to map to a unique ID for the
4564      * kernel, and that unique ID needs to be a pointer to
4565      * host memory.  hence, we need to malloc for each URB.
4566      * isochronous transfers have a variable length struct.
4567      */
4568     arg_type++;
4569     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4570 
4571     /* construct host copy of urb and metadata */
4572     lurb = g_try_malloc0(sizeof(struct live_urb));
4573     if (!lurb) {
4574         return -TARGET_ENOMEM;
4575     }
4576 
4577     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4578     if (!argptr) {
4579         g_free(lurb);
4580         return -TARGET_EFAULT;
4581     }
4582     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4583     unlock_user(argptr, arg, 0);
4584 
4585     lurb->target_urb_adr = arg;
4586     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4587 
4588     /* buffer space used depends on endpoint type so lock the entire buffer */
4589     /* control type urbs should check the buffer contents for true direction */
4590     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4591     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4592         lurb->host_urb.buffer_length, 1);
4593     if (lurb->target_buf_ptr == NULL) {
4594         g_free(lurb);
4595         return -TARGET_EFAULT;
4596     }
4597 
4598     /* update buffer pointer in host copy */
4599     lurb->host_urb.buffer = lurb->target_buf_ptr;
4600 
4601     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4602     if (is_error(ret)) {
4603         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4604         g_free(lurb);
4605     } else {
4606         urb_hashtable_insert(lurb);
4607     }
4608 
4609     return ret;
4610 }
4611 #endif /* CONFIG_USBFS */
4612 
4613 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4614                             int cmd, abi_long arg)
4615 {
4616     void *argptr;
4617     struct dm_ioctl *host_dm;
4618     abi_long guest_data;
4619     uint32_t guest_data_size;
4620     int target_size;
4621     const argtype *arg_type = ie->arg_type;
4622     abi_long ret;
4623     void *big_buf = NULL;
4624     char *host_data;
4625 
4626     arg_type++;
4627     target_size = thunk_type_size(arg_type, 0);
4628     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4629     if (!argptr) {
4630         ret = -TARGET_EFAULT;
4631         goto out;
4632     }
4633     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4634     unlock_user(argptr, arg, 0);
4635 
4636     /* buf_temp is too small, so fetch things into a bigger buffer */
4637     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4638     memcpy(big_buf, buf_temp, target_size);
4639     buf_temp = big_buf;
4640     host_dm = big_buf;
4641 
4642     guest_data = arg + host_dm->data_start;
4643     if ((guest_data - arg) < 0) {
4644         ret = -TARGET_EINVAL;
4645         goto out;
4646     }
4647     guest_data_size = host_dm->data_size - host_dm->data_start;
4648     host_data = (char*)host_dm + host_dm->data_start;
4649 
4650     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4651     if (!argptr) {
4652         ret = -TARGET_EFAULT;
4653         goto out;
4654     }
4655 
4656     switch (ie->host_cmd) {
4657     case DM_REMOVE_ALL:
4658     case DM_LIST_DEVICES:
4659     case DM_DEV_CREATE:
4660     case DM_DEV_REMOVE:
4661     case DM_DEV_SUSPEND:
4662     case DM_DEV_STATUS:
4663     case DM_DEV_WAIT:
4664     case DM_TABLE_STATUS:
4665     case DM_TABLE_CLEAR:
4666     case DM_TABLE_DEPS:
4667     case DM_LIST_VERSIONS:
4668         /* no input data */
4669         break;
4670     case DM_DEV_RENAME:
4671     case DM_DEV_SET_GEOMETRY:
4672         /* data contains only strings */
4673         memcpy(host_data, argptr, guest_data_size);
4674         break;
4675     case DM_TARGET_MSG:
4676         memcpy(host_data, argptr, guest_data_size);
4677         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4678         break;
4679     case DM_TABLE_LOAD:
4680     {
4681         void *gspec = argptr;
4682         void *cur_data = host_data;
4683         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4684         int spec_size = thunk_type_size(arg_type, 0);
4685         int i;
4686 
4687         for (i = 0; i < host_dm->target_count; i++) {
4688             struct dm_target_spec *spec = cur_data;
4689             uint32_t next;
4690             int slen;
4691 
4692             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4693             slen = strlen((char*)gspec + spec_size) + 1;
4694             next = spec->next;
4695             spec->next = sizeof(*spec) + slen;
4696             strcpy((char*)&spec[1], gspec + spec_size);
4697             gspec += next;
4698             cur_data += spec->next;
4699         }
4700         break;
4701     }
4702     default:
4703         ret = -TARGET_EINVAL;
4704         unlock_user(argptr, guest_data, 0);
4705         goto out;
4706     }
4707     unlock_user(argptr, guest_data, 0);
4708 
4709     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4710     if (!is_error(ret)) {
4711         guest_data = arg + host_dm->data_start;
4712         guest_data_size = host_dm->data_size - host_dm->data_start;
4713         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4714         switch (ie->host_cmd) {
4715         case DM_REMOVE_ALL:
4716         case DM_DEV_CREATE:
4717         case DM_DEV_REMOVE:
4718         case DM_DEV_RENAME:
4719         case DM_DEV_SUSPEND:
4720         case DM_DEV_STATUS:
4721         case DM_TABLE_LOAD:
4722         case DM_TABLE_CLEAR:
4723         case DM_TARGET_MSG:
4724         case DM_DEV_SET_GEOMETRY:
4725             /* no return data */
4726             break;
4727         case DM_LIST_DEVICES:
4728         {
4729             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4730             uint32_t remaining_data = guest_data_size;
4731             void *cur_data = argptr;
4732             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4733             int nl_size = 12; /* can't use thunk_size due to alignment */
4734 
4735             while (1) {
4736                 uint32_t next = nl->next;
4737                 if (next) {
4738                     nl->next = nl_size + (strlen(nl->name) + 1);
4739                 }
4740                 if (remaining_data < nl->next) {
4741                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4742                     break;
4743                 }
4744                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4745                 strcpy(cur_data + nl_size, nl->name);
4746                 cur_data += nl->next;
4747                 remaining_data -= nl->next;
4748                 if (!next) {
4749                     break;
4750                 }
4751                 nl = (void*)nl + next;
4752             }
4753             break;
4754         }
4755         case DM_DEV_WAIT:
4756         case DM_TABLE_STATUS:
4757         {
4758             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4759             void *cur_data = argptr;
4760             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4761             int spec_size = thunk_type_size(arg_type, 0);
4762             int i;
4763 
4764             for (i = 0; i < host_dm->target_count; i++) {
4765                 uint32_t next = spec->next;
4766                 int slen = strlen((char*)&spec[1]) + 1;
4767                 spec->next = (cur_data - argptr) + spec_size + slen;
4768                 if (guest_data_size < spec->next) {
4769                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4770                     break;
4771                 }
4772                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4773                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4774                 cur_data = argptr + spec->next;
4775                 spec = (void*)host_dm + host_dm->data_start + next;
4776             }
4777             break;
4778         }
4779         case DM_TABLE_DEPS:
4780         {
4781             void *hdata = (void*)host_dm + host_dm->data_start;
4782             int count = *(uint32_t*)hdata;
4783             uint64_t *hdev = hdata + 8;
4784             uint64_t *gdev = argptr + 8;
4785             int i;
4786 
4787             *(uint32_t*)argptr = tswap32(count);
4788             for (i = 0; i < count; i++) {
4789                 *gdev = tswap64(*hdev);
4790                 gdev++;
4791                 hdev++;
4792             }
4793             break;
4794         }
4795         case DM_LIST_VERSIONS:
4796         {
4797             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4798             uint32_t remaining_data = guest_data_size;
4799             void *cur_data = argptr;
4800             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4801             int vers_size = thunk_type_size(arg_type, 0);
4802 
4803             while (1) {
4804                 uint32_t next = vers->next;
4805                 if (next) {
4806                     vers->next = vers_size + (strlen(vers->name) + 1);
4807                 }
4808                 if (remaining_data < vers->next) {
4809                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4810                     break;
4811                 }
4812                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4813                 strcpy(cur_data + vers_size, vers->name);
4814                 cur_data += vers->next;
4815                 remaining_data -= vers->next;
4816                 if (!next) {
4817                     break;
4818                 }
4819                 vers = (void*)vers + next;
4820             }
4821             break;
4822         }
4823         default:
4824             unlock_user(argptr, guest_data, 0);
4825             ret = -TARGET_EINVAL;
4826             goto out;
4827         }
4828         unlock_user(argptr, guest_data, guest_data_size);
4829 
4830         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4831         if (!argptr) {
4832             ret = -TARGET_EFAULT;
4833             goto out;
4834         }
4835         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4836         unlock_user(argptr, arg, target_size);
4837     }
4838 out:
4839     g_free(big_buf);
4840     return ret;
4841 }
4842 
4843 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4844                                int cmd, abi_long arg)
4845 {
4846     void *argptr;
4847     int target_size;
4848     const argtype *arg_type = ie->arg_type;
4849     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4850     abi_long ret;
4851 
4852     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4853     struct blkpg_partition host_part;
4854 
4855     /* Read and convert blkpg */
4856     arg_type++;
4857     target_size = thunk_type_size(arg_type, 0);
4858     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4859     if (!argptr) {
4860         ret = -TARGET_EFAULT;
4861         goto out;
4862     }
4863     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4864     unlock_user(argptr, arg, 0);
4865 
4866     switch (host_blkpg->op) {
4867     case BLKPG_ADD_PARTITION:
4868     case BLKPG_DEL_PARTITION:
4869         /* payload is struct blkpg_partition */
4870         break;
4871     default:
4872         /* Unknown opcode */
4873         ret = -TARGET_EINVAL;
4874         goto out;
4875     }
4876 
4877     /* Read and convert blkpg->data */
4878     arg = (abi_long)(uintptr_t)host_blkpg->data;
4879     target_size = thunk_type_size(part_arg_type, 0);
4880     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4881     if (!argptr) {
4882         ret = -TARGET_EFAULT;
4883         goto out;
4884     }
4885     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4886     unlock_user(argptr, arg, 0);
4887 
4888     /* Swizzle the data pointer to our local copy and call! */
4889     host_blkpg->data = &host_part;
4890     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4891 
4892 out:
4893     return ret;
4894 }
4895 
4896 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4897                                 int fd, int cmd, abi_long arg)
4898 {
4899     const argtype *arg_type = ie->arg_type;
4900     const StructEntry *se;
4901     const argtype *field_types;
4902     const int *dst_offsets, *src_offsets;
4903     int target_size;
4904     void *argptr;
4905     abi_ulong *target_rt_dev_ptr = NULL;
4906     unsigned long *host_rt_dev_ptr = NULL;
4907     abi_long ret;
4908     int i;
4909 
4910     assert(ie->access == IOC_W);
4911     assert(*arg_type == TYPE_PTR);
4912     arg_type++;
4913     assert(*arg_type == TYPE_STRUCT);
4914     target_size = thunk_type_size(arg_type, 0);
4915     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4916     if (!argptr) {
4917         return -TARGET_EFAULT;
4918     }
4919     arg_type++;
4920     assert(*arg_type == (int)STRUCT_rtentry);
4921     se = struct_entries + *arg_type++;
4922     assert(se->convert[0] == NULL);
4923     /* convert struct here to be able to catch rt_dev string */
4924     field_types = se->field_types;
4925     dst_offsets = se->field_offsets[THUNK_HOST];
4926     src_offsets = se->field_offsets[THUNK_TARGET];
4927     for (i = 0; i < se->nb_fields; i++) {
4928         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4929             assert(*field_types == TYPE_PTRVOID);
4930             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4931             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4932             if (*target_rt_dev_ptr != 0) {
4933                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4934                                                   tswapal(*target_rt_dev_ptr));
4935                 if (!*host_rt_dev_ptr) {
4936                     unlock_user(argptr, arg, 0);
4937                     return -TARGET_EFAULT;
4938                 }
4939             } else {
4940                 *host_rt_dev_ptr = 0;
4941             }
4942             field_types++;
4943             continue;
4944         }
4945         field_types = thunk_convert(buf_temp + dst_offsets[i],
4946                                     argptr + src_offsets[i],
4947                                     field_types, THUNK_HOST);
4948     }
4949     unlock_user(argptr, arg, 0);
4950 
4951     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4952 
4953     assert(host_rt_dev_ptr != NULL);
4954     assert(target_rt_dev_ptr != NULL);
4955     if (*host_rt_dev_ptr != 0) {
4956         unlock_user((void *)*host_rt_dev_ptr,
4957                     *target_rt_dev_ptr, 0);
4958     }
4959     return ret;
4960 }
4961 
4962 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4963                                      int fd, int cmd, abi_long arg)
4964 {
4965     int sig = target_to_host_signal(arg);
4966     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4967 }
4968 
4969 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4970                                     int fd, int cmd, abi_long arg)
4971 {
4972     struct timeval tv;
4973     abi_long ret;
4974 
4975     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4976     if (is_error(ret)) {
4977         return ret;
4978     }
4979 
4980     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4981         if (copy_to_user_timeval(arg, &tv)) {
4982             return -TARGET_EFAULT;
4983         }
4984     } else {
4985         if (copy_to_user_timeval64(arg, &tv)) {
4986             return -TARGET_EFAULT;
4987         }
4988     }
4989 
4990     return ret;
4991 }
4992 
4993 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4994                                       int fd, int cmd, abi_long arg)
4995 {
4996     struct timespec ts;
4997     abi_long ret;
4998 
4999     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5000     if (is_error(ret)) {
5001         return ret;
5002     }
5003 
5004     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5005         if (host_to_target_timespec(arg, &ts)) {
5006             return -TARGET_EFAULT;
5007         }
5008     } else{
5009         if (host_to_target_timespec64(arg, &ts)) {
5010             return -TARGET_EFAULT;
5011         }
5012     }
5013 
5014     return ret;
5015 }
5016 
5017 #ifdef TIOCGPTPEER
5018 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5019                                      int fd, int cmd, abi_long arg)
5020 {
5021     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5022     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5023 }
5024 #endif
5025 
5026 static IOCTLEntry ioctl_entries[] = {
5027 #define IOCTL(cmd, access, ...) \
5028     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5029 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5030     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5031 #define IOCTL_IGNORE(cmd) \
5032     { TARGET_ ## cmd, 0, #cmd },
5033 #include "ioctls.h"
5034     { 0, 0, },
5035 };
5036 
5037 /* ??? Implement proper locking for ioctls.  */
5038 /* do_ioctl() Must return target values and target errnos. */
5039 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5040 {
5041     const IOCTLEntry *ie;
5042     const argtype *arg_type;
5043     abi_long ret;
5044     uint8_t buf_temp[MAX_STRUCT_SIZE];
5045     int target_size;
5046     void *argptr;
5047 
5048     ie = ioctl_entries;
5049     for(;;) {
5050         if (ie->target_cmd == 0) {
5051             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5052             return -TARGET_ENOSYS;
5053         }
5054         if (ie->target_cmd == cmd)
5055             break;
5056         ie++;
5057     }
5058     arg_type = ie->arg_type;
5059     if (ie->do_ioctl) {
5060         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5061     } else if (!ie->host_cmd) {
5062         /* Some architectures define BSD ioctls in their headers
5063            that are not implemented in Linux.  */
5064         return -TARGET_ENOSYS;
5065     }
5066 
5067     switch(arg_type[0]) {
5068     case TYPE_NULL:
5069         /* no argument */
5070         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5071         break;
5072     case TYPE_PTRVOID:
5073     case TYPE_INT:
5074         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5075         break;
5076     case TYPE_PTR:
5077         arg_type++;
5078         target_size = thunk_type_size(arg_type, 0);
5079         switch(ie->access) {
5080         case IOC_R:
5081             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5082             if (!is_error(ret)) {
5083                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5084                 if (!argptr)
5085                     return -TARGET_EFAULT;
5086                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5087                 unlock_user(argptr, arg, target_size);
5088             }
5089             break;
5090         case IOC_W:
5091             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5092             if (!argptr)
5093                 return -TARGET_EFAULT;
5094             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5095             unlock_user(argptr, arg, 0);
5096             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5097             break;
5098         default:
5099         case IOC_RW:
5100             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5101             if (!argptr)
5102                 return -TARGET_EFAULT;
5103             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5104             unlock_user(argptr, arg, 0);
5105             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5106             if (!is_error(ret)) {
5107                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5108                 if (!argptr)
5109                     return -TARGET_EFAULT;
5110                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5111                 unlock_user(argptr, arg, target_size);
5112             }
5113             break;
5114         }
5115         break;
5116     default:
5117         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5118                  (long)cmd, arg_type[0]);
5119         ret = -TARGET_ENOSYS;
5120         break;
5121     }
5122     return ret;
5123 }
5124 
5125 static const bitmask_transtbl iflag_tbl[] = {
5126         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5127         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5128         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5129         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5130         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5131         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5132         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5133         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5134         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5135         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5136         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5137         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5138         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5139         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5140         { 0, 0, 0, 0 }
5141 };
5142 
5143 static const bitmask_transtbl oflag_tbl[] = {
5144 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5145 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5146 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5147 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5148 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5149 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5150 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5151 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5152 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5153 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5154 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5155 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5156 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5157 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5158 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5159 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5160 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5161 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5162 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5163 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5164 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5165 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5166 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5167 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5168 	{ 0, 0, 0, 0 }
5169 };
5170 
5171 static const bitmask_transtbl cflag_tbl[] = {
5172 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5173 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5174 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5175 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5176 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5177 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5178 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5179 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5180 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5181 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5182 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5183 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5184 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5185 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5186 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5187 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5188 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5189 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5190 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5191 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5192 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5193 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5194 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5195 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5196 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5197 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5198 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5199 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5200 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5201 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5202 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5203 	{ 0, 0, 0, 0 }
5204 };
5205 
5206 static const bitmask_transtbl lflag_tbl[] = {
5207 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5208 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5209 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5210 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5211 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5212 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5213 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5214 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5215 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5216 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5217 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5218 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5219 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5220 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5221 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5222 	{ 0, 0, 0, 0 }
5223 };
5224 
5225 static void target_to_host_termios (void *dst, const void *src)
5226 {
5227     struct host_termios *host = dst;
5228     const struct target_termios *target = src;
5229 
5230     host->c_iflag =
5231         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5232     host->c_oflag =
5233         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5234     host->c_cflag =
5235         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5236     host->c_lflag =
5237         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5238     host->c_line = target->c_line;
5239 
5240     memset(host->c_cc, 0, sizeof(host->c_cc));
5241     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5242     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5243     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5244     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5245     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5246     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5247     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5248     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5249     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5250     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5251     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5252     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5253     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5254     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5255     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5256     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5257     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5258 }
5259 
5260 static void host_to_target_termios (void *dst, const void *src)
5261 {
5262     struct target_termios *target = dst;
5263     const struct host_termios *host = src;
5264 
5265     target->c_iflag =
5266         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5267     target->c_oflag =
5268         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5269     target->c_cflag =
5270         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5271     target->c_lflag =
5272         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5273     target->c_line = host->c_line;
5274 
5275     memset(target->c_cc, 0, sizeof(target->c_cc));
5276     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5277     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5278     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5279     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5280     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5281     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5282     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5283     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5284     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5285     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5286     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5287     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5288     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5289     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5290     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5291     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5292     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5293 }
5294 
5295 static const StructEntry struct_termios_def = {
5296     .convert = { host_to_target_termios, target_to_host_termios },
5297     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5298     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5299 };
5300 
5301 static bitmask_transtbl mmap_flags_tbl[] = {
5302     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5303     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5304     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5305     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5306       MAP_ANONYMOUS, MAP_ANONYMOUS },
5307     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5308       MAP_GROWSDOWN, MAP_GROWSDOWN },
5309     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5310       MAP_DENYWRITE, MAP_DENYWRITE },
5311     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5312       MAP_EXECUTABLE, MAP_EXECUTABLE },
5313     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5314     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5315       MAP_NORESERVE, MAP_NORESERVE },
5316     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5317     /* MAP_STACK had been ignored by the kernel for quite some time.
5318        Recognize it for the target insofar as we do not want to pass
5319        it through to the host.  */
5320     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5321     { 0, 0, 0, 0 }
5322 };
5323 
5324 #if defined(TARGET_I386)
5325 
5326 /* NOTE: there is really one LDT for all the threads */
5327 static uint8_t *ldt_table;
5328 
5329 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5330 {
5331     int size;
5332     void *p;
5333 
5334     if (!ldt_table)
5335         return 0;
5336     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5337     if (size > bytecount)
5338         size = bytecount;
5339     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5340     if (!p)
5341         return -TARGET_EFAULT;
5342     /* ??? Should this by byteswapped?  */
5343     memcpy(p, ldt_table, size);
5344     unlock_user(p, ptr, size);
5345     return size;
5346 }
5347 
5348 /* XXX: add locking support */
5349 static abi_long write_ldt(CPUX86State *env,
5350                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5351 {
5352     struct target_modify_ldt_ldt_s ldt_info;
5353     struct target_modify_ldt_ldt_s *target_ldt_info;
5354     int seg_32bit, contents, read_exec_only, limit_in_pages;
5355     int seg_not_present, useable, lm;
5356     uint32_t *lp, entry_1, entry_2;
5357 
5358     if (bytecount != sizeof(ldt_info))
5359         return -TARGET_EINVAL;
5360     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5361         return -TARGET_EFAULT;
5362     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5363     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5364     ldt_info.limit = tswap32(target_ldt_info->limit);
5365     ldt_info.flags = tswap32(target_ldt_info->flags);
5366     unlock_user_struct(target_ldt_info, ptr, 0);
5367 
5368     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5369         return -TARGET_EINVAL;
5370     seg_32bit = ldt_info.flags & 1;
5371     contents = (ldt_info.flags >> 1) & 3;
5372     read_exec_only = (ldt_info.flags >> 3) & 1;
5373     limit_in_pages = (ldt_info.flags >> 4) & 1;
5374     seg_not_present = (ldt_info.flags >> 5) & 1;
5375     useable = (ldt_info.flags >> 6) & 1;
5376 #ifdef TARGET_ABI32
5377     lm = 0;
5378 #else
5379     lm = (ldt_info.flags >> 7) & 1;
5380 #endif
5381     if (contents == 3) {
5382         if (oldmode)
5383             return -TARGET_EINVAL;
5384         if (seg_not_present == 0)
5385             return -TARGET_EINVAL;
5386     }
5387     /* allocate the LDT */
5388     if (!ldt_table) {
5389         env->ldt.base = target_mmap(0,
5390                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5391                                     PROT_READ|PROT_WRITE,
5392                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5393         if (env->ldt.base == -1)
5394             return -TARGET_ENOMEM;
5395         memset(g2h(env->ldt.base), 0,
5396                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5397         env->ldt.limit = 0xffff;
5398         ldt_table = g2h(env->ldt.base);
5399     }
5400 
5401     /* NOTE: same code as Linux kernel */
5402     /* Allow LDTs to be cleared by the user. */
5403     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5404         if (oldmode ||
5405             (contents == 0		&&
5406              read_exec_only == 1	&&
5407              seg_32bit == 0		&&
5408              limit_in_pages == 0	&&
5409              seg_not_present == 1	&&
5410              useable == 0 )) {
5411             entry_1 = 0;
5412             entry_2 = 0;
5413             goto install;
5414         }
5415     }
5416 
5417     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5418         (ldt_info.limit & 0x0ffff);
5419     entry_2 = (ldt_info.base_addr & 0xff000000) |
5420         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5421         (ldt_info.limit & 0xf0000) |
5422         ((read_exec_only ^ 1) << 9) |
5423         (contents << 10) |
5424         ((seg_not_present ^ 1) << 15) |
5425         (seg_32bit << 22) |
5426         (limit_in_pages << 23) |
5427         (lm << 21) |
5428         0x7000;
5429     if (!oldmode)
5430         entry_2 |= (useable << 20);
5431 
5432     /* Install the new entry ...  */
5433 install:
5434     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5435     lp[0] = tswap32(entry_1);
5436     lp[1] = tswap32(entry_2);
5437     return 0;
5438 }
5439 
5440 /* specific and weird i386 syscalls */
5441 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5442                               unsigned long bytecount)
5443 {
5444     abi_long ret;
5445 
5446     switch (func) {
5447     case 0:
5448         ret = read_ldt(ptr, bytecount);
5449         break;
5450     case 1:
5451         ret = write_ldt(env, ptr, bytecount, 1);
5452         break;
5453     case 0x11:
5454         ret = write_ldt(env, ptr, bytecount, 0);
5455         break;
5456     default:
5457         ret = -TARGET_ENOSYS;
5458         break;
5459     }
5460     return ret;
5461 }
5462 
5463 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5464 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5465 {
5466     uint64_t *gdt_table = g2h(env->gdt.base);
5467     struct target_modify_ldt_ldt_s ldt_info;
5468     struct target_modify_ldt_ldt_s *target_ldt_info;
5469     int seg_32bit, contents, read_exec_only, limit_in_pages;
5470     int seg_not_present, useable, lm;
5471     uint32_t *lp, entry_1, entry_2;
5472     int i;
5473 
5474     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5475     if (!target_ldt_info)
5476         return -TARGET_EFAULT;
5477     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5478     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5479     ldt_info.limit = tswap32(target_ldt_info->limit);
5480     ldt_info.flags = tswap32(target_ldt_info->flags);
5481     if (ldt_info.entry_number == -1) {
5482         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5483             if (gdt_table[i] == 0) {
5484                 ldt_info.entry_number = i;
5485                 target_ldt_info->entry_number = tswap32(i);
5486                 break;
5487             }
5488         }
5489     }
5490     unlock_user_struct(target_ldt_info, ptr, 1);
5491 
5492     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5493         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5494            return -TARGET_EINVAL;
5495     seg_32bit = ldt_info.flags & 1;
5496     contents = (ldt_info.flags >> 1) & 3;
5497     read_exec_only = (ldt_info.flags >> 3) & 1;
5498     limit_in_pages = (ldt_info.flags >> 4) & 1;
5499     seg_not_present = (ldt_info.flags >> 5) & 1;
5500     useable = (ldt_info.flags >> 6) & 1;
5501 #ifdef TARGET_ABI32
5502     lm = 0;
5503 #else
5504     lm = (ldt_info.flags >> 7) & 1;
5505 #endif
5506 
5507     if (contents == 3) {
5508         if (seg_not_present == 0)
5509             return -TARGET_EINVAL;
5510     }
5511 
5512     /* NOTE: same code as Linux kernel */
5513     /* Allow LDTs to be cleared by the user. */
5514     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5515         if ((contents == 0             &&
5516              read_exec_only == 1       &&
5517              seg_32bit == 0            &&
5518              limit_in_pages == 0       &&
5519              seg_not_present == 1      &&
5520              useable == 0 )) {
5521             entry_1 = 0;
5522             entry_2 = 0;
5523             goto install;
5524         }
5525     }
5526 
5527     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5528         (ldt_info.limit & 0x0ffff);
5529     entry_2 = (ldt_info.base_addr & 0xff000000) |
5530         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5531         (ldt_info.limit & 0xf0000) |
5532         ((read_exec_only ^ 1) << 9) |
5533         (contents << 10) |
5534         ((seg_not_present ^ 1) << 15) |
5535         (seg_32bit << 22) |
5536         (limit_in_pages << 23) |
5537         (useable << 20) |
5538         (lm << 21) |
5539         0x7000;
5540 
5541     /* Install the new entry ...  */
5542 install:
5543     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5544     lp[0] = tswap32(entry_1);
5545     lp[1] = tswap32(entry_2);
5546     return 0;
5547 }
5548 
5549 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5550 {
5551     struct target_modify_ldt_ldt_s *target_ldt_info;
5552     uint64_t *gdt_table = g2h(env->gdt.base);
5553     uint32_t base_addr, limit, flags;
5554     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5555     int seg_not_present, useable, lm;
5556     uint32_t *lp, entry_1, entry_2;
5557 
5558     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5559     if (!target_ldt_info)
5560         return -TARGET_EFAULT;
5561     idx = tswap32(target_ldt_info->entry_number);
5562     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5563         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5564         unlock_user_struct(target_ldt_info, ptr, 1);
5565         return -TARGET_EINVAL;
5566     }
5567     lp = (uint32_t *)(gdt_table + idx);
5568     entry_1 = tswap32(lp[0]);
5569     entry_2 = tswap32(lp[1]);
5570 
5571     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5572     contents = (entry_2 >> 10) & 3;
5573     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5574     seg_32bit = (entry_2 >> 22) & 1;
5575     limit_in_pages = (entry_2 >> 23) & 1;
5576     useable = (entry_2 >> 20) & 1;
5577 #ifdef TARGET_ABI32
5578     lm = 0;
5579 #else
5580     lm = (entry_2 >> 21) & 1;
5581 #endif
5582     flags = (seg_32bit << 0) | (contents << 1) |
5583         (read_exec_only << 3) | (limit_in_pages << 4) |
5584         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5585     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5586     base_addr = (entry_1 >> 16) |
5587         (entry_2 & 0xff000000) |
5588         ((entry_2 & 0xff) << 16);
5589     target_ldt_info->base_addr = tswapal(base_addr);
5590     target_ldt_info->limit = tswap32(limit);
5591     target_ldt_info->flags = tswap32(flags);
5592     unlock_user_struct(target_ldt_info, ptr, 1);
5593     return 0;
5594 }
5595 #endif /* TARGET_I386 && TARGET_ABI32 */
5596 
5597 #ifndef TARGET_ABI32
5598 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5599 {
5600     abi_long ret = 0;
5601     abi_ulong val;
5602     int idx;
5603 
5604     switch(code) {
5605     case TARGET_ARCH_SET_GS:
5606     case TARGET_ARCH_SET_FS:
5607         if (code == TARGET_ARCH_SET_GS)
5608             idx = R_GS;
5609         else
5610             idx = R_FS;
5611         cpu_x86_load_seg(env, idx, 0);
5612         env->segs[idx].base = addr;
5613         break;
5614     case TARGET_ARCH_GET_GS:
5615     case TARGET_ARCH_GET_FS:
5616         if (code == TARGET_ARCH_GET_GS)
5617             idx = R_GS;
5618         else
5619             idx = R_FS;
5620         val = env->segs[idx].base;
5621         if (put_user(val, addr, abi_ulong))
5622             ret = -TARGET_EFAULT;
5623         break;
5624     default:
5625         ret = -TARGET_EINVAL;
5626         break;
5627     }
5628     return ret;
5629 }
5630 #endif
5631 
5632 #endif /* defined(TARGET_I386) */
5633 
5634 #define NEW_STACK_SIZE 0x40000
5635 
5636 
5637 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5638 typedef struct {
5639     CPUArchState *env;
5640     pthread_mutex_t mutex;
5641     pthread_cond_t cond;
5642     pthread_t thread;
5643     uint32_t tid;
5644     abi_ulong child_tidptr;
5645     abi_ulong parent_tidptr;
5646     sigset_t sigmask;
5647 } new_thread_info;
5648 
5649 static void *clone_func(void *arg)
5650 {
5651     new_thread_info *info = arg;
5652     CPUArchState *env;
5653     CPUState *cpu;
5654     TaskState *ts;
5655 
5656     rcu_register_thread();
5657     tcg_register_thread();
5658     env = info->env;
5659     cpu = env_cpu(env);
5660     thread_cpu = cpu;
5661     ts = (TaskState *)cpu->opaque;
5662     info->tid = sys_gettid();
5663     task_settid(ts);
5664     if (info->child_tidptr)
5665         put_user_u32(info->tid, info->child_tidptr);
5666     if (info->parent_tidptr)
5667         put_user_u32(info->tid, info->parent_tidptr);
5668     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5669     /* Enable signals.  */
5670     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5671     /* Signal to the parent that we're ready.  */
5672     pthread_mutex_lock(&info->mutex);
5673     pthread_cond_broadcast(&info->cond);
5674     pthread_mutex_unlock(&info->mutex);
5675     /* Wait until the parent has finished initializing the tls state.  */
5676     pthread_mutex_lock(&clone_lock);
5677     pthread_mutex_unlock(&clone_lock);
5678     cpu_loop(env);
5679     /* never exits */
5680     return NULL;
5681 }
5682 
5683 /* do_fork() Must return host values and target errnos (unlike most
5684    do_*() functions). */
5685 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5686                    abi_ulong parent_tidptr, target_ulong newtls,
5687                    abi_ulong child_tidptr)
5688 {
5689     CPUState *cpu = env_cpu(env);
5690     int ret;
5691     TaskState *ts;
5692     CPUState *new_cpu;
5693     CPUArchState *new_env;
5694     sigset_t sigmask;
5695 
5696     flags &= ~CLONE_IGNORED_FLAGS;
5697 
5698     /* Emulate vfork() with fork() */
5699     if (flags & CLONE_VFORK)
5700         flags &= ~(CLONE_VFORK | CLONE_VM);
5701 
5702     if (flags & CLONE_VM) {
5703         TaskState *parent_ts = (TaskState *)cpu->opaque;
5704         new_thread_info info;
5705         pthread_attr_t attr;
5706 
5707         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5708             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5709             return -TARGET_EINVAL;
5710         }
5711 
5712         ts = g_new0(TaskState, 1);
5713         init_task_state(ts);
5714 
5715         /* Grab a mutex so that thread setup appears atomic.  */
5716         pthread_mutex_lock(&clone_lock);
5717 
5718         /* we create a new CPU instance. */
5719         new_env = cpu_copy(env);
5720         /* Init regs that differ from the parent.  */
5721         cpu_clone_regs(new_env, newsp);
5722         new_cpu = env_cpu(new_env);
5723         new_cpu->opaque = ts;
5724         ts->bprm = parent_ts->bprm;
5725         ts->info = parent_ts->info;
5726         ts->signal_mask = parent_ts->signal_mask;
5727 
5728         if (flags & CLONE_CHILD_CLEARTID) {
5729             ts->child_tidptr = child_tidptr;
5730         }
5731 
5732         if (flags & CLONE_SETTLS) {
5733             cpu_set_tls (new_env, newtls);
5734         }
5735 
5736         memset(&info, 0, sizeof(info));
5737         pthread_mutex_init(&info.mutex, NULL);
5738         pthread_mutex_lock(&info.mutex);
5739         pthread_cond_init(&info.cond, NULL);
5740         info.env = new_env;
5741         if (flags & CLONE_CHILD_SETTID) {
5742             info.child_tidptr = child_tidptr;
5743         }
5744         if (flags & CLONE_PARENT_SETTID) {
5745             info.parent_tidptr = parent_tidptr;
5746         }
5747 
5748         ret = pthread_attr_init(&attr);
5749         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5750         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5751         /* It is not safe to deliver signals until the child has finished
5752            initializing, so temporarily block all signals.  */
5753         sigfillset(&sigmask);
5754         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5755         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5756 
5757         /* If this is our first additional thread, we need to ensure we
5758          * generate code for parallel execution and flush old translations.
5759          */
5760         if (!parallel_cpus) {
5761             parallel_cpus = true;
5762             tb_flush(cpu);
5763         }
5764 
5765         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5766         /* TODO: Free new CPU state if thread creation failed.  */
5767 
5768         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5769         pthread_attr_destroy(&attr);
5770         if (ret == 0) {
5771             /* Wait for the child to initialize.  */
5772             pthread_cond_wait(&info.cond, &info.mutex);
5773             ret = info.tid;
5774         } else {
5775             ret = -1;
5776         }
5777         pthread_mutex_unlock(&info.mutex);
5778         pthread_cond_destroy(&info.cond);
5779         pthread_mutex_destroy(&info.mutex);
5780         pthread_mutex_unlock(&clone_lock);
5781     } else {
5782         /* if no CLONE_VM, we consider it is a fork */
5783         if (flags & CLONE_INVALID_FORK_FLAGS) {
5784             return -TARGET_EINVAL;
5785         }
5786 
5787         /* We can't support custom termination signals */
5788         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5789             return -TARGET_EINVAL;
5790         }
5791 
5792         if (block_signals()) {
5793             return -TARGET_ERESTARTSYS;
5794         }
5795 
5796         fork_start();
5797         ret = fork();
5798         if (ret == 0) {
5799             /* Child Process.  */
5800             cpu_clone_regs(env, newsp);
5801             fork_end(1);
5802             /* There is a race condition here.  The parent process could
5803                theoretically read the TID in the child process before the child
5804                tid is set.  This would require using either ptrace
5805                (not implemented) or having *_tidptr to point at a shared memory
5806                mapping.  We can't repeat the spinlock hack used above because
5807                the child process gets its own copy of the lock.  */
5808             if (flags & CLONE_CHILD_SETTID)
5809                 put_user_u32(sys_gettid(), child_tidptr);
5810             if (flags & CLONE_PARENT_SETTID)
5811                 put_user_u32(sys_gettid(), parent_tidptr);
5812             ts = (TaskState *)cpu->opaque;
5813             if (flags & CLONE_SETTLS)
5814                 cpu_set_tls (env, newtls);
5815             if (flags & CLONE_CHILD_CLEARTID)
5816                 ts->child_tidptr = child_tidptr;
5817         } else {
5818             fork_end(0);
5819         }
5820     }
5821     return ret;
5822 }
5823 
5824 /* warning : doesn't handle linux specific flags... */
5825 static int target_to_host_fcntl_cmd(int cmd)
5826 {
5827     int ret;
5828 
5829     switch(cmd) {
5830     case TARGET_F_DUPFD:
5831     case TARGET_F_GETFD:
5832     case TARGET_F_SETFD:
5833     case TARGET_F_GETFL:
5834     case TARGET_F_SETFL:
5835         ret = cmd;
5836         break;
5837     case TARGET_F_GETLK:
5838         ret = F_GETLK64;
5839         break;
5840     case TARGET_F_SETLK:
5841         ret = F_SETLK64;
5842         break;
5843     case TARGET_F_SETLKW:
5844         ret = F_SETLKW64;
5845         break;
5846     case TARGET_F_GETOWN:
5847         ret = F_GETOWN;
5848         break;
5849     case TARGET_F_SETOWN:
5850         ret = F_SETOWN;
5851         break;
5852     case TARGET_F_GETSIG:
5853         ret = F_GETSIG;
5854         break;
5855     case TARGET_F_SETSIG:
5856         ret = F_SETSIG;
5857         break;
5858 #if TARGET_ABI_BITS == 32
5859     case TARGET_F_GETLK64:
5860         ret = F_GETLK64;
5861         break;
5862     case TARGET_F_SETLK64:
5863         ret = F_SETLK64;
5864         break;
5865     case TARGET_F_SETLKW64:
5866         ret = F_SETLKW64;
5867         break;
5868 #endif
5869     case TARGET_F_SETLEASE:
5870         ret = F_SETLEASE;
5871         break;
5872     case TARGET_F_GETLEASE:
5873         ret = F_GETLEASE;
5874         break;
5875 #ifdef F_DUPFD_CLOEXEC
5876     case TARGET_F_DUPFD_CLOEXEC:
5877         ret = F_DUPFD_CLOEXEC;
5878         break;
5879 #endif
5880     case TARGET_F_NOTIFY:
5881         ret = F_NOTIFY;
5882         break;
5883 #ifdef F_GETOWN_EX
5884     case TARGET_F_GETOWN_EX:
5885         ret = F_GETOWN_EX;
5886         break;
5887 #endif
5888 #ifdef F_SETOWN_EX
5889     case TARGET_F_SETOWN_EX:
5890         ret = F_SETOWN_EX;
5891         break;
5892 #endif
5893 #ifdef F_SETPIPE_SZ
5894     case TARGET_F_SETPIPE_SZ:
5895         ret = F_SETPIPE_SZ;
5896         break;
5897     case TARGET_F_GETPIPE_SZ:
5898         ret = F_GETPIPE_SZ;
5899         break;
5900 #endif
5901     default:
5902         ret = -TARGET_EINVAL;
5903         break;
5904     }
5905 
5906 #if defined(__powerpc64__)
5907     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5908      * is not supported by kernel. The glibc fcntl call actually adjusts
5909      * them to 5, 6 and 7 before making the syscall(). Since we make the
5910      * syscall directly, adjust to what is supported by the kernel.
5911      */
5912     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5913         ret -= F_GETLK64 - 5;
5914     }
5915 #endif
5916 
5917     return ret;
5918 }
5919 
5920 #define FLOCK_TRANSTBL \
5921     switch (type) { \
5922     TRANSTBL_CONVERT(F_RDLCK); \
5923     TRANSTBL_CONVERT(F_WRLCK); \
5924     TRANSTBL_CONVERT(F_UNLCK); \
5925     TRANSTBL_CONVERT(F_EXLCK); \
5926     TRANSTBL_CONVERT(F_SHLCK); \
5927     }
5928 
5929 static int target_to_host_flock(int type)
5930 {
5931 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5932     FLOCK_TRANSTBL
5933 #undef  TRANSTBL_CONVERT
5934     return -TARGET_EINVAL;
5935 }
5936 
5937 static int host_to_target_flock(int type)
5938 {
5939 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5940     FLOCK_TRANSTBL
5941 #undef  TRANSTBL_CONVERT
5942     /* if we don't know how to convert the value coming
5943      * from the host we copy to the target field as-is
5944      */
5945     return type;
5946 }
5947 
5948 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5949                                             abi_ulong target_flock_addr)
5950 {
5951     struct target_flock *target_fl;
5952     int l_type;
5953 
5954     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5955         return -TARGET_EFAULT;
5956     }
5957 
5958     __get_user(l_type, &target_fl->l_type);
5959     l_type = target_to_host_flock(l_type);
5960     if (l_type < 0) {
5961         return l_type;
5962     }
5963     fl->l_type = l_type;
5964     __get_user(fl->l_whence, &target_fl->l_whence);
5965     __get_user(fl->l_start, &target_fl->l_start);
5966     __get_user(fl->l_len, &target_fl->l_len);
5967     __get_user(fl->l_pid, &target_fl->l_pid);
5968     unlock_user_struct(target_fl, target_flock_addr, 0);
5969     return 0;
5970 }
5971 
5972 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5973                                           const struct flock64 *fl)
5974 {
5975     struct target_flock *target_fl;
5976     short l_type;
5977 
5978     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5979         return -TARGET_EFAULT;
5980     }
5981 
5982     l_type = host_to_target_flock(fl->l_type);
5983     __put_user(l_type, &target_fl->l_type);
5984     __put_user(fl->l_whence, &target_fl->l_whence);
5985     __put_user(fl->l_start, &target_fl->l_start);
5986     __put_user(fl->l_len, &target_fl->l_len);
5987     __put_user(fl->l_pid, &target_fl->l_pid);
5988     unlock_user_struct(target_fl, target_flock_addr, 1);
5989     return 0;
5990 }
5991 
5992 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5993 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5994 
5995 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5996 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5997                                                    abi_ulong target_flock_addr)
5998 {
5999     struct target_oabi_flock64 *target_fl;
6000     int l_type;
6001 
6002     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6003         return -TARGET_EFAULT;
6004     }
6005 
6006     __get_user(l_type, &target_fl->l_type);
6007     l_type = target_to_host_flock(l_type);
6008     if (l_type < 0) {
6009         return l_type;
6010     }
6011     fl->l_type = l_type;
6012     __get_user(fl->l_whence, &target_fl->l_whence);
6013     __get_user(fl->l_start, &target_fl->l_start);
6014     __get_user(fl->l_len, &target_fl->l_len);
6015     __get_user(fl->l_pid, &target_fl->l_pid);
6016     unlock_user_struct(target_fl, target_flock_addr, 0);
6017     return 0;
6018 }
6019 
6020 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6021                                                  const struct flock64 *fl)
6022 {
6023     struct target_oabi_flock64 *target_fl;
6024     short l_type;
6025 
6026     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6027         return -TARGET_EFAULT;
6028     }
6029 
6030     l_type = host_to_target_flock(fl->l_type);
6031     __put_user(l_type, &target_fl->l_type);
6032     __put_user(fl->l_whence, &target_fl->l_whence);
6033     __put_user(fl->l_start, &target_fl->l_start);
6034     __put_user(fl->l_len, &target_fl->l_len);
6035     __put_user(fl->l_pid, &target_fl->l_pid);
6036     unlock_user_struct(target_fl, target_flock_addr, 1);
6037     return 0;
6038 }
6039 #endif
6040 
6041 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6042                                               abi_ulong target_flock_addr)
6043 {
6044     struct target_flock64 *target_fl;
6045     int l_type;
6046 
6047     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6048         return -TARGET_EFAULT;
6049     }
6050 
6051     __get_user(l_type, &target_fl->l_type);
6052     l_type = target_to_host_flock(l_type);
6053     if (l_type < 0) {
6054         return l_type;
6055     }
6056     fl->l_type = l_type;
6057     __get_user(fl->l_whence, &target_fl->l_whence);
6058     __get_user(fl->l_start, &target_fl->l_start);
6059     __get_user(fl->l_len, &target_fl->l_len);
6060     __get_user(fl->l_pid, &target_fl->l_pid);
6061     unlock_user_struct(target_fl, target_flock_addr, 0);
6062     return 0;
6063 }
6064 
6065 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6066                                             const struct flock64 *fl)
6067 {
6068     struct target_flock64 *target_fl;
6069     short l_type;
6070 
6071     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6072         return -TARGET_EFAULT;
6073     }
6074 
6075     l_type = host_to_target_flock(fl->l_type);
6076     __put_user(l_type, &target_fl->l_type);
6077     __put_user(fl->l_whence, &target_fl->l_whence);
6078     __put_user(fl->l_start, &target_fl->l_start);
6079     __put_user(fl->l_len, &target_fl->l_len);
6080     __put_user(fl->l_pid, &target_fl->l_pid);
6081     unlock_user_struct(target_fl, target_flock_addr, 1);
6082     return 0;
6083 }
6084 
6085 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6086 {
6087     struct flock64 fl64;
6088 #ifdef F_GETOWN_EX
6089     struct f_owner_ex fox;
6090     struct target_f_owner_ex *target_fox;
6091 #endif
6092     abi_long ret;
6093     int host_cmd = target_to_host_fcntl_cmd(cmd);
6094 
6095     if (host_cmd == -TARGET_EINVAL)
6096 	    return host_cmd;
6097 
6098     switch(cmd) {
6099     case TARGET_F_GETLK:
6100         ret = copy_from_user_flock(&fl64, arg);
6101         if (ret) {
6102             return ret;
6103         }
6104         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6105         if (ret == 0) {
6106             ret = copy_to_user_flock(arg, &fl64);
6107         }
6108         break;
6109 
6110     case TARGET_F_SETLK:
6111     case TARGET_F_SETLKW:
6112         ret = copy_from_user_flock(&fl64, arg);
6113         if (ret) {
6114             return ret;
6115         }
6116         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6117         break;
6118 
6119     case TARGET_F_GETLK64:
6120         ret = copy_from_user_flock64(&fl64, arg);
6121         if (ret) {
6122             return ret;
6123         }
6124         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6125         if (ret == 0) {
6126             ret = copy_to_user_flock64(arg, &fl64);
6127         }
6128         break;
6129     case TARGET_F_SETLK64:
6130     case TARGET_F_SETLKW64:
6131         ret = copy_from_user_flock64(&fl64, arg);
6132         if (ret) {
6133             return ret;
6134         }
6135         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6136         break;
6137 
6138     case TARGET_F_GETFL:
6139         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6140         if (ret >= 0) {
6141             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6142         }
6143         break;
6144 
6145     case TARGET_F_SETFL:
6146         ret = get_errno(safe_fcntl(fd, host_cmd,
6147                                    target_to_host_bitmask(arg,
6148                                                           fcntl_flags_tbl)));
6149         break;
6150 
6151 #ifdef F_GETOWN_EX
6152     case TARGET_F_GETOWN_EX:
6153         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6154         if (ret >= 0) {
6155             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6156                 return -TARGET_EFAULT;
6157             target_fox->type = tswap32(fox.type);
6158             target_fox->pid = tswap32(fox.pid);
6159             unlock_user_struct(target_fox, arg, 1);
6160         }
6161         break;
6162 #endif
6163 
6164 #ifdef F_SETOWN_EX
6165     case TARGET_F_SETOWN_EX:
6166         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6167             return -TARGET_EFAULT;
6168         fox.type = tswap32(target_fox->type);
6169         fox.pid = tswap32(target_fox->pid);
6170         unlock_user_struct(target_fox, arg, 0);
6171         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6172         break;
6173 #endif
6174 
6175     case TARGET_F_SETOWN:
6176     case TARGET_F_GETOWN:
6177     case TARGET_F_SETSIG:
6178     case TARGET_F_GETSIG:
6179     case TARGET_F_SETLEASE:
6180     case TARGET_F_GETLEASE:
6181     case TARGET_F_SETPIPE_SZ:
6182     case TARGET_F_GETPIPE_SZ:
6183         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6184         break;
6185 
6186     default:
6187         ret = get_errno(safe_fcntl(fd, cmd, arg));
6188         break;
6189     }
6190     return ret;
6191 }
6192 
6193 #ifdef USE_UID16
6194 
6195 static inline int high2lowuid(int uid)
6196 {
6197     if (uid > 65535)
6198         return 65534;
6199     else
6200         return uid;
6201 }
6202 
6203 static inline int high2lowgid(int gid)
6204 {
6205     if (gid > 65535)
6206         return 65534;
6207     else
6208         return gid;
6209 }
6210 
6211 static inline int low2highuid(int uid)
6212 {
6213     if ((int16_t)uid == -1)
6214         return -1;
6215     else
6216         return uid;
6217 }
6218 
6219 static inline int low2highgid(int gid)
6220 {
6221     if ((int16_t)gid == -1)
6222         return -1;
6223     else
6224         return gid;
6225 }
6226 static inline int tswapid(int id)
6227 {
6228     return tswap16(id);
6229 }
6230 
6231 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6232 
6233 #else /* !USE_UID16 */
6234 static inline int high2lowuid(int uid)
6235 {
6236     return uid;
6237 }
6238 static inline int high2lowgid(int gid)
6239 {
6240     return gid;
6241 }
6242 static inline int low2highuid(int uid)
6243 {
6244     return uid;
6245 }
6246 static inline int low2highgid(int gid)
6247 {
6248     return gid;
6249 }
6250 static inline int tswapid(int id)
6251 {
6252     return tswap32(id);
6253 }
6254 
6255 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6256 
6257 #endif /* USE_UID16 */
6258 
6259 /* We must do direct syscalls for setting UID/GID, because we want to
6260  * implement the Linux system call semantics of "change only for this thread",
6261  * not the libc/POSIX semantics of "change for all threads in process".
6262  * (See http://ewontfix.com/17/ for more details.)
6263  * We use the 32-bit version of the syscalls if present; if it is not
6264  * then either the host architecture supports 32-bit UIDs natively with
6265  * the standard syscall, or the 16-bit UID is the best we can do.
6266  */
6267 #ifdef __NR_setuid32
6268 #define __NR_sys_setuid __NR_setuid32
6269 #else
6270 #define __NR_sys_setuid __NR_setuid
6271 #endif
6272 #ifdef __NR_setgid32
6273 #define __NR_sys_setgid __NR_setgid32
6274 #else
6275 #define __NR_sys_setgid __NR_setgid
6276 #endif
6277 #ifdef __NR_setresuid32
6278 #define __NR_sys_setresuid __NR_setresuid32
6279 #else
6280 #define __NR_sys_setresuid __NR_setresuid
6281 #endif
6282 #ifdef __NR_setresgid32
6283 #define __NR_sys_setresgid __NR_setresgid32
6284 #else
6285 #define __NR_sys_setresgid __NR_setresgid
6286 #endif
6287 
6288 _syscall1(int, sys_setuid, uid_t, uid)
6289 _syscall1(int, sys_setgid, gid_t, gid)
6290 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6291 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6292 
6293 void syscall_init(void)
6294 {
6295     IOCTLEntry *ie;
6296     const argtype *arg_type;
6297     int size;
6298     int i;
6299 
6300     thunk_init(STRUCT_MAX);
6301 
6302 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6303 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6304 #include "syscall_types.h"
6305 #undef STRUCT
6306 #undef STRUCT_SPECIAL
6307 
6308     /* Build target_to_host_errno_table[] table from
6309      * host_to_target_errno_table[]. */
6310     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6311         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6312     }
6313 
6314     /* we patch the ioctl size if necessary. We rely on the fact that
6315        no ioctl has all the bits at '1' in the size field */
6316     ie = ioctl_entries;
6317     while (ie->target_cmd != 0) {
6318         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6319             TARGET_IOC_SIZEMASK) {
6320             arg_type = ie->arg_type;
6321             if (arg_type[0] != TYPE_PTR) {
6322                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6323                         ie->target_cmd);
6324                 exit(1);
6325             }
6326             arg_type++;
6327             size = thunk_type_size(arg_type, 0);
6328             ie->target_cmd = (ie->target_cmd &
6329                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6330                 (size << TARGET_IOC_SIZESHIFT);
6331         }
6332 
6333         /* automatic consistency check if same arch */
6334 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6335     (defined(__x86_64__) && defined(TARGET_X86_64))
6336         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6337             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6338                     ie->name, ie->target_cmd, ie->host_cmd);
6339         }
6340 #endif
6341         ie++;
6342     }
6343 }
6344 
6345 #if TARGET_ABI_BITS == 32
6346 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6347 {
6348 #ifdef TARGET_WORDS_BIGENDIAN
6349     return ((uint64_t)word0 << 32) | word1;
6350 #else
6351     return ((uint64_t)word1 << 32) | word0;
6352 #endif
6353 }
6354 #else /* TARGET_ABI_BITS == 32 */
6355 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6356 {
6357     return word0;
6358 }
6359 #endif /* TARGET_ABI_BITS != 32 */
6360 
6361 #ifdef TARGET_NR_truncate64
6362 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6363                                          abi_long arg2,
6364                                          abi_long arg3,
6365                                          abi_long arg4)
6366 {
6367     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6368         arg2 = arg3;
6369         arg3 = arg4;
6370     }
6371     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6372 }
6373 #endif
6374 
6375 #ifdef TARGET_NR_ftruncate64
6376 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6377                                           abi_long arg2,
6378                                           abi_long arg3,
6379                                           abi_long arg4)
6380 {
6381     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6382         arg2 = arg3;
6383         arg3 = arg4;
6384     }
6385     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6386 }
6387 #endif
6388 
6389 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6390                                                  abi_ulong target_addr)
6391 {
6392     struct target_itimerspec *target_itspec;
6393 
6394     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6395         return -TARGET_EFAULT;
6396     }
6397 
6398     host_itspec->it_interval.tv_sec =
6399                             tswapal(target_itspec->it_interval.tv_sec);
6400     host_itspec->it_interval.tv_nsec =
6401                             tswapal(target_itspec->it_interval.tv_nsec);
6402     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6403     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6404 
6405     unlock_user_struct(target_itspec, target_addr, 1);
6406     return 0;
6407 }
6408 
6409 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6410                                                struct itimerspec *host_its)
6411 {
6412     struct target_itimerspec *target_itspec;
6413 
6414     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6415         return -TARGET_EFAULT;
6416     }
6417 
6418     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6419     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6420 
6421     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6422     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6423 
6424     unlock_user_struct(target_itspec, target_addr, 0);
6425     return 0;
6426 }
6427 
6428 static inline abi_long target_to_host_timex(struct timex *host_tx,
6429                                             abi_long target_addr)
6430 {
6431     struct target_timex *target_tx;
6432 
6433     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6434         return -TARGET_EFAULT;
6435     }
6436 
6437     __get_user(host_tx->modes, &target_tx->modes);
6438     __get_user(host_tx->offset, &target_tx->offset);
6439     __get_user(host_tx->freq, &target_tx->freq);
6440     __get_user(host_tx->maxerror, &target_tx->maxerror);
6441     __get_user(host_tx->esterror, &target_tx->esterror);
6442     __get_user(host_tx->status, &target_tx->status);
6443     __get_user(host_tx->constant, &target_tx->constant);
6444     __get_user(host_tx->precision, &target_tx->precision);
6445     __get_user(host_tx->tolerance, &target_tx->tolerance);
6446     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6447     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6448     __get_user(host_tx->tick, &target_tx->tick);
6449     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6450     __get_user(host_tx->jitter, &target_tx->jitter);
6451     __get_user(host_tx->shift, &target_tx->shift);
6452     __get_user(host_tx->stabil, &target_tx->stabil);
6453     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6454     __get_user(host_tx->calcnt, &target_tx->calcnt);
6455     __get_user(host_tx->errcnt, &target_tx->errcnt);
6456     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6457     __get_user(host_tx->tai, &target_tx->tai);
6458 
6459     unlock_user_struct(target_tx, target_addr, 0);
6460     return 0;
6461 }
6462 
6463 static inline abi_long host_to_target_timex(abi_long target_addr,
6464                                             struct timex *host_tx)
6465 {
6466     struct target_timex *target_tx;
6467 
6468     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6469         return -TARGET_EFAULT;
6470     }
6471 
6472     __put_user(host_tx->modes, &target_tx->modes);
6473     __put_user(host_tx->offset, &target_tx->offset);
6474     __put_user(host_tx->freq, &target_tx->freq);
6475     __put_user(host_tx->maxerror, &target_tx->maxerror);
6476     __put_user(host_tx->esterror, &target_tx->esterror);
6477     __put_user(host_tx->status, &target_tx->status);
6478     __put_user(host_tx->constant, &target_tx->constant);
6479     __put_user(host_tx->precision, &target_tx->precision);
6480     __put_user(host_tx->tolerance, &target_tx->tolerance);
6481     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6482     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6483     __put_user(host_tx->tick, &target_tx->tick);
6484     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6485     __put_user(host_tx->jitter, &target_tx->jitter);
6486     __put_user(host_tx->shift, &target_tx->shift);
6487     __put_user(host_tx->stabil, &target_tx->stabil);
6488     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6489     __put_user(host_tx->calcnt, &target_tx->calcnt);
6490     __put_user(host_tx->errcnt, &target_tx->errcnt);
6491     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6492     __put_user(host_tx->tai, &target_tx->tai);
6493 
6494     unlock_user_struct(target_tx, target_addr, 1);
6495     return 0;
6496 }
6497 
6498 
6499 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6500                                                abi_ulong target_addr)
6501 {
6502     struct target_sigevent *target_sevp;
6503 
6504     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6505         return -TARGET_EFAULT;
6506     }
6507 
6508     /* This union is awkward on 64 bit systems because it has a 32 bit
6509      * integer and a pointer in it; we follow the conversion approach
6510      * used for handling sigval types in signal.c so the guest should get
6511      * the correct value back even if we did a 64 bit byteswap and it's
6512      * using the 32 bit integer.
6513      */
6514     host_sevp->sigev_value.sival_ptr =
6515         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6516     host_sevp->sigev_signo =
6517         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6518     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6519     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6520 
6521     unlock_user_struct(target_sevp, target_addr, 1);
6522     return 0;
6523 }
6524 
6525 #if defined(TARGET_NR_mlockall)
6526 static inline int target_to_host_mlockall_arg(int arg)
6527 {
6528     int result = 0;
6529 
6530     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6531         result |= MCL_CURRENT;
6532     }
6533     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6534         result |= MCL_FUTURE;
6535     }
6536     return result;
6537 }
6538 #endif
6539 
6540 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6541      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6542      defined(TARGET_NR_newfstatat))
6543 static inline abi_long host_to_target_stat64(void *cpu_env,
6544                                              abi_ulong target_addr,
6545                                              struct stat *host_st)
6546 {
6547 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6548     if (((CPUARMState *)cpu_env)->eabi) {
6549         struct target_eabi_stat64 *target_st;
6550 
6551         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6552             return -TARGET_EFAULT;
6553         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6554         __put_user(host_st->st_dev, &target_st->st_dev);
6555         __put_user(host_st->st_ino, &target_st->st_ino);
6556 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6557         __put_user(host_st->st_ino, &target_st->__st_ino);
6558 #endif
6559         __put_user(host_st->st_mode, &target_st->st_mode);
6560         __put_user(host_st->st_nlink, &target_st->st_nlink);
6561         __put_user(host_st->st_uid, &target_st->st_uid);
6562         __put_user(host_st->st_gid, &target_st->st_gid);
6563         __put_user(host_st->st_rdev, &target_st->st_rdev);
6564         __put_user(host_st->st_size, &target_st->st_size);
6565         __put_user(host_st->st_blksize, &target_st->st_blksize);
6566         __put_user(host_st->st_blocks, &target_st->st_blocks);
6567         __put_user(host_st->st_atime, &target_st->target_st_atime);
6568         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6569         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6570 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6571         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6572         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6573         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6574 #endif
6575         unlock_user_struct(target_st, target_addr, 1);
6576     } else
6577 #endif
6578     {
6579 #if defined(TARGET_HAS_STRUCT_STAT64)
6580         struct target_stat64 *target_st;
6581 #else
6582         struct target_stat *target_st;
6583 #endif
6584 
6585         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6586             return -TARGET_EFAULT;
6587         memset(target_st, 0, sizeof(*target_st));
6588         __put_user(host_st->st_dev, &target_st->st_dev);
6589         __put_user(host_st->st_ino, &target_st->st_ino);
6590 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6591         __put_user(host_st->st_ino, &target_st->__st_ino);
6592 #endif
6593         __put_user(host_st->st_mode, &target_st->st_mode);
6594         __put_user(host_st->st_nlink, &target_st->st_nlink);
6595         __put_user(host_st->st_uid, &target_st->st_uid);
6596         __put_user(host_st->st_gid, &target_st->st_gid);
6597         __put_user(host_st->st_rdev, &target_st->st_rdev);
6598         /* XXX: better use of kernel struct */
6599         __put_user(host_st->st_size, &target_st->st_size);
6600         __put_user(host_st->st_blksize, &target_st->st_blksize);
6601         __put_user(host_st->st_blocks, &target_st->st_blocks);
6602         __put_user(host_st->st_atime, &target_st->target_st_atime);
6603         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6604         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6605 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6606         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6607         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6608         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6609 #endif
6610         unlock_user_struct(target_st, target_addr, 1);
6611     }
6612 
6613     return 0;
6614 }
6615 #endif
6616 
6617 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6618 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6619                                             abi_ulong target_addr)
6620 {
6621     struct target_statx *target_stx;
6622 
6623     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6624         return -TARGET_EFAULT;
6625     }
6626     memset(target_stx, 0, sizeof(*target_stx));
6627 
6628     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6629     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6630     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6631     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6632     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6633     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6634     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6635     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6636     __put_user(host_stx->stx_size, &target_stx->stx_size);
6637     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6638     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6639     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6640     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6641     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_atime.tv_sec);
6642     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6643     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_atime.tv_sec);
6644     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6645     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_atime.tv_sec);
6646     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6647     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6648     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6649     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6650     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6651 
6652     unlock_user_struct(target_stx, target_addr, 1);
6653 
6654     return 0;
6655 }
6656 #endif
6657 
6658 
6659 /* ??? Using host futex calls even when target atomic operations
6660    are not really atomic probably breaks things.  However implementing
6661    futexes locally would make futexes shared between multiple processes
6662    tricky.  However they're probably useless because guest atomic
6663    operations won't work either.  */
6664 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6665                     target_ulong uaddr2, int val3)
6666 {
6667     struct timespec ts, *pts;
6668     int base_op;
6669 
6670     /* ??? We assume FUTEX_* constants are the same on both host
6671        and target.  */
6672 #ifdef FUTEX_CMD_MASK
6673     base_op = op & FUTEX_CMD_MASK;
6674 #else
6675     base_op = op;
6676 #endif
6677     switch (base_op) {
6678     case FUTEX_WAIT:
6679     case FUTEX_WAIT_BITSET:
6680         if (timeout) {
6681             pts = &ts;
6682             target_to_host_timespec(pts, timeout);
6683         } else {
6684             pts = NULL;
6685         }
6686         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6687                          pts, NULL, val3));
6688     case FUTEX_WAKE:
6689         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6690     case FUTEX_FD:
6691         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6692     case FUTEX_REQUEUE:
6693     case FUTEX_CMP_REQUEUE:
6694     case FUTEX_WAKE_OP:
6695         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6696            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6697            But the prototype takes a `struct timespec *'; insert casts
6698            to satisfy the compiler.  We do not need to tswap TIMEOUT
6699            since it's not compared to guest memory.  */
6700         pts = (struct timespec *)(uintptr_t) timeout;
6701         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6702                                     g2h(uaddr2),
6703                                     (base_op == FUTEX_CMP_REQUEUE
6704                                      ? tswap32(val3)
6705                                      : val3)));
6706     default:
6707         return -TARGET_ENOSYS;
6708     }
6709 }
6710 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6711 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6712                                      abi_long handle, abi_long mount_id,
6713                                      abi_long flags)
6714 {
6715     struct file_handle *target_fh;
6716     struct file_handle *fh;
6717     int mid = 0;
6718     abi_long ret;
6719     char *name;
6720     unsigned int size, total_size;
6721 
6722     if (get_user_s32(size, handle)) {
6723         return -TARGET_EFAULT;
6724     }
6725 
6726     name = lock_user_string(pathname);
6727     if (!name) {
6728         return -TARGET_EFAULT;
6729     }
6730 
6731     total_size = sizeof(struct file_handle) + size;
6732     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6733     if (!target_fh) {
6734         unlock_user(name, pathname, 0);
6735         return -TARGET_EFAULT;
6736     }
6737 
6738     fh = g_malloc0(total_size);
6739     fh->handle_bytes = size;
6740 
6741     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6742     unlock_user(name, pathname, 0);
6743 
6744     /* man name_to_handle_at(2):
6745      * Other than the use of the handle_bytes field, the caller should treat
6746      * the file_handle structure as an opaque data type
6747      */
6748 
6749     memcpy(target_fh, fh, total_size);
6750     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6751     target_fh->handle_type = tswap32(fh->handle_type);
6752     g_free(fh);
6753     unlock_user(target_fh, handle, total_size);
6754 
6755     if (put_user_s32(mid, mount_id)) {
6756         return -TARGET_EFAULT;
6757     }
6758 
6759     return ret;
6760 
6761 }
6762 #endif
6763 
6764 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6765 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6766                                      abi_long flags)
6767 {
6768     struct file_handle *target_fh;
6769     struct file_handle *fh;
6770     unsigned int size, total_size;
6771     abi_long ret;
6772 
6773     if (get_user_s32(size, handle)) {
6774         return -TARGET_EFAULT;
6775     }
6776 
6777     total_size = sizeof(struct file_handle) + size;
6778     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6779     if (!target_fh) {
6780         return -TARGET_EFAULT;
6781     }
6782 
6783     fh = g_memdup(target_fh, total_size);
6784     fh->handle_bytes = size;
6785     fh->handle_type = tswap32(target_fh->handle_type);
6786 
6787     ret = get_errno(open_by_handle_at(mount_fd, fh,
6788                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6789 
6790     g_free(fh);
6791 
6792     unlock_user(target_fh, handle, total_size);
6793 
6794     return ret;
6795 }
6796 #endif
6797 
6798 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6799 
6800 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6801 {
6802     int host_flags;
6803     target_sigset_t *target_mask;
6804     sigset_t host_mask;
6805     abi_long ret;
6806 
6807     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6808         return -TARGET_EINVAL;
6809     }
6810     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6811         return -TARGET_EFAULT;
6812     }
6813 
6814     target_to_host_sigset(&host_mask, target_mask);
6815 
6816     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6817 
6818     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6819     if (ret >= 0) {
6820         fd_trans_register(ret, &target_signalfd_trans);
6821     }
6822 
6823     unlock_user_struct(target_mask, mask, 0);
6824 
6825     return ret;
6826 }
6827 #endif
6828 
6829 /* Map host to target signal numbers for the wait family of syscalls.
6830    Assume all other status bits are the same.  */
6831 int host_to_target_waitstatus(int status)
6832 {
6833     if (WIFSIGNALED(status)) {
6834         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6835     }
6836     if (WIFSTOPPED(status)) {
6837         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6838                | (status & 0xff);
6839     }
6840     return status;
6841 }
6842 
6843 static int open_self_cmdline(void *cpu_env, int fd)
6844 {
6845     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6846     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6847     int i;
6848 
6849     for (i = 0; i < bprm->argc; i++) {
6850         size_t len = strlen(bprm->argv[i]) + 1;
6851 
6852         if (write(fd, bprm->argv[i], len) != len) {
6853             return -1;
6854         }
6855     }
6856 
6857     return 0;
6858 }
6859 
6860 static int open_self_maps(void *cpu_env, int fd)
6861 {
6862     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6863     TaskState *ts = cpu->opaque;
6864     FILE *fp;
6865     char *line = NULL;
6866     size_t len = 0;
6867     ssize_t read;
6868 
6869     fp = fopen("/proc/self/maps", "r");
6870     if (fp == NULL) {
6871         return -1;
6872     }
6873 
6874     while ((read = getline(&line, &len, fp)) != -1) {
6875         int fields, dev_maj, dev_min, inode;
6876         uint64_t min, max, offset;
6877         char flag_r, flag_w, flag_x, flag_p;
6878         char path[512] = "";
6879         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6880                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6881                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6882 
6883         if ((fields < 10) || (fields > 11)) {
6884             continue;
6885         }
6886         if (h2g_valid(min)) {
6887             int flags = page_get_flags(h2g(min));
6888             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6889             if (page_check_range(h2g(min), max - min, flags) == -1) {
6890                 continue;
6891             }
6892             if (h2g(min) == ts->info->stack_limit) {
6893                 pstrcpy(path, sizeof(path), "      [stack]");
6894             }
6895             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6896                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6897                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6898                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6899                     path[0] ? "         " : "", path);
6900         }
6901     }
6902 
6903     free(line);
6904     fclose(fp);
6905 
6906     return 0;
6907 }
6908 
6909 static int open_self_stat(void *cpu_env, int fd)
6910 {
6911     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6912     TaskState *ts = cpu->opaque;
6913     abi_ulong start_stack = ts->info->start_stack;
6914     int i;
6915 
6916     for (i = 0; i < 44; i++) {
6917       char buf[128];
6918       int len;
6919       uint64_t val = 0;
6920 
6921       if (i == 0) {
6922         /* pid */
6923         val = getpid();
6924         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6925       } else if (i == 1) {
6926         /* app name */
6927         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6928       } else if (i == 27) {
6929         /* stack bottom */
6930         val = start_stack;
6931         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6932       } else {
6933         /* for the rest, there is MasterCard */
6934         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6935       }
6936 
6937       len = strlen(buf);
6938       if (write(fd, buf, len) != len) {
6939           return -1;
6940       }
6941     }
6942 
6943     return 0;
6944 }
6945 
6946 static int open_self_auxv(void *cpu_env, int fd)
6947 {
6948     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6949     TaskState *ts = cpu->opaque;
6950     abi_ulong auxv = ts->info->saved_auxv;
6951     abi_ulong len = ts->info->auxv_len;
6952     char *ptr;
6953 
6954     /*
6955      * Auxiliary vector is stored in target process stack.
6956      * read in whole auxv vector and copy it to file
6957      */
6958     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6959     if (ptr != NULL) {
6960         while (len > 0) {
6961             ssize_t r;
6962             r = write(fd, ptr, len);
6963             if (r <= 0) {
6964                 break;
6965             }
6966             len -= r;
6967             ptr += r;
6968         }
6969         lseek(fd, 0, SEEK_SET);
6970         unlock_user(ptr, auxv, len);
6971     }
6972 
6973     return 0;
6974 }
6975 
6976 static int is_proc_myself(const char *filename, const char *entry)
6977 {
6978     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6979         filename += strlen("/proc/");
6980         if (!strncmp(filename, "self/", strlen("self/"))) {
6981             filename += strlen("self/");
6982         } else if (*filename >= '1' && *filename <= '9') {
6983             char myself[80];
6984             snprintf(myself, sizeof(myself), "%d/", getpid());
6985             if (!strncmp(filename, myself, strlen(myself))) {
6986                 filename += strlen(myself);
6987             } else {
6988                 return 0;
6989             }
6990         } else {
6991             return 0;
6992         }
6993         if (!strcmp(filename, entry)) {
6994             return 1;
6995         }
6996     }
6997     return 0;
6998 }
6999 
7000 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7001     defined(TARGET_SPARC) || defined(TARGET_M68K)
7002 static int is_proc(const char *filename, const char *entry)
7003 {
7004     return strcmp(filename, entry) == 0;
7005 }
7006 #endif
7007 
7008 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7009 static int open_net_route(void *cpu_env, int fd)
7010 {
7011     FILE *fp;
7012     char *line = NULL;
7013     size_t len = 0;
7014     ssize_t read;
7015 
7016     fp = fopen("/proc/net/route", "r");
7017     if (fp == NULL) {
7018         return -1;
7019     }
7020 
7021     /* read header */
7022 
7023     read = getline(&line, &len, fp);
7024     dprintf(fd, "%s", line);
7025 
7026     /* read routes */
7027 
7028     while ((read = getline(&line, &len, fp)) != -1) {
7029         char iface[16];
7030         uint32_t dest, gw, mask;
7031         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7032         int fields;
7033 
7034         fields = sscanf(line,
7035                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7036                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7037                         &mask, &mtu, &window, &irtt);
7038         if (fields != 11) {
7039             continue;
7040         }
7041         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7042                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7043                 metric, tswap32(mask), mtu, window, irtt);
7044     }
7045 
7046     free(line);
7047     fclose(fp);
7048 
7049     return 0;
7050 }
7051 #endif
7052 
7053 #if defined(TARGET_SPARC)
7054 static int open_cpuinfo(void *cpu_env, int fd)
7055 {
7056     dprintf(fd, "type\t\t: sun4u\n");
7057     return 0;
7058 }
7059 #endif
7060 
7061 #if defined(TARGET_M68K)
7062 static int open_hardware(void *cpu_env, int fd)
7063 {
7064     dprintf(fd, "Model:\t\tqemu-m68k\n");
7065     return 0;
7066 }
7067 #endif
7068 
7069 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7070 {
7071     struct fake_open {
7072         const char *filename;
7073         int (*fill)(void *cpu_env, int fd);
7074         int (*cmp)(const char *s1, const char *s2);
7075     };
7076     const struct fake_open *fake_open;
7077     static const struct fake_open fakes[] = {
7078         { "maps", open_self_maps, is_proc_myself },
7079         { "stat", open_self_stat, is_proc_myself },
7080         { "auxv", open_self_auxv, is_proc_myself },
7081         { "cmdline", open_self_cmdline, is_proc_myself },
7082 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7083         { "/proc/net/route", open_net_route, is_proc },
7084 #endif
7085 #if defined(TARGET_SPARC)
7086         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7087 #endif
7088 #if defined(TARGET_M68K)
7089         { "/proc/hardware", open_hardware, is_proc },
7090 #endif
7091         { NULL, NULL, NULL }
7092     };
7093 
7094     if (is_proc_myself(pathname, "exe")) {
7095         int execfd = qemu_getauxval(AT_EXECFD);
7096         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7097     }
7098 
7099     for (fake_open = fakes; fake_open->filename; fake_open++) {
7100         if (fake_open->cmp(pathname, fake_open->filename)) {
7101             break;
7102         }
7103     }
7104 
7105     if (fake_open->filename) {
7106         const char *tmpdir;
7107         char filename[PATH_MAX];
7108         int fd, r;
7109 
7110         /* create temporary file to map stat to */
7111         tmpdir = getenv("TMPDIR");
7112         if (!tmpdir)
7113             tmpdir = "/tmp";
7114         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7115         fd = mkstemp(filename);
7116         if (fd < 0) {
7117             return fd;
7118         }
7119         unlink(filename);
7120 
7121         if ((r = fake_open->fill(cpu_env, fd))) {
7122             int e = errno;
7123             close(fd);
7124             errno = e;
7125             return r;
7126         }
7127         lseek(fd, 0, SEEK_SET);
7128 
7129         return fd;
7130     }
7131 
7132     return safe_openat(dirfd, path(pathname), flags, mode);
7133 }
7134 
7135 #define TIMER_MAGIC 0x0caf0000
7136 #define TIMER_MAGIC_MASK 0xffff0000
7137 
7138 /* Convert QEMU provided timer ID back to internal 16bit index format */
7139 static target_timer_t get_timer_id(abi_long arg)
7140 {
7141     target_timer_t timerid = arg;
7142 
7143     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7144         return -TARGET_EINVAL;
7145     }
7146 
7147     timerid &= 0xffff;
7148 
7149     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7150         return -TARGET_EINVAL;
7151     }
7152 
7153     return timerid;
7154 }
7155 
7156 static int target_to_host_cpu_mask(unsigned long *host_mask,
7157                                    size_t host_size,
7158                                    abi_ulong target_addr,
7159                                    size_t target_size)
7160 {
7161     unsigned target_bits = sizeof(abi_ulong) * 8;
7162     unsigned host_bits = sizeof(*host_mask) * 8;
7163     abi_ulong *target_mask;
7164     unsigned i, j;
7165 
7166     assert(host_size >= target_size);
7167 
7168     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7169     if (!target_mask) {
7170         return -TARGET_EFAULT;
7171     }
7172     memset(host_mask, 0, host_size);
7173 
7174     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7175         unsigned bit = i * target_bits;
7176         abi_ulong val;
7177 
7178         __get_user(val, &target_mask[i]);
7179         for (j = 0; j < target_bits; j++, bit++) {
7180             if (val & (1UL << j)) {
7181                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7182             }
7183         }
7184     }
7185 
7186     unlock_user(target_mask, target_addr, 0);
7187     return 0;
7188 }
7189 
7190 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7191                                    size_t host_size,
7192                                    abi_ulong target_addr,
7193                                    size_t target_size)
7194 {
7195     unsigned target_bits = sizeof(abi_ulong) * 8;
7196     unsigned host_bits = sizeof(*host_mask) * 8;
7197     abi_ulong *target_mask;
7198     unsigned i, j;
7199 
7200     assert(host_size >= target_size);
7201 
7202     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7203     if (!target_mask) {
7204         return -TARGET_EFAULT;
7205     }
7206 
7207     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7208         unsigned bit = i * target_bits;
7209         abi_ulong val = 0;
7210 
7211         for (j = 0; j < target_bits; j++, bit++) {
7212             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7213                 val |= 1UL << j;
7214             }
7215         }
7216         __put_user(val, &target_mask[i]);
7217     }
7218 
7219     unlock_user(target_mask, target_addr, target_size);
7220     return 0;
7221 }
7222 
7223 /* This is an internal helper for do_syscall so that it is easier
7224  * to have a single return point, so that actions, such as logging
7225  * of syscall results, can be performed.
7226  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7227  */
7228 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7229                             abi_long arg2, abi_long arg3, abi_long arg4,
7230                             abi_long arg5, abi_long arg6, abi_long arg7,
7231                             abi_long arg8)
7232 {
7233     CPUState *cpu = env_cpu(cpu_env);
7234     abi_long ret;
7235 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7236     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7237     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7238     || defined(TARGET_NR_statx)
7239     struct stat st;
7240 #endif
7241 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7242     || defined(TARGET_NR_fstatfs)
7243     struct statfs stfs;
7244 #endif
7245     void *p;
7246 
7247     switch(num) {
7248     case TARGET_NR_exit:
7249         /* In old applications this may be used to implement _exit(2).
7250            However in threaded applictions it is used for thread termination,
7251            and _exit_group is used for application termination.
7252            Do thread termination if we have more then one thread.  */
7253 
7254         if (block_signals()) {
7255             return -TARGET_ERESTARTSYS;
7256         }
7257 
7258         cpu_list_lock();
7259 
7260         if (CPU_NEXT(first_cpu)) {
7261             TaskState *ts;
7262 
7263             /* Remove the CPU from the list.  */
7264             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7265 
7266             cpu_list_unlock();
7267 
7268             ts = cpu->opaque;
7269             if (ts->child_tidptr) {
7270                 put_user_u32(0, ts->child_tidptr);
7271                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7272                           NULL, NULL, 0);
7273             }
7274             thread_cpu = NULL;
7275             object_unref(OBJECT(cpu));
7276             g_free(ts);
7277             rcu_unregister_thread();
7278             pthread_exit(NULL);
7279         }
7280 
7281         cpu_list_unlock();
7282         preexit_cleanup(cpu_env, arg1);
7283         _exit(arg1);
7284         return 0; /* avoid warning */
7285     case TARGET_NR_read:
7286         if (arg2 == 0 && arg3 == 0) {
7287             return get_errno(safe_read(arg1, 0, 0));
7288         } else {
7289             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7290                 return -TARGET_EFAULT;
7291             ret = get_errno(safe_read(arg1, p, arg3));
7292             if (ret >= 0 &&
7293                 fd_trans_host_to_target_data(arg1)) {
7294                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7295             }
7296             unlock_user(p, arg2, ret);
7297         }
7298         return ret;
7299     case TARGET_NR_write:
7300         if (arg2 == 0 && arg3 == 0) {
7301             return get_errno(safe_write(arg1, 0, 0));
7302         }
7303         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7304             return -TARGET_EFAULT;
7305         if (fd_trans_target_to_host_data(arg1)) {
7306             void *copy = g_malloc(arg3);
7307             memcpy(copy, p, arg3);
7308             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7309             if (ret >= 0) {
7310                 ret = get_errno(safe_write(arg1, copy, ret));
7311             }
7312             g_free(copy);
7313         } else {
7314             ret = get_errno(safe_write(arg1, p, arg3));
7315         }
7316         unlock_user(p, arg2, 0);
7317         return ret;
7318 
7319 #ifdef TARGET_NR_open
7320     case TARGET_NR_open:
7321         if (!(p = lock_user_string(arg1)))
7322             return -TARGET_EFAULT;
7323         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7324                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7325                                   arg3));
7326         fd_trans_unregister(ret);
7327         unlock_user(p, arg1, 0);
7328         return ret;
7329 #endif
7330     case TARGET_NR_openat:
7331         if (!(p = lock_user_string(arg2)))
7332             return -TARGET_EFAULT;
7333         ret = get_errno(do_openat(cpu_env, arg1, p,
7334                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7335                                   arg4));
7336         fd_trans_unregister(ret);
7337         unlock_user(p, arg2, 0);
7338         return ret;
7339 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7340     case TARGET_NR_name_to_handle_at:
7341         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7342         return ret;
7343 #endif
7344 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7345     case TARGET_NR_open_by_handle_at:
7346         ret = do_open_by_handle_at(arg1, arg2, arg3);
7347         fd_trans_unregister(ret);
7348         return ret;
7349 #endif
7350     case TARGET_NR_close:
7351         fd_trans_unregister(arg1);
7352         return get_errno(close(arg1));
7353 
7354     case TARGET_NR_brk:
7355         return do_brk(arg1);
7356 #ifdef TARGET_NR_fork
7357     case TARGET_NR_fork:
7358         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7359 #endif
7360 #ifdef TARGET_NR_waitpid
7361     case TARGET_NR_waitpid:
7362         {
7363             int status;
7364             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7365             if (!is_error(ret) && arg2 && ret
7366                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7367                 return -TARGET_EFAULT;
7368         }
7369         return ret;
7370 #endif
7371 #ifdef TARGET_NR_waitid
7372     case TARGET_NR_waitid:
7373         {
7374             siginfo_t info;
7375             info.si_pid = 0;
7376             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7377             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7378                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7379                     return -TARGET_EFAULT;
7380                 host_to_target_siginfo(p, &info);
7381                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7382             }
7383         }
7384         return ret;
7385 #endif
7386 #ifdef TARGET_NR_creat /* not on alpha */
7387     case TARGET_NR_creat:
7388         if (!(p = lock_user_string(arg1)))
7389             return -TARGET_EFAULT;
7390         ret = get_errno(creat(p, arg2));
7391         fd_trans_unregister(ret);
7392         unlock_user(p, arg1, 0);
7393         return ret;
7394 #endif
7395 #ifdef TARGET_NR_link
7396     case TARGET_NR_link:
7397         {
7398             void * p2;
7399             p = lock_user_string(arg1);
7400             p2 = lock_user_string(arg2);
7401             if (!p || !p2)
7402                 ret = -TARGET_EFAULT;
7403             else
7404                 ret = get_errno(link(p, p2));
7405             unlock_user(p2, arg2, 0);
7406             unlock_user(p, arg1, 0);
7407         }
7408         return ret;
7409 #endif
7410 #if defined(TARGET_NR_linkat)
7411     case TARGET_NR_linkat:
7412         {
7413             void * p2 = NULL;
7414             if (!arg2 || !arg4)
7415                 return -TARGET_EFAULT;
7416             p  = lock_user_string(arg2);
7417             p2 = lock_user_string(arg4);
7418             if (!p || !p2)
7419                 ret = -TARGET_EFAULT;
7420             else
7421                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7422             unlock_user(p, arg2, 0);
7423             unlock_user(p2, arg4, 0);
7424         }
7425         return ret;
7426 #endif
7427 #ifdef TARGET_NR_unlink
7428     case TARGET_NR_unlink:
7429         if (!(p = lock_user_string(arg1)))
7430             return -TARGET_EFAULT;
7431         ret = get_errno(unlink(p));
7432         unlock_user(p, arg1, 0);
7433         return ret;
7434 #endif
7435 #if defined(TARGET_NR_unlinkat)
7436     case TARGET_NR_unlinkat:
7437         if (!(p = lock_user_string(arg2)))
7438             return -TARGET_EFAULT;
7439         ret = get_errno(unlinkat(arg1, p, arg3));
7440         unlock_user(p, arg2, 0);
7441         return ret;
7442 #endif
7443     case TARGET_NR_execve:
7444         {
7445             char **argp, **envp;
7446             int argc, envc;
7447             abi_ulong gp;
7448             abi_ulong guest_argp;
7449             abi_ulong guest_envp;
7450             abi_ulong addr;
7451             char **q;
7452             int total_size = 0;
7453 
7454             argc = 0;
7455             guest_argp = arg2;
7456             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7457                 if (get_user_ual(addr, gp))
7458                     return -TARGET_EFAULT;
7459                 if (!addr)
7460                     break;
7461                 argc++;
7462             }
7463             envc = 0;
7464             guest_envp = arg3;
7465             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7466                 if (get_user_ual(addr, gp))
7467                     return -TARGET_EFAULT;
7468                 if (!addr)
7469                     break;
7470                 envc++;
7471             }
7472 
7473             argp = g_new0(char *, argc + 1);
7474             envp = g_new0(char *, envc + 1);
7475 
7476             for (gp = guest_argp, q = argp; gp;
7477                   gp += sizeof(abi_ulong), q++) {
7478                 if (get_user_ual(addr, gp))
7479                     goto execve_efault;
7480                 if (!addr)
7481                     break;
7482                 if (!(*q = lock_user_string(addr)))
7483                     goto execve_efault;
7484                 total_size += strlen(*q) + 1;
7485             }
7486             *q = NULL;
7487 
7488             for (gp = guest_envp, q = envp; gp;
7489                   gp += sizeof(abi_ulong), q++) {
7490                 if (get_user_ual(addr, gp))
7491                     goto execve_efault;
7492                 if (!addr)
7493                     break;
7494                 if (!(*q = lock_user_string(addr)))
7495                     goto execve_efault;
7496                 total_size += strlen(*q) + 1;
7497             }
7498             *q = NULL;
7499 
7500             if (!(p = lock_user_string(arg1)))
7501                 goto execve_efault;
7502             /* Although execve() is not an interruptible syscall it is
7503              * a special case where we must use the safe_syscall wrapper:
7504              * if we allow a signal to happen before we make the host
7505              * syscall then we will 'lose' it, because at the point of
7506              * execve the process leaves QEMU's control. So we use the
7507              * safe syscall wrapper to ensure that we either take the
7508              * signal as a guest signal, or else it does not happen
7509              * before the execve completes and makes it the other
7510              * program's problem.
7511              */
7512             ret = get_errno(safe_execve(p, argp, envp));
7513             unlock_user(p, arg1, 0);
7514 
7515             goto execve_end;
7516 
7517         execve_efault:
7518             ret = -TARGET_EFAULT;
7519 
7520         execve_end:
7521             for (gp = guest_argp, q = argp; *q;
7522                   gp += sizeof(abi_ulong), q++) {
7523                 if (get_user_ual(addr, gp)
7524                     || !addr)
7525                     break;
7526                 unlock_user(*q, addr, 0);
7527             }
7528             for (gp = guest_envp, q = envp; *q;
7529                   gp += sizeof(abi_ulong), q++) {
7530                 if (get_user_ual(addr, gp)
7531                     || !addr)
7532                     break;
7533                 unlock_user(*q, addr, 0);
7534             }
7535 
7536             g_free(argp);
7537             g_free(envp);
7538         }
7539         return ret;
7540     case TARGET_NR_chdir:
7541         if (!(p = lock_user_string(arg1)))
7542             return -TARGET_EFAULT;
7543         ret = get_errno(chdir(p));
7544         unlock_user(p, arg1, 0);
7545         return ret;
7546 #ifdef TARGET_NR_time
7547     case TARGET_NR_time:
7548         {
7549             time_t host_time;
7550             ret = get_errno(time(&host_time));
7551             if (!is_error(ret)
7552                 && arg1
7553                 && put_user_sal(host_time, arg1))
7554                 return -TARGET_EFAULT;
7555         }
7556         return ret;
7557 #endif
7558 #ifdef TARGET_NR_mknod
7559     case TARGET_NR_mknod:
7560         if (!(p = lock_user_string(arg1)))
7561             return -TARGET_EFAULT;
7562         ret = get_errno(mknod(p, arg2, arg3));
7563         unlock_user(p, arg1, 0);
7564         return ret;
7565 #endif
7566 #if defined(TARGET_NR_mknodat)
7567     case TARGET_NR_mknodat:
7568         if (!(p = lock_user_string(arg2)))
7569             return -TARGET_EFAULT;
7570         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7571         unlock_user(p, arg2, 0);
7572         return ret;
7573 #endif
7574 #ifdef TARGET_NR_chmod
7575     case TARGET_NR_chmod:
7576         if (!(p = lock_user_string(arg1)))
7577             return -TARGET_EFAULT;
7578         ret = get_errno(chmod(p, arg2));
7579         unlock_user(p, arg1, 0);
7580         return ret;
7581 #endif
7582 #ifdef TARGET_NR_lseek
7583     case TARGET_NR_lseek:
7584         return get_errno(lseek(arg1, arg2, arg3));
7585 #endif
7586 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7587     /* Alpha specific */
7588     case TARGET_NR_getxpid:
7589         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7590         return get_errno(getpid());
7591 #endif
7592 #ifdef TARGET_NR_getpid
7593     case TARGET_NR_getpid:
7594         return get_errno(getpid());
7595 #endif
7596     case TARGET_NR_mount:
7597         {
7598             /* need to look at the data field */
7599             void *p2, *p3;
7600 
7601             if (arg1) {
7602                 p = lock_user_string(arg1);
7603                 if (!p) {
7604                     return -TARGET_EFAULT;
7605                 }
7606             } else {
7607                 p = NULL;
7608             }
7609 
7610             p2 = lock_user_string(arg2);
7611             if (!p2) {
7612                 if (arg1) {
7613                     unlock_user(p, arg1, 0);
7614                 }
7615                 return -TARGET_EFAULT;
7616             }
7617 
7618             if (arg3) {
7619                 p3 = lock_user_string(arg3);
7620                 if (!p3) {
7621                     if (arg1) {
7622                         unlock_user(p, arg1, 0);
7623                     }
7624                     unlock_user(p2, arg2, 0);
7625                     return -TARGET_EFAULT;
7626                 }
7627             } else {
7628                 p3 = NULL;
7629             }
7630 
7631             /* FIXME - arg5 should be locked, but it isn't clear how to
7632              * do that since it's not guaranteed to be a NULL-terminated
7633              * string.
7634              */
7635             if (!arg5) {
7636                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7637             } else {
7638                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7639             }
7640             ret = get_errno(ret);
7641 
7642             if (arg1) {
7643                 unlock_user(p, arg1, 0);
7644             }
7645             unlock_user(p2, arg2, 0);
7646             if (arg3) {
7647                 unlock_user(p3, arg3, 0);
7648             }
7649         }
7650         return ret;
7651 #ifdef TARGET_NR_umount
7652     case TARGET_NR_umount:
7653         if (!(p = lock_user_string(arg1)))
7654             return -TARGET_EFAULT;
7655         ret = get_errno(umount(p));
7656         unlock_user(p, arg1, 0);
7657         return ret;
7658 #endif
7659 #ifdef TARGET_NR_stime /* not on alpha */
7660     case TARGET_NR_stime:
7661         {
7662             time_t host_time;
7663             if (get_user_sal(host_time, arg1))
7664                 return -TARGET_EFAULT;
7665             return get_errno(stime(&host_time));
7666         }
7667 #endif
7668 #ifdef TARGET_NR_alarm /* not on alpha */
7669     case TARGET_NR_alarm:
7670         return alarm(arg1);
7671 #endif
7672 #ifdef TARGET_NR_pause /* not on alpha */
7673     case TARGET_NR_pause:
7674         if (!block_signals()) {
7675             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7676         }
7677         return -TARGET_EINTR;
7678 #endif
7679 #ifdef TARGET_NR_utime
7680     case TARGET_NR_utime:
7681         {
7682             struct utimbuf tbuf, *host_tbuf;
7683             struct target_utimbuf *target_tbuf;
7684             if (arg2) {
7685                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7686                     return -TARGET_EFAULT;
7687                 tbuf.actime = tswapal(target_tbuf->actime);
7688                 tbuf.modtime = tswapal(target_tbuf->modtime);
7689                 unlock_user_struct(target_tbuf, arg2, 0);
7690                 host_tbuf = &tbuf;
7691             } else {
7692                 host_tbuf = NULL;
7693             }
7694             if (!(p = lock_user_string(arg1)))
7695                 return -TARGET_EFAULT;
7696             ret = get_errno(utime(p, host_tbuf));
7697             unlock_user(p, arg1, 0);
7698         }
7699         return ret;
7700 #endif
7701 #ifdef TARGET_NR_utimes
7702     case TARGET_NR_utimes:
7703         {
7704             struct timeval *tvp, tv[2];
7705             if (arg2) {
7706                 if (copy_from_user_timeval(&tv[0], arg2)
7707                     || copy_from_user_timeval(&tv[1],
7708                                               arg2 + sizeof(struct target_timeval)))
7709                     return -TARGET_EFAULT;
7710                 tvp = tv;
7711             } else {
7712                 tvp = NULL;
7713             }
7714             if (!(p = lock_user_string(arg1)))
7715                 return -TARGET_EFAULT;
7716             ret = get_errno(utimes(p, tvp));
7717             unlock_user(p, arg1, 0);
7718         }
7719         return ret;
7720 #endif
7721 #if defined(TARGET_NR_futimesat)
7722     case TARGET_NR_futimesat:
7723         {
7724             struct timeval *tvp, tv[2];
7725             if (arg3) {
7726                 if (copy_from_user_timeval(&tv[0], arg3)
7727                     || copy_from_user_timeval(&tv[1],
7728                                               arg3 + sizeof(struct target_timeval)))
7729                     return -TARGET_EFAULT;
7730                 tvp = tv;
7731             } else {
7732                 tvp = NULL;
7733             }
7734             if (!(p = lock_user_string(arg2))) {
7735                 return -TARGET_EFAULT;
7736             }
7737             ret = get_errno(futimesat(arg1, path(p), tvp));
7738             unlock_user(p, arg2, 0);
7739         }
7740         return ret;
7741 #endif
7742 #ifdef TARGET_NR_access
7743     case TARGET_NR_access:
7744         if (!(p = lock_user_string(arg1))) {
7745             return -TARGET_EFAULT;
7746         }
7747         ret = get_errno(access(path(p), arg2));
7748         unlock_user(p, arg1, 0);
7749         return ret;
7750 #endif
7751 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7752     case TARGET_NR_faccessat:
7753         if (!(p = lock_user_string(arg2))) {
7754             return -TARGET_EFAULT;
7755         }
7756         ret = get_errno(faccessat(arg1, p, arg3, 0));
7757         unlock_user(p, arg2, 0);
7758         return ret;
7759 #endif
7760 #ifdef TARGET_NR_nice /* not on alpha */
7761     case TARGET_NR_nice:
7762         return get_errno(nice(arg1));
7763 #endif
7764     case TARGET_NR_sync:
7765         sync();
7766         return 0;
7767 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7768     case TARGET_NR_syncfs:
7769         return get_errno(syncfs(arg1));
7770 #endif
7771     case TARGET_NR_kill:
7772         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7773 #ifdef TARGET_NR_rename
7774     case TARGET_NR_rename:
7775         {
7776             void *p2;
7777             p = lock_user_string(arg1);
7778             p2 = lock_user_string(arg2);
7779             if (!p || !p2)
7780                 ret = -TARGET_EFAULT;
7781             else
7782                 ret = get_errno(rename(p, p2));
7783             unlock_user(p2, arg2, 0);
7784             unlock_user(p, arg1, 0);
7785         }
7786         return ret;
7787 #endif
7788 #if defined(TARGET_NR_renameat)
7789     case TARGET_NR_renameat:
7790         {
7791             void *p2;
7792             p  = lock_user_string(arg2);
7793             p2 = lock_user_string(arg4);
7794             if (!p || !p2)
7795                 ret = -TARGET_EFAULT;
7796             else
7797                 ret = get_errno(renameat(arg1, p, arg3, p2));
7798             unlock_user(p2, arg4, 0);
7799             unlock_user(p, arg2, 0);
7800         }
7801         return ret;
7802 #endif
7803 #if defined(TARGET_NR_renameat2)
7804     case TARGET_NR_renameat2:
7805         {
7806             void *p2;
7807             p  = lock_user_string(arg2);
7808             p2 = lock_user_string(arg4);
7809             if (!p || !p2) {
7810                 ret = -TARGET_EFAULT;
7811             } else {
7812                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7813             }
7814             unlock_user(p2, arg4, 0);
7815             unlock_user(p, arg2, 0);
7816         }
7817         return ret;
7818 #endif
7819 #ifdef TARGET_NR_mkdir
7820     case TARGET_NR_mkdir:
7821         if (!(p = lock_user_string(arg1)))
7822             return -TARGET_EFAULT;
7823         ret = get_errno(mkdir(p, arg2));
7824         unlock_user(p, arg1, 0);
7825         return ret;
7826 #endif
7827 #if defined(TARGET_NR_mkdirat)
7828     case TARGET_NR_mkdirat:
7829         if (!(p = lock_user_string(arg2)))
7830             return -TARGET_EFAULT;
7831         ret = get_errno(mkdirat(arg1, p, arg3));
7832         unlock_user(p, arg2, 0);
7833         return ret;
7834 #endif
7835 #ifdef TARGET_NR_rmdir
7836     case TARGET_NR_rmdir:
7837         if (!(p = lock_user_string(arg1)))
7838             return -TARGET_EFAULT;
7839         ret = get_errno(rmdir(p));
7840         unlock_user(p, arg1, 0);
7841         return ret;
7842 #endif
7843     case TARGET_NR_dup:
7844         ret = get_errno(dup(arg1));
7845         if (ret >= 0) {
7846             fd_trans_dup(arg1, ret);
7847         }
7848         return ret;
7849 #ifdef TARGET_NR_pipe
7850     case TARGET_NR_pipe:
7851         return do_pipe(cpu_env, arg1, 0, 0);
7852 #endif
7853 #ifdef TARGET_NR_pipe2
7854     case TARGET_NR_pipe2:
7855         return do_pipe(cpu_env, arg1,
7856                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7857 #endif
7858     case TARGET_NR_times:
7859         {
7860             struct target_tms *tmsp;
7861             struct tms tms;
7862             ret = get_errno(times(&tms));
7863             if (arg1) {
7864                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7865                 if (!tmsp)
7866                     return -TARGET_EFAULT;
7867                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7868                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7869                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7870                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7871             }
7872             if (!is_error(ret))
7873                 ret = host_to_target_clock_t(ret);
7874         }
7875         return ret;
7876     case TARGET_NR_acct:
7877         if (arg1 == 0) {
7878             ret = get_errno(acct(NULL));
7879         } else {
7880             if (!(p = lock_user_string(arg1))) {
7881                 return -TARGET_EFAULT;
7882             }
7883             ret = get_errno(acct(path(p)));
7884             unlock_user(p, arg1, 0);
7885         }
7886         return ret;
7887 #ifdef TARGET_NR_umount2
7888     case TARGET_NR_umount2:
7889         if (!(p = lock_user_string(arg1)))
7890             return -TARGET_EFAULT;
7891         ret = get_errno(umount2(p, arg2));
7892         unlock_user(p, arg1, 0);
7893         return ret;
7894 #endif
7895     case TARGET_NR_ioctl:
7896         return do_ioctl(arg1, arg2, arg3);
7897 #ifdef TARGET_NR_fcntl
7898     case TARGET_NR_fcntl:
7899         return do_fcntl(arg1, arg2, arg3);
7900 #endif
7901     case TARGET_NR_setpgid:
7902         return get_errno(setpgid(arg1, arg2));
7903     case TARGET_NR_umask:
7904         return get_errno(umask(arg1));
7905     case TARGET_NR_chroot:
7906         if (!(p = lock_user_string(arg1)))
7907             return -TARGET_EFAULT;
7908         ret = get_errno(chroot(p));
7909         unlock_user(p, arg1, 0);
7910         return ret;
7911 #ifdef TARGET_NR_dup2
7912     case TARGET_NR_dup2:
7913         ret = get_errno(dup2(arg1, arg2));
7914         if (ret >= 0) {
7915             fd_trans_dup(arg1, arg2);
7916         }
7917         return ret;
7918 #endif
7919 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7920     case TARGET_NR_dup3:
7921     {
7922         int host_flags;
7923 
7924         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7925             return -EINVAL;
7926         }
7927         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7928         ret = get_errno(dup3(arg1, arg2, host_flags));
7929         if (ret >= 0) {
7930             fd_trans_dup(arg1, arg2);
7931         }
7932         return ret;
7933     }
7934 #endif
7935 #ifdef TARGET_NR_getppid /* not on alpha */
7936     case TARGET_NR_getppid:
7937         return get_errno(getppid());
7938 #endif
7939 #ifdef TARGET_NR_getpgrp
7940     case TARGET_NR_getpgrp:
7941         return get_errno(getpgrp());
7942 #endif
7943     case TARGET_NR_setsid:
7944         return get_errno(setsid());
7945 #ifdef TARGET_NR_sigaction
7946     case TARGET_NR_sigaction:
7947         {
7948 #if defined(TARGET_ALPHA)
7949             struct target_sigaction act, oact, *pact = 0;
7950             struct target_old_sigaction *old_act;
7951             if (arg2) {
7952                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7953                     return -TARGET_EFAULT;
7954                 act._sa_handler = old_act->_sa_handler;
7955                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7956                 act.sa_flags = old_act->sa_flags;
7957                 act.sa_restorer = 0;
7958                 unlock_user_struct(old_act, arg2, 0);
7959                 pact = &act;
7960             }
7961             ret = get_errno(do_sigaction(arg1, pact, &oact));
7962             if (!is_error(ret) && arg3) {
7963                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7964                     return -TARGET_EFAULT;
7965                 old_act->_sa_handler = oact._sa_handler;
7966                 old_act->sa_mask = oact.sa_mask.sig[0];
7967                 old_act->sa_flags = oact.sa_flags;
7968                 unlock_user_struct(old_act, arg3, 1);
7969             }
7970 #elif defined(TARGET_MIPS)
7971 	    struct target_sigaction act, oact, *pact, *old_act;
7972 
7973 	    if (arg2) {
7974                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7975                     return -TARGET_EFAULT;
7976 		act._sa_handler = old_act->_sa_handler;
7977 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7978 		act.sa_flags = old_act->sa_flags;
7979 		unlock_user_struct(old_act, arg2, 0);
7980 		pact = &act;
7981 	    } else {
7982 		pact = NULL;
7983 	    }
7984 
7985 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7986 
7987 	    if (!is_error(ret) && arg3) {
7988                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7989                     return -TARGET_EFAULT;
7990 		old_act->_sa_handler = oact._sa_handler;
7991 		old_act->sa_flags = oact.sa_flags;
7992 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7993 		old_act->sa_mask.sig[1] = 0;
7994 		old_act->sa_mask.sig[2] = 0;
7995 		old_act->sa_mask.sig[3] = 0;
7996 		unlock_user_struct(old_act, arg3, 1);
7997 	    }
7998 #else
7999             struct target_old_sigaction *old_act;
8000             struct target_sigaction act, oact, *pact;
8001             if (arg2) {
8002                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8003                     return -TARGET_EFAULT;
8004                 act._sa_handler = old_act->_sa_handler;
8005                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8006                 act.sa_flags = old_act->sa_flags;
8007                 act.sa_restorer = old_act->sa_restorer;
8008 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8009                 act.ka_restorer = 0;
8010 #endif
8011                 unlock_user_struct(old_act, arg2, 0);
8012                 pact = &act;
8013             } else {
8014                 pact = NULL;
8015             }
8016             ret = get_errno(do_sigaction(arg1, pact, &oact));
8017             if (!is_error(ret) && arg3) {
8018                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8019                     return -TARGET_EFAULT;
8020                 old_act->_sa_handler = oact._sa_handler;
8021                 old_act->sa_mask = oact.sa_mask.sig[0];
8022                 old_act->sa_flags = oact.sa_flags;
8023                 old_act->sa_restorer = oact.sa_restorer;
8024                 unlock_user_struct(old_act, arg3, 1);
8025             }
8026 #endif
8027         }
8028         return ret;
8029 #endif
8030     case TARGET_NR_rt_sigaction:
8031         {
8032 #if defined(TARGET_ALPHA)
8033             /* For Alpha and SPARC this is a 5 argument syscall, with
8034              * a 'restorer' parameter which must be copied into the
8035              * sa_restorer field of the sigaction struct.
8036              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8037              * and arg5 is the sigsetsize.
8038              * Alpha also has a separate rt_sigaction struct that it uses
8039              * here; SPARC uses the usual sigaction struct.
8040              */
8041             struct target_rt_sigaction *rt_act;
8042             struct target_sigaction act, oact, *pact = 0;
8043 
8044             if (arg4 != sizeof(target_sigset_t)) {
8045                 return -TARGET_EINVAL;
8046             }
8047             if (arg2) {
8048                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8049                     return -TARGET_EFAULT;
8050                 act._sa_handler = rt_act->_sa_handler;
8051                 act.sa_mask = rt_act->sa_mask;
8052                 act.sa_flags = rt_act->sa_flags;
8053                 act.sa_restorer = arg5;
8054                 unlock_user_struct(rt_act, arg2, 0);
8055                 pact = &act;
8056             }
8057             ret = get_errno(do_sigaction(arg1, pact, &oact));
8058             if (!is_error(ret) && arg3) {
8059                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8060                     return -TARGET_EFAULT;
8061                 rt_act->_sa_handler = oact._sa_handler;
8062                 rt_act->sa_mask = oact.sa_mask;
8063                 rt_act->sa_flags = oact.sa_flags;
8064                 unlock_user_struct(rt_act, arg3, 1);
8065             }
8066 #else
8067 #ifdef TARGET_SPARC
8068             target_ulong restorer = arg4;
8069             target_ulong sigsetsize = arg5;
8070 #else
8071             target_ulong sigsetsize = arg4;
8072 #endif
8073             struct target_sigaction *act;
8074             struct target_sigaction *oact;
8075 
8076             if (sigsetsize != sizeof(target_sigset_t)) {
8077                 return -TARGET_EINVAL;
8078             }
8079             if (arg2) {
8080                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8081                     return -TARGET_EFAULT;
8082                 }
8083 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8084                 act->ka_restorer = restorer;
8085 #endif
8086             } else {
8087                 act = NULL;
8088             }
8089             if (arg3) {
8090                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8091                     ret = -TARGET_EFAULT;
8092                     goto rt_sigaction_fail;
8093                 }
8094             } else
8095                 oact = NULL;
8096             ret = get_errno(do_sigaction(arg1, act, oact));
8097 	rt_sigaction_fail:
8098             if (act)
8099                 unlock_user_struct(act, arg2, 0);
8100             if (oact)
8101                 unlock_user_struct(oact, arg3, 1);
8102 #endif
8103         }
8104         return ret;
8105 #ifdef TARGET_NR_sgetmask /* not on alpha */
8106     case TARGET_NR_sgetmask:
8107         {
8108             sigset_t cur_set;
8109             abi_ulong target_set;
8110             ret = do_sigprocmask(0, NULL, &cur_set);
8111             if (!ret) {
8112                 host_to_target_old_sigset(&target_set, &cur_set);
8113                 ret = target_set;
8114             }
8115         }
8116         return ret;
8117 #endif
8118 #ifdef TARGET_NR_ssetmask /* not on alpha */
8119     case TARGET_NR_ssetmask:
8120         {
8121             sigset_t set, oset;
8122             abi_ulong target_set = arg1;
8123             target_to_host_old_sigset(&set, &target_set);
8124             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8125             if (!ret) {
8126                 host_to_target_old_sigset(&target_set, &oset);
8127                 ret = target_set;
8128             }
8129         }
8130         return ret;
8131 #endif
8132 #ifdef TARGET_NR_sigprocmask
8133     case TARGET_NR_sigprocmask:
8134         {
8135 #if defined(TARGET_ALPHA)
8136             sigset_t set, oldset;
8137             abi_ulong mask;
8138             int how;
8139 
8140             switch (arg1) {
8141             case TARGET_SIG_BLOCK:
8142                 how = SIG_BLOCK;
8143                 break;
8144             case TARGET_SIG_UNBLOCK:
8145                 how = SIG_UNBLOCK;
8146                 break;
8147             case TARGET_SIG_SETMASK:
8148                 how = SIG_SETMASK;
8149                 break;
8150             default:
8151                 return -TARGET_EINVAL;
8152             }
8153             mask = arg2;
8154             target_to_host_old_sigset(&set, &mask);
8155 
8156             ret = do_sigprocmask(how, &set, &oldset);
8157             if (!is_error(ret)) {
8158                 host_to_target_old_sigset(&mask, &oldset);
8159                 ret = mask;
8160                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8161             }
8162 #else
8163             sigset_t set, oldset, *set_ptr;
8164             int how;
8165 
8166             if (arg2) {
8167                 switch (arg1) {
8168                 case TARGET_SIG_BLOCK:
8169                     how = SIG_BLOCK;
8170                     break;
8171                 case TARGET_SIG_UNBLOCK:
8172                     how = SIG_UNBLOCK;
8173                     break;
8174                 case TARGET_SIG_SETMASK:
8175                     how = SIG_SETMASK;
8176                     break;
8177                 default:
8178                     return -TARGET_EINVAL;
8179                 }
8180                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8181                     return -TARGET_EFAULT;
8182                 target_to_host_old_sigset(&set, p);
8183                 unlock_user(p, arg2, 0);
8184                 set_ptr = &set;
8185             } else {
8186                 how = 0;
8187                 set_ptr = NULL;
8188             }
8189             ret = do_sigprocmask(how, set_ptr, &oldset);
8190             if (!is_error(ret) && arg3) {
8191                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8192                     return -TARGET_EFAULT;
8193                 host_to_target_old_sigset(p, &oldset);
8194                 unlock_user(p, arg3, sizeof(target_sigset_t));
8195             }
8196 #endif
8197         }
8198         return ret;
8199 #endif
8200     case TARGET_NR_rt_sigprocmask:
8201         {
8202             int how = arg1;
8203             sigset_t set, oldset, *set_ptr;
8204 
8205             if (arg4 != sizeof(target_sigset_t)) {
8206                 return -TARGET_EINVAL;
8207             }
8208 
8209             if (arg2) {
8210                 switch(how) {
8211                 case TARGET_SIG_BLOCK:
8212                     how = SIG_BLOCK;
8213                     break;
8214                 case TARGET_SIG_UNBLOCK:
8215                     how = SIG_UNBLOCK;
8216                     break;
8217                 case TARGET_SIG_SETMASK:
8218                     how = SIG_SETMASK;
8219                     break;
8220                 default:
8221                     return -TARGET_EINVAL;
8222                 }
8223                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8224                     return -TARGET_EFAULT;
8225                 target_to_host_sigset(&set, p);
8226                 unlock_user(p, arg2, 0);
8227                 set_ptr = &set;
8228             } else {
8229                 how = 0;
8230                 set_ptr = NULL;
8231             }
8232             ret = do_sigprocmask(how, set_ptr, &oldset);
8233             if (!is_error(ret) && arg3) {
8234                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8235                     return -TARGET_EFAULT;
8236                 host_to_target_sigset(p, &oldset);
8237                 unlock_user(p, arg3, sizeof(target_sigset_t));
8238             }
8239         }
8240         return ret;
8241 #ifdef TARGET_NR_sigpending
8242     case TARGET_NR_sigpending:
8243         {
8244             sigset_t set;
8245             ret = get_errno(sigpending(&set));
8246             if (!is_error(ret)) {
8247                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8248                     return -TARGET_EFAULT;
8249                 host_to_target_old_sigset(p, &set);
8250                 unlock_user(p, arg1, sizeof(target_sigset_t));
8251             }
8252         }
8253         return ret;
8254 #endif
8255     case TARGET_NR_rt_sigpending:
8256         {
8257             sigset_t set;
8258 
8259             /* Yes, this check is >, not != like most. We follow the kernel's
8260              * logic and it does it like this because it implements
8261              * NR_sigpending through the same code path, and in that case
8262              * the old_sigset_t is smaller in size.
8263              */
8264             if (arg2 > sizeof(target_sigset_t)) {
8265                 return -TARGET_EINVAL;
8266             }
8267 
8268             ret = get_errno(sigpending(&set));
8269             if (!is_error(ret)) {
8270                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8271                     return -TARGET_EFAULT;
8272                 host_to_target_sigset(p, &set);
8273                 unlock_user(p, arg1, sizeof(target_sigset_t));
8274             }
8275         }
8276         return ret;
8277 #ifdef TARGET_NR_sigsuspend
8278     case TARGET_NR_sigsuspend:
8279         {
8280             TaskState *ts = cpu->opaque;
8281 #if defined(TARGET_ALPHA)
8282             abi_ulong mask = arg1;
8283             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8284 #else
8285             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8286                 return -TARGET_EFAULT;
8287             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8288             unlock_user(p, arg1, 0);
8289 #endif
8290             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8291                                                SIGSET_T_SIZE));
8292             if (ret != -TARGET_ERESTARTSYS) {
8293                 ts->in_sigsuspend = 1;
8294             }
8295         }
8296         return ret;
8297 #endif
8298     case TARGET_NR_rt_sigsuspend:
8299         {
8300             TaskState *ts = cpu->opaque;
8301 
8302             if (arg2 != sizeof(target_sigset_t)) {
8303                 return -TARGET_EINVAL;
8304             }
8305             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8306                 return -TARGET_EFAULT;
8307             target_to_host_sigset(&ts->sigsuspend_mask, p);
8308             unlock_user(p, arg1, 0);
8309             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8310                                                SIGSET_T_SIZE));
8311             if (ret != -TARGET_ERESTARTSYS) {
8312                 ts->in_sigsuspend = 1;
8313             }
8314         }
8315         return ret;
8316     case TARGET_NR_rt_sigtimedwait:
8317         {
8318             sigset_t set;
8319             struct timespec uts, *puts;
8320             siginfo_t uinfo;
8321 
8322             if (arg4 != sizeof(target_sigset_t)) {
8323                 return -TARGET_EINVAL;
8324             }
8325 
8326             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8327                 return -TARGET_EFAULT;
8328             target_to_host_sigset(&set, p);
8329             unlock_user(p, arg1, 0);
8330             if (arg3) {
8331                 puts = &uts;
8332                 target_to_host_timespec(puts, arg3);
8333             } else {
8334                 puts = NULL;
8335             }
8336             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8337                                                  SIGSET_T_SIZE));
8338             if (!is_error(ret)) {
8339                 if (arg2) {
8340                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8341                                   0);
8342                     if (!p) {
8343                         return -TARGET_EFAULT;
8344                     }
8345                     host_to_target_siginfo(p, &uinfo);
8346                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8347                 }
8348                 ret = host_to_target_signal(ret);
8349             }
8350         }
8351         return ret;
8352     case TARGET_NR_rt_sigqueueinfo:
8353         {
8354             siginfo_t uinfo;
8355 
8356             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8357             if (!p) {
8358                 return -TARGET_EFAULT;
8359             }
8360             target_to_host_siginfo(&uinfo, p);
8361             unlock_user(p, arg3, 0);
8362             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8363         }
8364         return ret;
8365     case TARGET_NR_rt_tgsigqueueinfo:
8366         {
8367             siginfo_t uinfo;
8368 
8369             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8370             if (!p) {
8371                 return -TARGET_EFAULT;
8372             }
8373             target_to_host_siginfo(&uinfo, p);
8374             unlock_user(p, arg4, 0);
8375             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8376         }
8377         return ret;
8378 #ifdef TARGET_NR_sigreturn
8379     case TARGET_NR_sigreturn:
8380         if (block_signals()) {
8381             return -TARGET_ERESTARTSYS;
8382         }
8383         return do_sigreturn(cpu_env);
8384 #endif
8385     case TARGET_NR_rt_sigreturn:
8386         if (block_signals()) {
8387             return -TARGET_ERESTARTSYS;
8388         }
8389         return do_rt_sigreturn(cpu_env);
8390     case TARGET_NR_sethostname:
8391         if (!(p = lock_user_string(arg1)))
8392             return -TARGET_EFAULT;
8393         ret = get_errno(sethostname(p, arg2));
8394         unlock_user(p, arg1, 0);
8395         return ret;
8396 #ifdef TARGET_NR_setrlimit
8397     case TARGET_NR_setrlimit:
8398         {
8399             int resource = target_to_host_resource(arg1);
8400             struct target_rlimit *target_rlim;
8401             struct rlimit rlim;
8402             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8403                 return -TARGET_EFAULT;
8404             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8405             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8406             unlock_user_struct(target_rlim, arg2, 0);
8407             /*
8408              * If we just passed through resource limit settings for memory then
8409              * they would also apply to QEMU's own allocations, and QEMU will
8410              * crash or hang or die if its allocations fail. Ideally we would
8411              * track the guest allocations in QEMU and apply the limits ourselves.
8412              * For now, just tell the guest the call succeeded but don't actually
8413              * limit anything.
8414              */
8415             if (resource != RLIMIT_AS &&
8416                 resource != RLIMIT_DATA &&
8417                 resource != RLIMIT_STACK) {
8418                 return get_errno(setrlimit(resource, &rlim));
8419             } else {
8420                 return 0;
8421             }
8422         }
8423 #endif
8424 #ifdef TARGET_NR_getrlimit
8425     case TARGET_NR_getrlimit:
8426         {
8427             int resource = target_to_host_resource(arg1);
8428             struct target_rlimit *target_rlim;
8429             struct rlimit rlim;
8430 
8431             ret = get_errno(getrlimit(resource, &rlim));
8432             if (!is_error(ret)) {
8433                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8434                     return -TARGET_EFAULT;
8435                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8436                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8437                 unlock_user_struct(target_rlim, arg2, 1);
8438             }
8439         }
8440         return ret;
8441 #endif
8442     case TARGET_NR_getrusage:
8443         {
8444             struct rusage rusage;
8445             ret = get_errno(getrusage(arg1, &rusage));
8446             if (!is_error(ret)) {
8447                 ret = host_to_target_rusage(arg2, &rusage);
8448             }
8449         }
8450         return ret;
8451     case TARGET_NR_gettimeofday:
8452         {
8453             struct timeval tv;
8454             ret = get_errno(gettimeofday(&tv, NULL));
8455             if (!is_error(ret)) {
8456                 if (copy_to_user_timeval(arg1, &tv))
8457                     return -TARGET_EFAULT;
8458             }
8459         }
8460         return ret;
8461     case TARGET_NR_settimeofday:
8462         {
8463             struct timeval tv, *ptv = NULL;
8464             struct timezone tz, *ptz = NULL;
8465 
8466             if (arg1) {
8467                 if (copy_from_user_timeval(&tv, arg1)) {
8468                     return -TARGET_EFAULT;
8469                 }
8470                 ptv = &tv;
8471             }
8472 
8473             if (arg2) {
8474                 if (copy_from_user_timezone(&tz, arg2)) {
8475                     return -TARGET_EFAULT;
8476                 }
8477                 ptz = &tz;
8478             }
8479 
8480             return get_errno(settimeofday(ptv, ptz));
8481         }
8482 #if defined(TARGET_NR_select)
8483     case TARGET_NR_select:
8484 #if defined(TARGET_WANT_NI_OLD_SELECT)
8485         /* some architectures used to have old_select here
8486          * but now ENOSYS it.
8487          */
8488         ret = -TARGET_ENOSYS;
8489 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8490         ret = do_old_select(arg1);
8491 #else
8492         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8493 #endif
8494         return ret;
8495 #endif
8496 #ifdef TARGET_NR_pselect6
8497     case TARGET_NR_pselect6:
8498         {
8499             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8500             fd_set rfds, wfds, efds;
8501             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8502             struct timespec ts, *ts_ptr;
8503 
8504             /*
8505              * The 6th arg is actually two args smashed together,
8506              * so we cannot use the C library.
8507              */
8508             sigset_t set;
8509             struct {
8510                 sigset_t *set;
8511                 size_t size;
8512             } sig, *sig_ptr;
8513 
8514             abi_ulong arg_sigset, arg_sigsize, *arg7;
8515             target_sigset_t *target_sigset;
8516 
8517             n = arg1;
8518             rfd_addr = arg2;
8519             wfd_addr = arg3;
8520             efd_addr = arg4;
8521             ts_addr = arg5;
8522 
8523             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8524             if (ret) {
8525                 return ret;
8526             }
8527             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8528             if (ret) {
8529                 return ret;
8530             }
8531             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8532             if (ret) {
8533                 return ret;
8534             }
8535 
8536             /*
8537              * This takes a timespec, and not a timeval, so we cannot
8538              * use the do_select() helper ...
8539              */
8540             if (ts_addr) {
8541                 if (target_to_host_timespec(&ts, ts_addr)) {
8542                     return -TARGET_EFAULT;
8543                 }
8544                 ts_ptr = &ts;
8545             } else {
8546                 ts_ptr = NULL;
8547             }
8548 
8549             /* Extract the two packed args for the sigset */
8550             if (arg6) {
8551                 sig_ptr = &sig;
8552                 sig.size = SIGSET_T_SIZE;
8553 
8554                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8555                 if (!arg7) {
8556                     return -TARGET_EFAULT;
8557                 }
8558                 arg_sigset = tswapal(arg7[0]);
8559                 arg_sigsize = tswapal(arg7[1]);
8560                 unlock_user(arg7, arg6, 0);
8561 
8562                 if (arg_sigset) {
8563                     sig.set = &set;
8564                     if (arg_sigsize != sizeof(*target_sigset)) {
8565                         /* Like the kernel, we enforce correct size sigsets */
8566                         return -TARGET_EINVAL;
8567                     }
8568                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8569                                               sizeof(*target_sigset), 1);
8570                     if (!target_sigset) {
8571                         return -TARGET_EFAULT;
8572                     }
8573                     target_to_host_sigset(&set, target_sigset);
8574                     unlock_user(target_sigset, arg_sigset, 0);
8575                 } else {
8576                     sig.set = NULL;
8577                 }
8578             } else {
8579                 sig_ptr = NULL;
8580             }
8581 
8582             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8583                                           ts_ptr, sig_ptr));
8584 
8585             if (!is_error(ret)) {
8586                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8587                     return -TARGET_EFAULT;
8588                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8589                     return -TARGET_EFAULT;
8590                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8591                     return -TARGET_EFAULT;
8592 
8593                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8594                     return -TARGET_EFAULT;
8595             }
8596         }
8597         return ret;
8598 #endif
8599 #ifdef TARGET_NR_symlink
8600     case TARGET_NR_symlink:
8601         {
8602             void *p2;
8603             p = lock_user_string(arg1);
8604             p2 = lock_user_string(arg2);
8605             if (!p || !p2)
8606                 ret = -TARGET_EFAULT;
8607             else
8608                 ret = get_errno(symlink(p, p2));
8609             unlock_user(p2, arg2, 0);
8610             unlock_user(p, arg1, 0);
8611         }
8612         return ret;
8613 #endif
8614 #if defined(TARGET_NR_symlinkat)
8615     case TARGET_NR_symlinkat:
8616         {
8617             void *p2;
8618             p  = lock_user_string(arg1);
8619             p2 = lock_user_string(arg3);
8620             if (!p || !p2)
8621                 ret = -TARGET_EFAULT;
8622             else
8623                 ret = get_errno(symlinkat(p, arg2, p2));
8624             unlock_user(p2, arg3, 0);
8625             unlock_user(p, arg1, 0);
8626         }
8627         return ret;
8628 #endif
8629 #ifdef TARGET_NR_readlink
8630     case TARGET_NR_readlink:
8631         {
8632             void *p2;
8633             p = lock_user_string(arg1);
8634             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8635             if (!p || !p2) {
8636                 ret = -TARGET_EFAULT;
8637             } else if (!arg3) {
8638                 /* Short circuit this for the magic exe check. */
8639                 ret = -TARGET_EINVAL;
8640             } else if (is_proc_myself((const char *)p, "exe")) {
8641                 char real[PATH_MAX], *temp;
8642                 temp = realpath(exec_path, real);
8643                 /* Return value is # of bytes that we wrote to the buffer. */
8644                 if (temp == NULL) {
8645                     ret = get_errno(-1);
8646                 } else {
8647                     /* Don't worry about sign mismatch as earlier mapping
8648                      * logic would have thrown a bad address error. */
8649                     ret = MIN(strlen(real), arg3);
8650                     /* We cannot NUL terminate the string. */
8651                     memcpy(p2, real, ret);
8652                 }
8653             } else {
8654                 ret = get_errno(readlink(path(p), p2, arg3));
8655             }
8656             unlock_user(p2, arg2, ret);
8657             unlock_user(p, arg1, 0);
8658         }
8659         return ret;
8660 #endif
8661 #if defined(TARGET_NR_readlinkat)
8662     case TARGET_NR_readlinkat:
8663         {
8664             void *p2;
8665             p  = lock_user_string(arg2);
8666             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8667             if (!p || !p2) {
8668                 ret = -TARGET_EFAULT;
8669             } else if (is_proc_myself((const char *)p, "exe")) {
8670                 char real[PATH_MAX], *temp;
8671                 temp = realpath(exec_path, real);
8672                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8673                 snprintf((char *)p2, arg4, "%s", real);
8674             } else {
8675                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8676             }
8677             unlock_user(p2, arg3, ret);
8678             unlock_user(p, arg2, 0);
8679         }
8680         return ret;
8681 #endif
8682 #ifdef TARGET_NR_swapon
8683     case TARGET_NR_swapon:
8684         if (!(p = lock_user_string(arg1)))
8685             return -TARGET_EFAULT;
8686         ret = get_errno(swapon(p, arg2));
8687         unlock_user(p, arg1, 0);
8688         return ret;
8689 #endif
8690     case TARGET_NR_reboot:
8691         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8692            /* arg4 must be ignored in all other cases */
8693            p = lock_user_string(arg4);
8694            if (!p) {
8695                return -TARGET_EFAULT;
8696            }
8697            ret = get_errno(reboot(arg1, arg2, arg3, p));
8698            unlock_user(p, arg4, 0);
8699         } else {
8700            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8701         }
8702         return ret;
8703 #ifdef TARGET_NR_mmap
8704     case TARGET_NR_mmap:
8705 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8706     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8707     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8708     || defined(TARGET_S390X)
8709         {
8710             abi_ulong *v;
8711             abi_ulong v1, v2, v3, v4, v5, v6;
8712             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8713                 return -TARGET_EFAULT;
8714             v1 = tswapal(v[0]);
8715             v2 = tswapal(v[1]);
8716             v3 = tswapal(v[2]);
8717             v4 = tswapal(v[3]);
8718             v5 = tswapal(v[4]);
8719             v6 = tswapal(v[5]);
8720             unlock_user(v, arg1, 0);
8721             ret = get_errno(target_mmap(v1, v2, v3,
8722                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8723                                         v5, v6));
8724         }
8725 #else
8726         ret = get_errno(target_mmap(arg1, arg2, arg3,
8727                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8728                                     arg5,
8729                                     arg6));
8730 #endif
8731         return ret;
8732 #endif
8733 #ifdef TARGET_NR_mmap2
8734     case TARGET_NR_mmap2:
8735 #ifndef MMAP_SHIFT
8736 #define MMAP_SHIFT 12
8737 #endif
8738         ret = target_mmap(arg1, arg2, arg3,
8739                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8740                           arg5, arg6 << MMAP_SHIFT);
8741         return get_errno(ret);
8742 #endif
8743     case TARGET_NR_munmap:
8744         return get_errno(target_munmap(arg1, arg2));
8745     case TARGET_NR_mprotect:
8746         {
8747             TaskState *ts = cpu->opaque;
8748             /* Special hack to detect libc making the stack executable.  */
8749             if ((arg3 & PROT_GROWSDOWN)
8750                 && arg1 >= ts->info->stack_limit
8751                 && arg1 <= ts->info->start_stack) {
8752                 arg3 &= ~PROT_GROWSDOWN;
8753                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8754                 arg1 = ts->info->stack_limit;
8755             }
8756         }
8757         return get_errno(target_mprotect(arg1, arg2, arg3));
8758 #ifdef TARGET_NR_mremap
8759     case TARGET_NR_mremap:
8760         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8761 #endif
8762         /* ??? msync/mlock/munlock are broken for softmmu.  */
8763 #ifdef TARGET_NR_msync
8764     case TARGET_NR_msync:
8765         return get_errno(msync(g2h(arg1), arg2, arg3));
8766 #endif
8767 #ifdef TARGET_NR_mlock
8768     case TARGET_NR_mlock:
8769         return get_errno(mlock(g2h(arg1), arg2));
8770 #endif
8771 #ifdef TARGET_NR_munlock
8772     case TARGET_NR_munlock:
8773         return get_errno(munlock(g2h(arg1), arg2));
8774 #endif
8775 #ifdef TARGET_NR_mlockall
8776     case TARGET_NR_mlockall:
8777         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8778 #endif
8779 #ifdef TARGET_NR_munlockall
8780     case TARGET_NR_munlockall:
8781         return get_errno(munlockall());
8782 #endif
8783 #ifdef TARGET_NR_truncate
8784     case TARGET_NR_truncate:
8785         if (!(p = lock_user_string(arg1)))
8786             return -TARGET_EFAULT;
8787         ret = get_errno(truncate(p, arg2));
8788         unlock_user(p, arg1, 0);
8789         return ret;
8790 #endif
8791 #ifdef TARGET_NR_ftruncate
8792     case TARGET_NR_ftruncate:
8793         return get_errno(ftruncate(arg1, arg2));
8794 #endif
8795     case TARGET_NR_fchmod:
8796         return get_errno(fchmod(arg1, arg2));
8797 #if defined(TARGET_NR_fchmodat)
8798     case TARGET_NR_fchmodat:
8799         if (!(p = lock_user_string(arg2)))
8800             return -TARGET_EFAULT;
8801         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8802         unlock_user(p, arg2, 0);
8803         return ret;
8804 #endif
8805     case TARGET_NR_getpriority:
8806         /* Note that negative values are valid for getpriority, so we must
8807            differentiate based on errno settings.  */
8808         errno = 0;
8809         ret = getpriority(arg1, arg2);
8810         if (ret == -1 && errno != 0) {
8811             return -host_to_target_errno(errno);
8812         }
8813 #ifdef TARGET_ALPHA
8814         /* Return value is the unbiased priority.  Signal no error.  */
8815         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8816 #else
8817         /* Return value is a biased priority to avoid negative numbers.  */
8818         ret = 20 - ret;
8819 #endif
8820         return ret;
8821     case TARGET_NR_setpriority:
8822         return get_errno(setpriority(arg1, arg2, arg3));
8823 #ifdef TARGET_NR_statfs
8824     case TARGET_NR_statfs:
8825         if (!(p = lock_user_string(arg1))) {
8826             return -TARGET_EFAULT;
8827         }
8828         ret = get_errno(statfs(path(p), &stfs));
8829         unlock_user(p, arg1, 0);
8830     convert_statfs:
8831         if (!is_error(ret)) {
8832             struct target_statfs *target_stfs;
8833 
8834             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8835                 return -TARGET_EFAULT;
8836             __put_user(stfs.f_type, &target_stfs->f_type);
8837             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8838             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8839             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8840             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8841             __put_user(stfs.f_files, &target_stfs->f_files);
8842             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8843             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8844             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8845             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8846             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8847 #ifdef _STATFS_F_FLAGS
8848             __put_user(stfs.f_flags, &target_stfs->f_flags);
8849 #else
8850             __put_user(0, &target_stfs->f_flags);
8851 #endif
8852             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8853             unlock_user_struct(target_stfs, arg2, 1);
8854         }
8855         return ret;
8856 #endif
8857 #ifdef TARGET_NR_fstatfs
8858     case TARGET_NR_fstatfs:
8859         ret = get_errno(fstatfs(arg1, &stfs));
8860         goto convert_statfs;
8861 #endif
8862 #ifdef TARGET_NR_statfs64
8863     case TARGET_NR_statfs64:
8864         if (!(p = lock_user_string(arg1))) {
8865             return -TARGET_EFAULT;
8866         }
8867         ret = get_errno(statfs(path(p), &stfs));
8868         unlock_user(p, arg1, 0);
8869     convert_statfs64:
8870         if (!is_error(ret)) {
8871             struct target_statfs64 *target_stfs;
8872 
8873             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8874                 return -TARGET_EFAULT;
8875             __put_user(stfs.f_type, &target_stfs->f_type);
8876             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8877             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8878             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8879             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8880             __put_user(stfs.f_files, &target_stfs->f_files);
8881             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8882             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8883             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8884             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8885             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8886             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8887             unlock_user_struct(target_stfs, arg3, 1);
8888         }
8889         return ret;
8890     case TARGET_NR_fstatfs64:
8891         ret = get_errno(fstatfs(arg1, &stfs));
8892         goto convert_statfs64;
8893 #endif
8894 #ifdef TARGET_NR_socketcall
8895     case TARGET_NR_socketcall:
8896         return do_socketcall(arg1, arg2);
8897 #endif
8898 #ifdef TARGET_NR_accept
8899     case TARGET_NR_accept:
8900         return do_accept4(arg1, arg2, arg3, 0);
8901 #endif
8902 #ifdef TARGET_NR_accept4
8903     case TARGET_NR_accept4:
8904         return do_accept4(arg1, arg2, arg3, arg4);
8905 #endif
8906 #ifdef TARGET_NR_bind
8907     case TARGET_NR_bind:
8908         return do_bind(arg1, arg2, arg3);
8909 #endif
8910 #ifdef TARGET_NR_connect
8911     case TARGET_NR_connect:
8912         return do_connect(arg1, arg2, arg3);
8913 #endif
8914 #ifdef TARGET_NR_getpeername
8915     case TARGET_NR_getpeername:
8916         return do_getpeername(arg1, arg2, arg3);
8917 #endif
8918 #ifdef TARGET_NR_getsockname
8919     case TARGET_NR_getsockname:
8920         return do_getsockname(arg1, arg2, arg3);
8921 #endif
8922 #ifdef TARGET_NR_getsockopt
8923     case TARGET_NR_getsockopt:
8924         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8925 #endif
8926 #ifdef TARGET_NR_listen
8927     case TARGET_NR_listen:
8928         return get_errno(listen(arg1, arg2));
8929 #endif
8930 #ifdef TARGET_NR_recv
8931     case TARGET_NR_recv:
8932         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8933 #endif
8934 #ifdef TARGET_NR_recvfrom
8935     case TARGET_NR_recvfrom:
8936         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8937 #endif
8938 #ifdef TARGET_NR_recvmsg
8939     case TARGET_NR_recvmsg:
8940         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8941 #endif
8942 #ifdef TARGET_NR_send
8943     case TARGET_NR_send:
8944         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8945 #endif
8946 #ifdef TARGET_NR_sendmsg
8947     case TARGET_NR_sendmsg:
8948         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8949 #endif
8950 #ifdef TARGET_NR_sendmmsg
8951     case TARGET_NR_sendmmsg:
8952         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8953     case TARGET_NR_recvmmsg:
8954         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8955 #endif
8956 #ifdef TARGET_NR_sendto
8957     case TARGET_NR_sendto:
8958         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8959 #endif
8960 #ifdef TARGET_NR_shutdown
8961     case TARGET_NR_shutdown:
8962         return get_errno(shutdown(arg1, arg2));
8963 #endif
8964 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8965     case TARGET_NR_getrandom:
8966         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8967         if (!p) {
8968             return -TARGET_EFAULT;
8969         }
8970         ret = get_errno(getrandom(p, arg2, arg3));
8971         unlock_user(p, arg1, ret);
8972         return ret;
8973 #endif
8974 #ifdef TARGET_NR_socket
8975     case TARGET_NR_socket:
8976         return do_socket(arg1, arg2, arg3);
8977 #endif
8978 #ifdef TARGET_NR_socketpair
8979     case TARGET_NR_socketpair:
8980         return do_socketpair(arg1, arg2, arg3, arg4);
8981 #endif
8982 #ifdef TARGET_NR_setsockopt
8983     case TARGET_NR_setsockopt:
8984         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8985 #endif
8986 #if defined(TARGET_NR_syslog)
8987     case TARGET_NR_syslog:
8988         {
8989             int len = arg2;
8990 
8991             switch (arg1) {
8992             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8993             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8994             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8995             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8996             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8997             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8998             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8999             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9000                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9001             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9002             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9003             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9004                 {
9005                     if (len < 0) {
9006                         return -TARGET_EINVAL;
9007                     }
9008                     if (len == 0) {
9009                         return 0;
9010                     }
9011                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9012                     if (!p) {
9013                         return -TARGET_EFAULT;
9014                     }
9015                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9016                     unlock_user(p, arg2, arg3);
9017                 }
9018                 return ret;
9019             default:
9020                 return -TARGET_EINVAL;
9021             }
9022         }
9023         break;
9024 #endif
9025     case TARGET_NR_setitimer:
9026         {
9027             struct itimerval value, ovalue, *pvalue;
9028 
9029             if (arg2) {
9030                 pvalue = &value;
9031                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9032                     || copy_from_user_timeval(&pvalue->it_value,
9033                                               arg2 + sizeof(struct target_timeval)))
9034                     return -TARGET_EFAULT;
9035             } else {
9036                 pvalue = NULL;
9037             }
9038             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9039             if (!is_error(ret) && arg3) {
9040                 if (copy_to_user_timeval(arg3,
9041                                          &ovalue.it_interval)
9042                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9043                                             &ovalue.it_value))
9044                     return -TARGET_EFAULT;
9045             }
9046         }
9047         return ret;
9048     case TARGET_NR_getitimer:
9049         {
9050             struct itimerval value;
9051 
9052             ret = get_errno(getitimer(arg1, &value));
9053             if (!is_error(ret) && arg2) {
9054                 if (copy_to_user_timeval(arg2,
9055                                          &value.it_interval)
9056                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9057                                             &value.it_value))
9058                     return -TARGET_EFAULT;
9059             }
9060         }
9061         return ret;
9062 #ifdef TARGET_NR_stat
9063     case TARGET_NR_stat:
9064         if (!(p = lock_user_string(arg1))) {
9065             return -TARGET_EFAULT;
9066         }
9067         ret = get_errno(stat(path(p), &st));
9068         unlock_user(p, arg1, 0);
9069         goto do_stat;
9070 #endif
9071 #ifdef TARGET_NR_lstat
9072     case TARGET_NR_lstat:
9073         if (!(p = lock_user_string(arg1))) {
9074             return -TARGET_EFAULT;
9075         }
9076         ret = get_errno(lstat(path(p), &st));
9077         unlock_user(p, arg1, 0);
9078         goto do_stat;
9079 #endif
9080 #ifdef TARGET_NR_fstat
9081     case TARGET_NR_fstat:
9082         {
9083             ret = get_errno(fstat(arg1, &st));
9084 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9085         do_stat:
9086 #endif
9087             if (!is_error(ret)) {
9088                 struct target_stat *target_st;
9089 
9090                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9091                     return -TARGET_EFAULT;
9092                 memset(target_st, 0, sizeof(*target_st));
9093                 __put_user(st.st_dev, &target_st->st_dev);
9094                 __put_user(st.st_ino, &target_st->st_ino);
9095                 __put_user(st.st_mode, &target_st->st_mode);
9096                 __put_user(st.st_uid, &target_st->st_uid);
9097                 __put_user(st.st_gid, &target_st->st_gid);
9098                 __put_user(st.st_nlink, &target_st->st_nlink);
9099                 __put_user(st.st_rdev, &target_st->st_rdev);
9100                 __put_user(st.st_size, &target_st->st_size);
9101                 __put_user(st.st_blksize, &target_st->st_blksize);
9102                 __put_user(st.st_blocks, &target_st->st_blocks);
9103                 __put_user(st.st_atime, &target_st->target_st_atime);
9104                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9105                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9106 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9107     defined(TARGET_STAT_HAVE_NSEC)
9108                 __put_user(st.st_atim.tv_nsec,
9109                            &target_st->target_st_atime_nsec);
9110                 __put_user(st.st_mtim.tv_nsec,
9111                            &target_st->target_st_mtime_nsec);
9112                 __put_user(st.st_ctim.tv_nsec,
9113                            &target_st->target_st_ctime_nsec);
9114 #endif
9115                 unlock_user_struct(target_st, arg2, 1);
9116             }
9117         }
9118         return ret;
9119 #endif
9120     case TARGET_NR_vhangup:
9121         return get_errno(vhangup());
9122 #ifdef TARGET_NR_syscall
9123     case TARGET_NR_syscall:
9124         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9125                           arg6, arg7, arg8, 0);
9126 #endif
9127     case TARGET_NR_wait4:
9128         {
9129             int status;
9130             abi_long status_ptr = arg2;
9131             struct rusage rusage, *rusage_ptr;
9132             abi_ulong target_rusage = arg4;
9133             abi_long rusage_err;
9134             if (target_rusage)
9135                 rusage_ptr = &rusage;
9136             else
9137                 rusage_ptr = NULL;
9138             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9139             if (!is_error(ret)) {
9140                 if (status_ptr && ret) {
9141                     status = host_to_target_waitstatus(status);
9142                     if (put_user_s32(status, status_ptr))
9143                         return -TARGET_EFAULT;
9144                 }
9145                 if (target_rusage) {
9146                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9147                     if (rusage_err) {
9148                         ret = rusage_err;
9149                     }
9150                 }
9151             }
9152         }
9153         return ret;
9154 #ifdef TARGET_NR_swapoff
9155     case TARGET_NR_swapoff:
9156         if (!(p = lock_user_string(arg1)))
9157             return -TARGET_EFAULT;
9158         ret = get_errno(swapoff(p));
9159         unlock_user(p, arg1, 0);
9160         return ret;
9161 #endif
9162     case TARGET_NR_sysinfo:
9163         {
9164             struct target_sysinfo *target_value;
9165             struct sysinfo value;
9166             ret = get_errno(sysinfo(&value));
9167             if (!is_error(ret) && arg1)
9168             {
9169                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9170                     return -TARGET_EFAULT;
9171                 __put_user(value.uptime, &target_value->uptime);
9172                 __put_user(value.loads[0], &target_value->loads[0]);
9173                 __put_user(value.loads[1], &target_value->loads[1]);
9174                 __put_user(value.loads[2], &target_value->loads[2]);
9175                 __put_user(value.totalram, &target_value->totalram);
9176                 __put_user(value.freeram, &target_value->freeram);
9177                 __put_user(value.sharedram, &target_value->sharedram);
9178                 __put_user(value.bufferram, &target_value->bufferram);
9179                 __put_user(value.totalswap, &target_value->totalswap);
9180                 __put_user(value.freeswap, &target_value->freeswap);
9181                 __put_user(value.procs, &target_value->procs);
9182                 __put_user(value.totalhigh, &target_value->totalhigh);
9183                 __put_user(value.freehigh, &target_value->freehigh);
9184                 __put_user(value.mem_unit, &target_value->mem_unit);
9185                 unlock_user_struct(target_value, arg1, 1);
9186             }
9187         }
9188         return ret;
9189 #ifdef TARGET_NR_ipc
9190     case TARGET_NR_ipc:
9191         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9192 #endif
9193 #ifdef TARGET_NR_semget
9194     case TARGET_NR_semget:
9195         return get_errno(semget(arg1, arg2, arg3));
9196 #endif
9197 #ifdef TARGET_NR_semop
9198     case TARGET_NR_semop:
9199         return do_semop(arg1, arg2, arg3);
9200 #endif
9201 #ifdef TARGET_NR_semctl
9202     case TARGET_NR_semctl:
9203         return do_semctl(arg1, arg2, arg3, arg4);
9204 #endif
9205 #ifdef TARGET_NR_msgctl
9206     case TARGET_NR_msgctl:
9207         return do_msgctl(arg1, arg2, arg3);
9208 #endif
9209 #ifdef TARGET_NR_msgget
9210     case TARGET_NR_msgget:
9211         return get_errno(msgget(arg1, arg2));
9212 #endif
9213 #ifdef TARGET_NR_msgrcv
9214     case TARGET_NR_msgrcv:
9215         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9216 #endif
9217 #ifdef TARGET_NR_msgsnd
9218     case TARGET_NR_msgsnd:
9219         return do_msgsnd(arg1, arg2, arg3, arg4);
9220 #endif
9221 #ifdef TARGET_NR_shmget
9222     case TARGET_NR_shmget:
9223         return get_errno(shmget(arg1, arg2, arg3));
9224 #endif
9225 #ifdef TARGET_NR_shmctl
9226     case TARGET_NR_shmctl:
9227         return do_shmctl(arg1, arg2, arg3);
9228 #endif
9229 #ifdef TARGET_NR_shmat
9230     case TARGET_NR_shmat:
9231         return do_shmat(cpu_env, arg1, arg2, arg3);
9232 #endif
9233 #ifdef TARGET_NR_shmdt
9234     case TARGET_NR_shmdt:
9235         return do_shmdt(arg1);
9236 #endif
9237     case TARGET_NR_fsync:
9238         return get_errno(fsync(arg1));
9239     case TARGET_NR_clone:
9240         /* Linux manages to have three different orderings for its
9241          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9242          * match the kernel's CONFIG_CLONE_* settings.
9243          * Microblaze is further special in that it uses a sixth
9244          * implicit argument to clone for the TLS pointer.
9245          */
9246 #if defined(TARGET_MICROBLAZE)
9247         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9248 #elif defined(TARGET_CLONE_BACKWARDS)
9249         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9250 #elif defined(TARGET_CLONE_BACKWARDS2)
9251         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9252 #else
9253         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9254 #endif
9255         return ret;
9256 #ifdef __NR_exit_group
9257         /* new thread calls */
9258     case TARGET_NR_exit_group:
9259         preexit_cleanup(cpu_env, arg1);
9260         return get_errno(exit_group(arg1));
9261 #endif
9262     case TARGET_NR_setdomainname:
9263         if (!(p = lock_user_string(arg1)))
9264             return -TARGET_EFAULT;
9265         ret = get_errno(setdomainname(p, arg2));
9266         unlock_user(p, arg1, 0);
9267         return ret;
9268     case TARGET_NR_uname:
9269         /* no need to transcode because we use the linux syscall */
9270         {
9271             struct new_utsname * buf;
9272 
9273             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9274                 return -TARGET_EFAULT;
9275             ret = get_errno(sys_uname(buf));
9276             if (!is_error(ret)) {
9277                 /* Overwrite the native machine name with whatever is being
9278                    emulated. */
9279                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9280                           sizeof(buf->machine));
9281                 /* Allow the user to override the reported release.  */
9282                 if (qemu_uname_release && *qemu_uname_release) {
9283                     g_strlcpy(buf->release, qemu_uname_release,
9284                               sizeof(buf->release));
9285                 }
9286             }
9287             unlock_user_struct(buf, arg1, 1);
9288         }
9289         return ret;
9290 #ifdef TARGET_I386
9291     case TARGET_NR_modify_ldt:
9292         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9293 #if !defined(TARGET_X86_64)
9294     case TARGET_NR_vm86:
9295         return do_vm86(cpu_env, arg1, arg2);
9296 #endif
9297 #endif
9298     case TARGET_NR_adjtimex:
9299         {
9300             struct timex host_buf;
9301 
9302             if (target_to_host_timex(&host_buf, arg1) != 0) {
9303                 return -TARGET_EFAULT;
9304             }
9305             ret = get_errno(adjtimex(&host_buf));
9306             if (!is_error(ret)) {
9307                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9308                     return -TARGET_EFAULT;
9309                 }
9310             }
9311         }
9312         return ret;
9313 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9314     case TARGET_NR_clock_adjtime:
9315         {
9316             struct timex htx, *phtx = &htx;
9317 
9318             if (target_to_host_timex(phtx, arg2) != 0) {
9319                 return -TARGET_EFAULT;
9320             }
9321             ret = get_errno(clock_adjtime(arg1, phtx));
9322             if (!is_error(ret) && phtx) {
9323                 if (host_to_target_timex(arg2, phtx) != 0) {
9324                     return -TARGET_EFAULT;
9325                 }
9326             }
9327         }
9328         return ret;
9329 #endif
9330     case TARGET_NR_getpgid:
9331         return get_errno(getpgid(arg1));
9332     case TARGET_NR_fchdir:
9333         return get_errno(fchdir(arg1));
9334     case TARGET_NR_personality:
9335         return get_errno(personality(arg1));
9336 #ifdef TARGET_NR__llseek /* Not on alpha */
9337     case TARGET_NR__llseek:
9338         {
9339             int64_t res;
9340 #if !defined(__NR_llseek)
9341             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9342             if (res == -1) {
9343                 ret = get_errno(res);
9344             } else {
9345                 ret = 0;
9346             }
9347 #else
9348             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9349 #endif
9350             if ((ret == 0) && put_user_s64(res, arg4)) {
9351                 return -TARGET_EFAULT;
9352             }
9353         }
9354         return ret;
9355 #endif
9356 #ifdef TARGET_NR_getdents
9357     case TARGET_NR_getdents:
9358 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9359 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9360         {
9361             struct target_dirent *target_dirp;
9362             struct linux_dirent *dirp;
9363             abi_long count = arg3;
9364 
9365             dirp = g_try_malloc(count);
9366             if (!dirp) {
9367                 return -TARGET_ENOMEM;
9368             }
9369 
9370             ret = get_errno(sys_getdents(arg1, dirp, count));
9371             if (!is_error(ret)) {
9372                 struct linux_dirent *de;
9373 		struct target_dirent *tde;
9374                 int len = ret;
9375                 int reclen, treclen;
9376 		int count1, tnamelen;
9377 
9378 		count1 = 0;
9379                 de = dirp;
9380                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9381                     return -TARGET_EFAULT;
9382 		tde = target_dirp;
9383                 while (len > 0) {
9384                     reclen = de->d_reclen;
9385                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9386                     assert(tnamelen >= 0);
9387                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9388                     assert(count1 + treclen <= count);
9389                     tde->d_reclen = tswap16(treclen);
9390                     tde->d_ino = tswapal(de->d_ino);
9391                     tde->d_off = tswapal(de->d_off);
9392                     memcpy(tde->d_name, de->d_name, tnamelen);
9393                     de = (struct linux_dirent *)((char *)de + reclen);
9394                     len -= reclen;
9395                     tde = (struct target_dirent *)((char *)tde + treclen);
9396 		    count1 += treclen;
9397                 }
9398 		ret = count1;
9399                 unlock_user(target_dirp, arg2, ret);
9400             }
9401             g_free(dirp);
9402         }
9403 #else
9404         {
9405             struct linux_dirent *dirp;
9406             abi_long count = arg3;
9407 
9408             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9409                 return -TARGET_EFAULT;
9410             ret = get_errno(sys_getdents(arg1, dirp, count));
9411             if (!is_error(ret)) {
9412                 struct linux_dirent *de;
9413                 int len = ret;
9414                 int reclen;
9415                 de = dirp;
9416                 while (len > 0) {
9417                     reclen = de->d_reclen;
9418                     if (reclen > len)
9419                         break;
9420                     de->d_reclen = tswap16(reclen);
9421                     tswapls(&de->d_ino);
9422                     tswapls(&de->d_off);
9423                     de = (struct linux_dirent *)((char *)de + reclen);
9424                     len -= reclen;
9425                 }
9426             }
9427             unlock_user(dirp, arg2, ret);
9428         }
9429 #endif
9430 #else
9431         /* Implement getdents in terms of getdents64 */
9432         {
9433             struct linux_dirent64 *dirp;
9434             abi_long count = arg3;
9435 
9436             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9437             if (!dirp) {
9438                 return -TARGET_EFAULT;
9439             }
9440             ret = get_errno(sys_getdents64(arg1, dirp, count));
9441             if (!is_error(ret)) {
9442                 /* Convert the dirent64 structs to target dirent.  We do this
9443                  * in-place, since we can guarantee that a target_dirent is no
9444                  * larger than a dirent64; however this means we have to be
9445                  * careful to read everything before writing in the new format.
9446                  */
9447                 struct linux_dirent64 *de;
9448                 struct target_dirent *tde;
9449                 int len = ret;
9450                 int tlen = 0;
9451 
9452                 de = dirp;
9453                 tde = (struct target_dirent *)dirp;
9454                 while (len > 0) {
9455                     int namelen, treclen;
9456                     int reclen = de->d_reclen;
9457                     uint64_t ino = de->d_ino;
9458                     int64_t off = de->d_off;
9459                     uint8_t type = de->d_type;
9460 
9461                     namelen = strlen(de->d_name);
9462                     treclen = offsetof(struct target_dirent, d_name)
9463                         + namelen + 2;
9464                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9465 
9466                     memmove(tde->d_name, de->d_name, namelen + 1);
9467                     tde->d_ino = tswapal(ino);
9468                     tde->d_off = tswapal(off);
9469                     tde->d_reclen = tswap16(treclen);
9470                     /* The target_dirent type is in what was formerly a padding
9471                      * byte at the end of the structure:
9472                      */
9473                     *(((char *)tde) + treclen - 1) = type;
9474 
9475                     de = (struct linux_dirent64 *)((char *)de + reclen);
9476                     tde = (struct target_dirent *)((char *)tde + treclen);
9477                     len -= reclen;
9478                     tlen += treclen;
9479                 }
9480                 ret = tlen;
9481             }
9482             unlock_user(dirp, arg2, ret);
9483         }
9484 #endif
9485         return ret;
9486 #endif /* TARGET_NR_getdents */
9487 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9488     case TARGET_NR_getdents64:
9489         {
9490             struct linux_dirent64 *dirp;
9491             abi_long count = arg3;
9492             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9493                 return -TARGET_EFAULT;
9494             ret = get_errno(sys_getdents64(arg1, dirp, count));
9495             if (!is_error(ret)) {
9496                 struct linux_dirent64 *de;
9497                 int len = ret;
9498                 int reclen;
9499                 de = dirp;
9500                 while (len > 0) {
9501                     reclen = de->d_reclen;
9502                     if (reclen > len)
9503                         break;
9504                     de->d_reclen = tswap16(reclen);
9505                     tswap64s((uint64_t *)&de->d_ino);
9506                     tswap64s((uint64_t *)&de->d_off);
9507                     de = (struct linux_dirent64 *)((char *)de + reclen);
9508                     len -= reclen;
9509                 }
9510             }
9511             unlock_user(dirp, arg2, ret);
9512         }
9513         return ret;
9514 #endif /* TARGET_NR_getdents64 */
9515 #if defined(TARGET_NR__newselect)
9516     case TARGET_NR__newselect:
9517         return do_select(arg1, arg2, arg3, arg4, arg5);
9518 #endif
9519 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9520 # ifdef TARGET_NR_poll
9521     case TARGET_NR_poll:
9522 # endif
9523 # ifdef TARGET_NR_ppoll
9524     case TARGET_NR_ppoll:
9525 # endif
9526         {
9527             struct target_pollfd *target_pfd;
9528             unsigned int nfds = arg2;
9529             struct pollfd *pfd;
9530             unsigned int i;
9531 
9532             pfd = NULL;
9533             target_pfd = NULL;
9534             if (nfds) {
9535                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9536                     return -TARGET_EINVAL;
9537                 }
9538 
9539                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9540                                        sizeof(struct target_pollfd) * nfds, 1);
9541                 if (!target_pfd) {
9542                     return -TARGET_EFAULT;
9543                 }
9544 
9545                 pfd = alloca(sizeof(struct pollfd) * nfds);
9546                 for (i = 0; i < nfds; i++) {
9547                     pfd[i].fd = tswap32(target_pfd[i].fd);
9548                     pfd[i].events = tswap16(target_pfd[i].events);
9549                 }
9550             }
9551 
9552             switch (num) {
9553 # ifdef TARGET_NR_ppoll
9554             case TARGET_NR_ppoll:
9555             {
9556                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9557                 target_sigset_t *target_set;
9558                 sigset_t _set, *set = &_set;
9559 
9560                 if (arg3) {
9561                     if (target_to_host_timespec(timeout_ts, arg3)) {
9562                         unlock_user(target_pfd, arg1, 0);
9563                         return -TARGET_EFAULT;
9564                     }
9565                 } else {
9566                     timeout_ts = NULL;
9567                 }
9568 
9569                 if (arg4) {
9570                     if (arg5 != sizeof(target_sigset_t)) {
9571                         unlock_user(target_pfd, arg1, 0);
9572                         return -TARGET_EINVAL;
9573                     }
9574 
9575                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9576                     if (!target_set) {
9577                         unlock_user(target_pfd, arg1, 0);
9578                         return -TARGET_EFAULT;
9579                     }
9580                     target_to_host_sigset(set, target_set);
9581                 } else {
9582                     set = NULL;
9583                 }
9584 
9585                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9586                                            set, SIGSET_T_SIZE));
9587 
9588                 if (!is_error(ret) && arg3) {
9589                     host_to_target_timespec(arg3, timeout_ts);
9590                 }
9591                 if (arg4) {
9592                     unlock_user(target_set, arg4, 0);
9593                 }
9594                 break;
9595             }
9596 # endif
9597 # ifdef TARGET_NR_poll
9598             case TARGET_NR_poll:
9599             {
9600                 struct timespec ts, *pts;
9601 
9602                 if (arg3 >= 0) {
9603                     /* Convert ms to secs, ns */
9604                     ts.tv_sec = arg3 / 1000;
9605                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9606                     pts = &ts;
9607                 } else {
9608                     /* -ve poll() timeout means "infinite" */
9609                     pts = NULL;
9610                 }
9611                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9612                 break;
9613             }
9614 # endif
9615             default:
9616                 g_assert_not_reached();
9617             }
9618 
9619             if (!is_error(ret)) {
9620                 for(i = 0; i < nfds; i++) {
9621                     target_pfd[i].revents = tswap16(pfd[i].revents);
9622                 }
9623             }
9624             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9625         }
9626         return ret;
9627 #endif
9628     case TARGET_NR_flock:
9629         /* NOTE: the flock constant seems to be the same for every
9630            Linux platform */
9631         return get_errno(safe_flock(arg1, arg2));
9632     case TARGET_NR_readv:
9633         {
9634             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9635             if (vec != NULL) {
9636                 ret = get_errno(safe_readv(arg1, vec, arg3));
9637                 unlock_iovec(vec, arg2, arg3, 1);
9638             } else {
9639                 ret = -host_to_target_errno(errno);
9640             }
9641         }
9642         return ret;
9643     case TARGET_NR_writev:
9644         {
9645             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9646             if (vec != NULL) {
9647                 ret = get_errno(safe_writev(arg1, vec, arg3));
9648                 unlock_iovec(vec, arg2, arg3, 0);
9649             } else {
9650                 ret = -host_to_target_errno(errno);
9651             }
9652         }
9653         return ret;
9654 #if defined(TARGET_NR_preadv)
9655     case TARGET_NR_preadv:
9656         {
9657             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9658             if (vec != NULL) {
9659                 unsigned long low, high;
9660 
9661                 target_to_host_low_high(arg4, arg5, &low, &high);
9662                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9663                 unlock_iovec(vec, arg2, arg3, 1);
9664             } else {
9665                 ret = -host_to_target_errno(errno);
9666            }
9667         }
9668         return ret;
9669 #endif
9670 #if defined(TARGET_NR_pwritev)
9671     case TARGET_NR_pwritev:
9672         {
9673             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9674             if (vec != NULL) {
9675                 unsigned long low, high;
9676 
9677                 target_to_host_low_high(arg4, arg5, &low, &high);
9678                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9679                 unlock_iovec(vec, arg2, arg3, 0);
9680             } else {
9681                 ret = -host_to_target_errno(errno);
9682            }
9683         }
9684         return ret;
9685 #endif
9686     case TARGET_NR_getsid:
9687         return get_errno(getsid(arg1));
9688 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9689     case TARGET_NR_fdatasync:
9690         return get_errno(fdatasync(arg1));
9691 #endif
9692 #ifdef TARGET_NR__sysctl
9693     case TARGET_NR__sysctl:
9694         /* We don't implement this, but ENOTDIR is always a safe
9695            return value. */
9696         return -TARGET_ENOTDIR;
9697 #endif
9698     case TARGET_NR_sched_getaffinity:
9699         {
9700             unsigned int mask_size;
9701             unsigned long *mask;
9702 
9703             /*
9704              * sched_getaffinity needs multiples of ulong, so need to take
9705              * care of mismatches between target ulong and host ulong sizes.
9706              */
9707             if (arg2 & (sizeof(abi_ulong) - 1)) {
9708                 return -TARGET_EINVAL;
9709             }
9710             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9711 
9712             mask = alloca(mask_size);
9713             memset(mask, 0, mask_size);
9714             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9715 
9716             if (!is_error(ret)) {
9717                 if (ret > arg2) {
9718                     /* More data returned than the caller's buffer will fit.
9719                      * This only happens if sizeof(abi_long) < sizeof(long)
9720                      * and the caller passed us a buffer holding an odd number
9721                      * of abi_longs. If the host kernel is actually using the
9722                      * extra 4 bytes then fail EINVAL; otherwise we can just
9723                      * ignore them and only copy the interesting part.
9724                      */
9725                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9726                     if (numcpus > arg2 * 8) {
9727                         return -TARGET_EINVAL;
9728                     }
9729                     ret = arg2;
9730                 }
9731 
9732                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9733                     return -TARGET_EFAULT;
9734                 }
9735             }
9736         }
9737         return ret;
9738     case TARGET_NR_sched_setaffinity:
9739         {
9740             unsigned int mask_size;
9741             unsigned long *mask;
9742 
9743             /*
9744              * sched_setaffinity needs multiples of ulong, so need to take
9745              * care of mismatches between target ulong and host ulong sizes.
9746              */
9747             if (arg2 & (sizeof(abi_ulong) - 1)) {
9748                 return -TARGET_EINVAL;
9749             }
9750             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9751             mask = alloca(mask_size);
9752 
9753             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9754             if (ret) {
9755                 return ret;
9756             }
9757 
9758             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9759         }
9760     case TARGET_NR_getcpu:
9761         {
9762             unsigned cpu, node;
9763             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9764                                        arg2 ? &node : NULL,
9765                                        NULL));
9766             if (is_error(ret)) {
9767                 return ret;
9768             }
9769             if (arg1 && put_user_u32(cpu, arg1)) {
9770                 return -TARGET_EFAULT;
9771             }
9772             if (arg2 && put_user_u32(node, arg2)) {
9773                 return -TARGET_EFAULT;
9774             }
9775         }
9776         return ret;
9777     case TARGET_NR_sched_setparam:
9778         {
9779             struct sched_param *target_schp;
9780             struct sched_param schp;
9781 
9782             if (arg2 == 0) {
9783                 return -TARGET_EINVAL;
9784             }
9785             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9786                 return -TARGET_EFAULT;
9787             schp.sched_priority = tswap32(target_schp->sched_priority);
9788             unlock_user_struct(target_schp, arg2, 0);
9789             return get_errno(sched_setparam(arg1, &schp));
9790         }
9791     case TARGET_NR_sched_getparam:
9792         {
9793             struct sched_param *target_schp;
9794             struct sched_param schp;
9795 
9796             if (arg2 == 0) {
9797                 return -TARGET_EINVAL;
9798             }
9799             ret = get_errno(sched_getparam(arg1, &schp));
9800             if (!is_error(ret)) {
9801                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9802                     return -TARGET_EFAULT;
9803                 target_schp->sched_priority = tswap32(schp.sched_priority);
9804                 unlock_user_struct(target_schp, arg2, 1);
9805             }
9806         }
9807         return ret;
9808     case TARGET_NR_sched_setscheduler:
9809         {
9810             struct sched_param *target_schp;
9811             struct sched_param schp;
9812             if (arg3 == 0) {
9813                 return -TARGET_EINVAL;
9814             }
9815             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9816                 return -TARGET_EFAULT;
9817             schp.sched_priority = tswap32(target_schp->sched_priority);
9818             unlock_user_struct(target_schp, arg3, 0);
9819             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9820         }
9821     case TARGET_NR_sched_getscheduler:
9822         return get_errno(sched_getscheduler(arg1));
9823     case TARGET_NR_sched_yield:
9824         return get_errno(sched_yield());
9825     case TARGET_NR_sched_get_priority_max:
9826         return get_errno(sched_get_priority_max(arg1));
9827     case TARGET_NR_sched_get_priority_min:
9828         return get_errno(sched_get_priority_min(arg1));
9829     case TARGET_NR_sched_rr_get_interval:
9830         {
9831             struct timespec ts;
9832             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9833             if (!is_error(ret)) {
9834                 ret = host_to_target_timespec(arg2, &ts);
9835             }
9836         }
9837         return ret;
9838     case TARGET_NR_nanosleep:
9839         {
9840             struct timespec req, rem;
9841             target_to_host_timespec(&req, arg1);
9842             ret = get_errno(safe_nanosleep(&req, &rem));
9843             if (is_error(ret) && arg2) {
9844                 host_to_target_timespec(arg2, &rem);
9845             }
9846         }
9847         return ret;
9848     case TARGET_NR_prctl:
9849         switch (arg1) {
9850         case PR_GET_PDEATHSIG:
9851         {
9852             int deathsig;
9853             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9854             if (!is_error(ret) && arg2
9855                 && put_user_ual(deathsig, arg2)) {
9856                 return -TARGET_EFAULT;
9857             }
9858             return ret;
9859         }
9860 #ifdef PR_GET_NAME
9861         case PR_GET_NAME:
9862         {
9863             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9864             if (!name) {
9865                 return -TARGET_EFAULT;
9866             }
9867             ret = get_errno(prctl(arg1, (unsigned long)name,
9868                                   arg3, arg4, arg5));
9869             unlock_user(name, arg2, 16);
9870             return ret;
9871         }
9872         case PR_SET_NAME:
9873         {
9874             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9875             if (!name) {
9876                 return -TARGET_EFAULT;
9877             }
9878             ret = get_errno(prctl(arg1, (unsigned long)name,
9879                                   arg3, arg4, arg5));
9880             unlock_user(name, arg2, 0);
9881             return ret;
9882         }
9883 #endif
9884 #ifdef TARGET_MIPS
9885         case TARGET_PR_GET_FP_MODE:
9886         {
9887             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9888             ret = 0;
9889             if (env->CP0_Status & (1 << CP0St_FR)) {
9890                 ret |= TARGET_PR_FP_MODE_FR;
9891             }
9892             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9893                 ret |= TARGET_PR_FP_MODE_FRE;
9894             }
9895             return ret;
9896         }
9897         case TARGET_PR_SET_FP_MODE:
9898         {
9899             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9900             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9901             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9902             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9903             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9904 
9905             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9906                                             TARGET_PR_FP_MODE_FRE;
9907 
9908             /* If nothing to change, return right away, successfully.  */
9909             if (old_fr == new_fr && old_fre == new_fre) {
9910                 return 0;
9911             }
9912             /* Check the value is valid */
9913             if (arg2 & ~known_bits) {
9914                 return -TARGET_EOPNOTSUPP;
9915             }
9916             /* Setting FRE without FR is not supported.  */
9917             if (new_fre && !new_fr) {
9918                 return -TARGET_EOPNOTSUPP;
9919             }
9920             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9921                 /* FR1 is not supported */
9922                 return -TARGET_EOPNOTSUPP;
9923             }
9924             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9925                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9926                 /* cannot set FR=0 */
9927                 return -TARGET_EOPNOTSUPP;
9928             }
9929             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9930                 /* Cannot set FRE=1 */
9931                 return -TARGET_EOPNOTSUPP;
9932             }
9933 
9934             int i;
9935             fpr_t *fpr = env->active_fpu.fpr;
9936             for (i = 0; i < 32 ; i += 2) {
9937                 if (!old_fr && new_fr) {
9938                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9939                 } else if (old_fr && !new_fr) {
9940                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9941                 }
9942             }
9943 
9944             if (new_fr) {
9945                 env->CP0_Status |= (1 << CP0St_FR);
9946                 env->hflags |= MIPS_HFLAG_F64;
9947             } else {
9948                 env->CP0_Status &= ~(1 << CP0St_FR);
9949                 env->hflags &= ~MIPS_HFLAG_F64;
9950             }
9951             if (new_fre) {
9952                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9953                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9954                     env->hflags |= MIPS_HFLAG_FRE;
9955                 }
9956             } else {
9957                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9958                 env->hflags &= ~MIPS_HFLAG_FRE;
9959             }
9960 
9961             return 0;
9962         }
9963 #endif /* MIPS */
9964 #ifdef TARGET_AARCH64
9965         case TARGET_PR_SVE_SET_VL:
9966             /*
9967              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9968              * PR_SVE_VL_INHERIT.  Note the kernel definition
9969              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9970              * even though the current architectural maximum is VQ=16.
9971              */
9972             ret = -TARGET_EINVAL;
9973             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9974                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9975                 CPUARMState *env = cpu_env;
9976                 ARMCPU *cpu = env_archcpu(env);
9977                 uint32_t vq, old_vq;
9978 
9979                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9980                 vq = MAX(arg2 / 16, 1);
9981                 vq = MIN(vq, cpu->sve_max_vq);
9982 
9983                 if (vq < old_vq) {
9984                     aarch64_sve_narrow_vq(env, vq);
9985                 }
9986                 env->vfp.zcr_el[1] = vq - 1;
9987                 arm_rebuild_hflags(env);
9988                 ret = vq * 16;
9989             }
9990             return ret;
9991         case TARGET_PR_SVE_GET_VL:
9992             ret = -TARGET_EINVAL;
9993             {
9994                 ARMCPU *cpu = env_archcpu(cpu_env);
9995                 if (cpu_isar_feature(aa64_sve, cpu)) {
9996                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9997                 }
9998             }
9999             return ret;
10000         case TARGET_PR_PAC_RESET_KEYS:
10001             {
10002                 CPUARMState *env = cpu_env;
10003                 ARMCPU *cpu = env_archcpu(env);
10004 
10005                 if (arg3 || arg4 || arg5) {
10006                     return -TARGET_EINVAL;
10007                 }
10008                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10009                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10010                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10011                                TARGET_PR_PAC_APGAKEY);
10012                     int ret = 0;
10013                     Error *err = NULL;
10014 
10015                     if (arg2 == 0) {
10016                         arg2 = all;
10017                     } else if (arg2 & ~all) {
10018                         return -TARGET_EINVAL;
10019                     }
10020                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10021                         ret |= qemu_guest_getrandom(&env->keys.apia,
10022                                                     sizeof(ARMPACKey), &err);
10023                     }
10024                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10025                         ret |= qemu_guest_getrandom(&env->keys.apib,
10026                                                     sizeof(ARMPACKey), &err);
10027                     }
10028                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10029                         ret |= qemu_guest_getrandom(&env->keys.apda,
10030                                                     sizeof(ARMPACKey), &err);
10031                     }
10032                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10033                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10034                                                     sizeof(ARMPACKey), &err);
10035                     }
10036                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10037                         ret |= qemu_guest_getrandom(&env->keys.apga,
10038                                                     sizeof(ARMPACKey), &err);
10039                     }
10040                     if (ret != 0) {
10041                         /*
10042                          * Some unknown failure in the crypto.  The best
10043                          * we can do is log it and fail the syscall.
10044                          * The real syscall cannot fail this way.
10045                          */
10046                         qemu_log_mask(LOG_UNIMP,
10047                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10048                                       error_get_pretty(err));
10049                         error_free(err);
10050                         return -TARGET_EIO;
10051                     }
10052                     return 0;
10053                 }
10054             }
10055             return -TARGET_EINVAL;
10056 #endif /* AARCH64 */
10057         case PR_GET_SECCOMP:
10058         case PR_SET_SECCOMP:
10059             /* Disable seccomp to prevent the target disabling syscalls we
10060              * need. */
10061             return -TARGET_EINVAL;
10062         default:
10063             /* Most prctl options have no pointer arguments */
10064             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10065         }
10066         break;
10067 #ifdef TARGET_NR_arch_prctl
10068     case TARGET_NR_arch_prctl:
10069 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10070         return do_arch_prctl(cpu_env, arg1, arg2);
10071 #else
10072 #error unreachable
10073 #endif
10074 #endif
10075 #ifdef TARGET_NR_pread64
10076     case TARGET_NR_pread64:
10077         if (regpairs_aligned(cpu_env, num)) {
10078             arg4 = arg5;
10079             arg5 = arg6;
10080         }
10081         if (arg2 == 0 && arg3 == 0) {
10082             /* Special-case NULL buffer and zero length, which should succeed */
10083             p = 0;
10084         } else {
10085             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10086             if (!p) {
10087                 return -TARGET_EFAULT;
10088             }
10089         }
10090         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10091         unlock_user(p, arg2, ret);
10092         return ret;
10093     case TARGET_NR_pwrite64:
10094         if (regpairs_aligned(cpu_env, num)) {
10095             arg4 = arg5;
10096             arg5 = arg6;
10097         }
10098         if (arg2 == 0 && arg3 == 0) {
10099             /* Special-case NULL buffer and zero length, which should succeed */
10100             p = 0;
10101         } else {
10102             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10103             if (!p) {
10104                 return -TARGET_EFAULT;
10105             }
10106         }
10107         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10108         unlock_user(p, arg2, 0);
10109         return ret;
10110 #endif
10111     case TARGET_NR_getcwd:
10112         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10113             return -TARGET_EFAULT;
10114         ret = get_errno(sys_getcwd1(p, arg2));
10115         unlock_user(p, arg1, ret);
10116         return ret;
10117     case TARGET_NR_capget:
10118     case TARGET_NR_capset:
10119     {
10120         struct target_user_cap_header *target_header;
10121         struct target_user_cap_data *target_data = NULL;
10122         struct __user_cap_header_struct header;
10123         struct __user_cap_data_struct data[2];
10124         struct __user_cap_data_struct *dataptr = NULL;
10125         int i, target_datalen;
10126         int data_items = 1;
10127 
10128         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10129             return -TARGET_EFAULT;
10130         }
10131         header.version = tswap32(target_header->version);
10132         header.pid = tswap32(target_header->pid);
10133 
10134         if (header.version != _LINUX_CAPABILITY_VERSION) {
10135             /* Version 2 and up takes pointer to two user_data structs */
10136             data_items = 2;
10137         }
10138 
10139         target_datalen = sizeof(*target_data) * data_items;
10140 
10141         if (arg2) {
10142             if (num == TARGET_NR_capget) {
10143                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10144             } else {
10145                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10146             }
10147             if (!target_data) {
10148                 unlock_user_struct(target_header, arg1, 0);
10149                 return -TARGET_EFAULT;
10150             }
10151 
10152             if (num == TARGET_NR_capset) {
10153                 for (i = 0; i < data_items; i++) {
10154                     data[i].effective = tswap32(target_data[i].effective);
10155                     data[i].permitted = tswap32(target_data[i].permitted);
10156                     data[i].inheritable = tswap32(target_data[i].inheritable);
10157                 }
10158             }
10159 
10160             dataptr = data;
10161         }
10162 
10163         if (num == TARGET_NR_capget) {
10164             ret = get_errno(capget(&header, dataptr));
10165         } else {
10166             ret = get_errno(capset(&header, dataptr));
10167         }
10168 
10169         /* The kernel always updates version for both capget and capset */
10170         target_header->version = tswap32(header.version);
10171         unlock_user_struct(target_header, arg1, 1);
10172 
10173         if (arg2) {
10174             if (num == TARGET_NR_capget) {
10175                 for (i = 0; i < data_items; i++) {
10176                     target_data[i].effective = tswap32(data[i].effective);
10177                     target_data[i].permitted = tswap32(data[i].permitted);
10178                     target_data[i].inheritable = tswap32(data[i].inheritable);
10179                 }
10180                 unlock_user(target_data, arg2, target_datalen);
10181             } else {
10182                 unlock_user(target_data, arg2, 0);
10183             }
10184         }
10185         return ret;
10186     }
10187     case TARGET_NR_sigaltstack:
10188         return do_sigaltstack(arg1, arg2,
10189                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10190 
10191 #ifdef CONFIG_SENDFILE
10192 #ifdef TARGET_NR_sendfile
10193     case TARGET_NR_sendfile:
10194     {
10195         off_t *offp = NULL;
10196         off_t off;
10197         if (arg3) {
10198             ret = get_user_sal(off, arg3);
10199             if (is_error(ret)) {
10200                 return ret;
10201             }
10202             offp = &off;
10203         }
10204         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10205         if (!is_error(ret) && arg3) {
10206             abi_long ret2 = put_user_sal(off, arg3);
10207             if (is_error(ret2)) {
10208                 ret = ret2;
10209             }
10210         }
10211         return ret;
10212     }
10213 #endif
10214 #ifdef TARGET_NR_sendfile64
10215     case TARGET_NR_sendfile64:
10216     {
10217         off_t *offp = NULL;
10218         off_t off;
10219         if (arg3) {
10220             ret = get_user_s64(off, arg3);
10221             if (is_error(ret)) {
10222                 return ret;
10223             }
10224             offp = &off;
10225         }
10226         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10227         if (!is_error(ret) && arg3) {
10228             abi_long ret2 = put_user_s64(off, arg3);
10229             if (is_error(ret2)) {
10230                 ret = ret2;
10231             }
10232         }
10233         return ret;
10234     }
10235 #endif
10236 #endif
10237 #ifdef TARGET_NR_vfork
10238     case TARGET_NR_vfork:
10239         return get_errno(do_fork(cpu_env,
10240                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10241                          0, 0, 0, 0));
10242 #endif
10243 #ifdef TARGET_NR_ugetrlimit
10244     case TARGET_NR_ugetrlimit:
10245     {
10246 	struct rlimit rlim;
10247 	int resource = target_to_host_resource(arg1);
10248 	ret = get_errno(getrlimit(resource, &rlim));
10249 	if (!is_error(ret)) {
10250 	    struct target_rlimit *target_rlim;
10251             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10252                 return -TARGET_EFAULT;
10253 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10254 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10255             unlock_user_struct(target_rlim, arg2, 1);
10256 	}
10257         return ret;
10258     }
10259 #endif
10260 #ifdef TARGET_NR_truncate64
10261     case TARGET_NR_truncate64:
10262         if (!(p = lock_user_string(arg1)))
10263             return -TARGET_EFAULT;
10264 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10265         unlock_user(p, arg1, 0);
10266         return ret;
10267 #endif
10268 #ifdef TARGET_NR_ftruncate64
10269     case TARGET_NR_ftruncate64:
10270         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10271 #endif
10272 #ifdef TARGET_NR_stat64
10273     case TARGET_NR_stat64:
10274         if (!(p = lock_user_string(arg1))) {
10275             return -TARGET_EFAULT;
10276         }
10277         ret = get_errno(stat(path(p), &st));
10278         unlock_user(p, arg1, 0);
10279         if (!is_error(ret))
10280             ret = host_to_target_stat64(cpu_env, arg2, &st);
10281         return ret;
10282 #endif
10283 #ifdef TARGET_NR_lstat64
10284     case TARGET_NR_lstat64:
10285         if (!(p = lock_user_string(arg1))) {
10286             return -TARGET_EFAULT;
10287         }
10288         ret = get_errno(lstat(path(p), &st));
10289         unlock_user(p, arg1, 0);
10290         if (!is_error(ret))
10291             ret = host_to_target_stat64(cpu_env, arg2, &st);
10292         return ret;
10293 #endif
10294 #ifdef TARGET_NR_fstat64
10295     case TARGET_NR_fstat64:
10296         ret = get_errno(fstat(arg1, &st));
10297         if (!is_error(ret))
10298             ret = host_to_target_stat64(cpu_env, arg2, &st);
10299         return ret;
10300 #endif
10301 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10302 #ifdef TARGET_NR_fstatat64
10303     case TARGET_NR_fstatat64:
10304 #endif
10305 #ifdef TARGET_NR_newfstatat
10306     case TARGET_NR_newfstatat:
10307 #endif
10308         if (!(p = lock_user_string(arg2))) {
10309             return -TARGET_EFAULT;
10310         }
10311         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10312         unlock_user(p, arg2, 0);
10313         if (!is_error(ret))
10314             ret = host_to_target_stat64(cpu_env, arg3, &st);
10315         return ret;
10316 #endif
10317 #if defined(TARGET_NR_statx)
10318     case TARGET_NR_statx:
10319         {
10320             struct target_statx *target_stx;
10321             int dirfd = arg1;
10322             int flags = arg3;
10323 
10324             p = lock_user_string(arg2);
10325             if (p == NULL) {
10326                 return -TARGET_EFAULT;
10327             }
10328 #if defined(__NR_statx)
10329             {
10330                 /*
10331                  * It is assumed that struct statx is architecture independent.
10332                  */
10333                 struct target_statx host_stx;
10334                 int mask = arg4;
10335 
10336                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10337                 if (!is_error(ret)) {
10338                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10339                         unlock_user(p, arg2, 0);
10340                         return -TARGET_EFAULT;
10341                     }
10342                 }
10343 
10344                 if (ret != -TARGET_ENOSYS) {
10345                     unlock_user(p, arg2, 0);
10346                     return ret;
10347                 }
10348             }
10349 #endif
10350             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10351             unlock_user(p, arg2, 0);
10352 
10353             if (!is_error(ret)) {
10354                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10355                     return -TARGET_EFAULT;
10356                 }
10357                 memset(target_stx, 0, sizeof(*target_stx));
10358                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10359                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10360                 __put_user(st.st_ino, &target_stx->stx_ino);
10361                 __put_user(st.st_mode, &target_stx->stx_mode);
10362                 __put_user(st.st_uid, &target_stx->stx_uid);
10363                 __put_user(st.st_gid, &target_stx->stx_gid);
10364                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10365                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10366                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10367                 __put_user(st.st_size, &target_stx->stx_size);
10368                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10369                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10370                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10371                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10372                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10373                 unlock_user_struct(target_stx, arg5, 1);
10374             }
10375         }
10376         return ret;
10377 #endif
10378 #ifdef TARGET_NR_lchown
10379     case TARGET_NR_lchown:
10380         if (!(p = lock_user_string(arg1)))
10381             return -TARGET_EFAULT;
10382         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10383         unlock_user(p, arg1, 0);
10384         return ret;
10385 #endif
10386 #ifdef TARGET_NR_getuid
10387     case TARGET_NR_getuid:
10388         return get_errno(high2lowuid(getuid()));
10389 #endif
10390 #ifdef TARGET_NR_getgid
10391     case TARGET_NR_getgid:
10392         return get_errno(high2lowgid(getgid()));
10393 #endif
10394 #ifdef TARGET_NR_geteuid
10395     case TARGET_NR_geteuid:
10396         return get_errno(high2lowuid(geteuid()));
10397 #endif
10398 #ifdef TARGET_NR_getegid
10399     case TARGET_NR_getegid:
10400         return get_errno(high2lowgid(getegid()));
10401 #endif
10402     case TARGET_NR_setreuid:
10403         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10404     case TARGET_NR_setregid:
10405         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10406     case TARGET_NR_getgroups:
10407         {
10408             int gidsetsize = arg1;
10409             target_id *target_grouplist;
10410             gid_t *grouplist;
10411             int i;
10412 
10413             grouplist = alloca(gidsetsize * sizeof(gid_t));
10414             ret = get_errno(getgroups(gidsetsize, grouplist));
10415             if (gidsetsize == 0)
10416                 return ret;
10417             if (!is_error(ret)) {
10418                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10419                 if (!target_grouplist)
10420                     return -TARGET_EFAULT;
10421                 for(i = 0;i < ret; i++)
10422                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10423                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10424             }
10425         }
10426         return ret;
10427     case TARGET_NR_setgroups:
10428         {
10429             int gidsetsize = arg1;
10430             target_id *target_grouplist;
10431             gid_t *grouplist = NULL;
10432             int i;
10433             if (gidsetsize) {
10434                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10435                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10436                 if (!target_grouplist) {
10437                     return -TARGET_EFAULT;
10438                 }
10439                 for (i = 0; i < gidsetsize; i++) {
10440                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10441                 }
10442                 unlock_user(target_grouplist, arg2, 0);
10443             }
10444             return get_errno(setgroups(gidsetsize, grouplist));
10445         }
10446     case TARGET_NR_fchown:
10447         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10448 #if defined(TARGET_NR_fchownat)
10449     case TARGET_NR_fchownat:
10450         if (!(p = lock_user_string(arg2)))
10451             return -TARGET_EFAULT;
10452         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10453                                  low2highgid(arg4), arg5));
10454         unlock_user(p, arg2, 0);
10455         return ret;
10456 #endif
10457 #ifdef TARGET_NR_setresuid
10458     case TARGET_NR_setresuid:
10459         return get_errno(sys_setresuid(low2highuid(arg1),
10460                                        low2highuid(arg2),
10461                                        low2highuid(arg3)));
10462 #endif
10463 #ifdef TARGET_NR_getresuid
10464     case TARGET_NR_getresuid:
10465         {
10466             uid_t ruid, euid, suid;
10467             ret = get_errno(getresuid(&ruid, &euid, &suid));
10468             if (!is_error(ret)) {
10469                 if (put_user_id(high2lowuid(ruid), arg1)
10470                     || put_user_id(high2lowuid(euid), arg2)
10471                     || put_user_id(high2lowuid(suid), arg3))
10472                     return -TARGET_EFAULT;
10473             }
10474         }
10475         return ret;
10476 #endif
10477 #ifdef TARGET_NR_getresgid
10478     case TARGET_NR_setresgid:
10479         return get_errno(sys_setresgid(low2highgid(arg1),
10480                                        low2highgid(arg2),
10481                                        low2highgid(arg3)));
10482 #endif
10483 #ifdef TARGET_NR_getresgid
10484     case TARGET_NR_getresgid:
10485         {
10486             gid_t rgid, egid, sgid;
10487             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10488             if (!is_error(ret)) {
10489                 if (put_user_id(high2lowgid(rgid), arg1)
10490                     || put_user_id(high2lowgid(egid), arg2)
10491                     || put_user_id(high2lowgid(sgid), arg3))
10492                     return -TARGET_EFAULT;
10493             }
10494         }
10495         return ret;
10496 #endif
10497 #ifdef TARGET_NR_chown
10498     case TARGET_NR_chown:
10499         if (!(p = lock_user_string(arg1)))
10500             return -TARGET_EFAULT;
10501         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10502         unlock_user(p, arg1, 0);
10503         return ret;
10504 #endif
10505     case TARGET_NR_setuid:
10506         return get_errno(sys_setuid(low2highuid(arg1)));
10507     case TARGET_NR_setgid:
10508         return get_errno(sys_setgid(low2highgid(arg1)));
10509     case TARGET_NR_setfsuid:
10510         return get_errno(setfsuid(arg1));
10511     case TARGET_NR_setfsgid:
10512         return get_errno(setfsgid(arg1));
10513 
10514 #ifdef TARGET_NR_lchown32
10515     case TARGET_NR_lchown32:
10516         if (!(p = lock_user_string(arg1)))
10517             return -TARGET_EFAULT;
10518         ret = get_errno(lchown(p, arg2, arg3));
10519         unlock_user(p, arg1, 0);
10520         return ret;
10521 #endif
10522 #ifdef TARGET_NR_getuid32
10523     case TARGET_NR_getuid32:
10524         return get_errno(getuid());
10525 #endif
10526 
10527 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10528    /* Alpha specific */
10529     case TARGET_NR_getxuid:
10530          {
10531             uid_t euid;
10532             euid=geteuid();
10533             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10534          }
10535         return get_errno(getuid());
10536 #endif
10537 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10538    /* Alpha specific */
10539     case TARGET_NR_getxgid:
10540          {
10541             uid_t egid;
10542             egid=getegid();
10543             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10544          }
10545         return get_errno(getgid());
10546 #endif
10547 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10548     /* Alpha specific */
10549     case TARGET_NR_osf_getsysinfo:
10550         ret = -TARGET_EOPNOTSUPP;
10551         switch (arg1) {
10552           case TARGET_GSI_IEEE_FP_CONTROL:
10553             {
10554                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10555                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10556 
10557                 swcr &= ~SWCR_STATUS_MASK;
10558                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10559 
10560                 if (put_user_u64 (swcr, arg2))
10561                         return -TARGET_EFAULT;
10562                 ret = 0;
10563             }
10564             break;
10565 
10566           /* case GSI_IEEE_STATE_AT_SIGNAL:
10567              -- Not implemented in linux kernel.
10568              case GSI_UACPROC:
10569              -- Retrieves current unaligned access state; not much used.
10570              case GSI_PROC_TYPE:
10571              -- Retrieves implver information; surely not used.
10572              case GSI_GET_HWRPB:
10573              -- Grabs a copy of the HWRPB; surely not used.
10574           */
10575         }
10576         return ret;
10577 #endif
10578 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10579     /* Alpha specific */
10580     case TARGET_NR_osf_setsysinfo:
10581         ret = -TARGET_EOPNOTSUPP;
10582         switch (arg1) {
10583           case TARGET_SSI_IEEE_FP_CONTROL:
10584             {
10585                 uint64_t swcr, fpcr;
10586 
10587                 if (get_user_u64 (swcr, arg2)) {
10588                     return -TARGET_EFAULT;
10589                 }
10590 
10591                 /*
10592                  * The kernel calls swcr_update_status to update the
10593                  * status bits from the fpcr at every point that it
10594                  * could be queried.  Therefore, we store the status
10595                  * bits only in FPCR.
10596                  */
10597                 ((CPUAlphaState *)cpu_env)->swcr
10598                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10599 
10600                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10601                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10602                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10603                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10604                 ret = 0;
10605             }
10606             break;
10607 
10608           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10609             {
10610                 uint64_t exc, fpcr, fex;
10611 
10612                 if (get_user_u64(exc, arg2)) {
10613                     return -TARGET_EFAULT;
10614                 }
10615                 exc &= SWCR_STATUS_MASK;
10616                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10617 
10618                 /* Old exceptions are not signaled.  */
10619                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10620                 fex = exc & ~fex;
10621                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10622                 fex &= ((CPUArchState *)cpu_env)->swcr;
10623 
10624                 /* Update the hardware fpcr.  */
10625                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10626                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10627 
10628                 if (fex) {
10629                     int si_code = TARGET_FPE_FLTUNK;
10630                     target_siginfo_t info;
10631 
10632                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10633                         si_code = TARGET_FPE_FLTUND;
10634                     }
10635                     if (fex & SWCR_TRAP_ENABLE_INE) {
10636                         si_code = TARGET_FPE_FLTRES;
10637                     }
10638                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10639                         si_code = TARGET_FPE_FLTUND;
10640                     }
10641                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10642                         si_code = TARGET_FPE_FLTOVF;
10643                     }
10644                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10645                         si_code = TARGET_FPE_FLTDIV;
10646                     }
10647                     if (fex & SWCR_TRAP_ENABLE_INV) {
10648                         si_code = TARGET_FPE_FLTINV;
10649                     }
10650 
10651                     info.si_signo = SIGFPE;
10652                     info.si_errno = 0;
10653                     info.si_code = si_code;
10654                     info._sifields._sigfault._addr
10655                         = ((CPUArchState *)cpu_env)->pc;
10656                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10657                                  QEMU_SI_FAULT, &info);
10658                 }
10659                 ret = 0;
10660             }
10661             break;
10662 
10663           /* case SSI_NVPAIRS:
10664              -- Used with SSIN_UACPROC to enable unaligned accesses.
10665              case SSI_IEEE_STATE_AT_SIGNAL:
10666              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10667              -- Not implemented in linux kernel
10668           */
10669         }
10670         return ret;
10671 #endif
10672 #ifdef TARGET_NR_osf_sigprocmask
10673     /* Alpha specific.  */
10674     case TARGET_NR_osf_sigprocmask:
10675         {
10676             abi_ulong mask;
10677             int how;
10678             sigset_t set, oldset;
10679 
10680             switch(arg1) {
10681             case TARGET_SIG_BLOCK:
10682                 how = SIG_BLOCK;
10683                 break;
10684             case TARGET_SIG_UNBLOCK:
10685                 how = SIG_UNBLOCK;
10686                 break;
10687             case TARGET_SIG_SETMASK:
10688                 how = SIG_SETMASK;
10689                 break;
10690             default:
10691                 return -TARGET_EINVAL;
10692             }
10693             mask = arg2;
10694             target_to_host_old_sigset(&set, &mask);
10695             ret = do_sigprocmask(how, &set, &oldset);
10696             if (!ret) {
10697                 host_to_target_old_sigset(&mask, &oldset);
10698                 ret = mask;
10699             }
10700         }
10701         return ret;
10702 #endif
10703 
10704 #ifdef TARGET_NR_getgid32
10705     case TARGET_NR_getgid32:
10706         return get_errno(getgid());
10707 #endif
10708 #ifdef TARGET_NR_geteuid32
10709     case TARGET_NR_geteuid32:
10710         return get_errno(geteuid());
10711 #endif
10712 #ifdef TARGET_NR_getegid32
10713     case TARGET_NR_getegid32:
10714         return get_errno(getegid());
10715 #endif
10716 #ifdef TARGET_NR_setreuid32
10717     case TARGET_NR_setreuid32:
10718         return get_errno(setreuid(arg1, arg2));
10719 #endif
10720 #ifdef TARGET_NR_setregid32
10721     case TARGET_NR_setregid32:
10722         return get_errno(setregid(arg1, arg2));
10723 #endif
10724 #ifdef TARGET_NR_getgroups32
10725     case TARGET_NR_getgroups32:
10726         {
10727             int gidsetsize = arg1;
10728             uint32_t *target_grouplist;
10729             gid_t *grouplist;
10730             int i;
10731 
10732             grouplist = alloca(gidsetsize * sizeof(gid_t));
10733             ret = get_errno(getgroups(gidsetsize, grouplist));
10734             if (gidsetsize == 0)
10735                 return ret;
10736             if (!is_error(ret)) {
10737                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10738                 if (!target_grouplist) {
10739                     return -TARGET_EFAULT;
10740                 }
10741                 for(i = 0;i < ret; i++)
10742                     target_grouplist[i] = tswap32(grouplist[i]);
10743                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10744             }
10745         }
10746         return ret;
10747 #endif
10748 #ifdef TARGET_NR_setgroups32
10749     case TARGET_NR_setgroups32:
10750         {
10751             int gidsetsize = arg1;
10752             uint32_t *target_grouplist;
10753             gid_t *grouplist;
10754             int i;
10755 
10756             grouplist = alloca(gidsetsize * sizeof(gid_t));
10757             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10758             if (!target_grouplist) {
10759                 return -TARGET_EFAULT;
10760             }
10761             for(i = 0;i < gidsetsize; i++)
10762                 grouplist[i] = tswap32(target_grouplist[i]);
10763             unlock_user(target_grouplist, arg2, 0);
10764             return get_errno(setgroups(gidsetsize, grouplist));
10765         }
10766 #endif
10767 #ifdef TARGET_NR_fchown32
10768     case TARGET_NR_fchown32:
10769         return get_errno(fchown(arg1, arg2, arg3));
10770 #endif
10771 #ifdef TARGET_NR_setresuid32
10772     case TARGET_NR_setresuid32:
10773         return get_errno(sys_setresuid(arg1, arg2, arg3));
10774 #endif
10775 #ifdef TARGET_NR_getresuid32
10776     case TARGET_NR_getresuid32:
10777         {
10778             uid_t ruid, euid, suid;
10779             ret = get_errno(getresuid(&ruid, &euid, &suid));
10780             if (!is_error(ret)) {
10781                 if (put_user_u32(ruid, arg1)
10782                     || put_user_u32(euid, arg2)
10783                     || put_user_u32(suid, arg3))
10784                     return -TARGET_EFAULT;
10785             }
10786         }
10787         return ret;
10788 #endif
10789 #ifdef TARGET_NR_setresgid32
10790     case TARGET_NR_setresgid32:
10791         return get_errno(sys_setresgid(arg1, arg2, arg3));
10792 #endif
10793 #ifdef TARGET_NR_getresgid32
10794     case TARGET_NR_getresgid32:
10795         {
10796             gid_t rgid, egid, sgid;
10797             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10798             if (!is_error(ret)) {
10799                 if (put_user_u32(rgid, arg1)
10800                     || put_user_u32(egid, arg2)
10801                     || put_user_u32(sgid, arg3))
10802                     return -TARGET_EFAULT;
10803             }
10804         }
10805         return ret;
10806 #endif
10807 #ifdef TARGET_NR_chown32
10808     case TARGET_NR_chown32:
10809         if (!(p = lock_user_string(arg1)))
10810             return -TARGET_EFAULT;
10811         ret = get_errno(chown(p, arg2, arg3));
10812         unlock_user(p, arg1, 0);
10813         return ret;
10814 #endif
10815 #ifdef TARGET_NR_setuid32
10816     case TARGET_NR_setuid32:
10817         return get_errno(sys_setuid(arg1));
10818 #endif
10819 #ifdef TARGET_NR_setgid32
10820     case TARGET_NR_setgid32:
10821         return get_errno(sys_setgid(arg1));
10822 #endif
10823 #ifdef TARGET_NR_setfsuid32
10824     case TARGET_NR_setfsuid32:
10825         return get_errno(setfsuid(arg1));
10826 #endif
10827 #ifdef TARGET_NR_setfsgid32
10828     case TARGET_NR_setfsgid32:
10829         return get_errno(setfsgid(arg1));
10830 #endif
10831 #ifdef TARGET_NR_mincore
10832     case TARGET_NR_mincore:
10833         {
10834             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10835             if (!a) {
10836                 return -TARGET_ENOMEM;
10837             }
10838             p = lock_user_string(arg3);
10839             if (!p) {
10840                 ret = -TARGET_EFAULT;
10841             } else {
10842                 ret = get_errno(mincore(a, arg2, p));
10843                 unlock_user(p, arg3, ret);
10844             }
10845             unlock_user(a, arg1, 0);
10846         }
10847         return ret;
10848 #endif
10849 #ifdef TARGET_NR_arm_fadvise64_64
10850     case TARGET_NR_arm_fadvise64_64:
10851         /* arm_fadvise64_64 looks like fadvise64_64 but
10852          * with different argument order: fd, advice, offset, len
10853          * rather than the usual fd, offset, len, advice.
10854          * Note that offset and len are both 64-bit so appear as
10855          * pairs of 32-bit registers.
10856          */
10857         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10858                             target_offset64(arg5, arg6), arg2);
10859         return -host_to_target_errno(ret);
10860 #endif
10861 
10862 #if TARGET_ABI_BITS == 32
10863 
10864 #ifdef TARGET_NR_fadvise64_64
10865     case TARGET_NR_fadvise64_64:
10866 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10867         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10868         ret = arg2;
10869         arg2 = arg3;
10870         arg3 = arg4;
10871         arg4 = arg5;
10872         arg5 = arg6;
10873         arg6 = ret;
10874 #else
10875         /* 6 args: fd, offset (high, low), len (high, low), advice */
10876         if (regpairs_aligned(cpu_env, num)) {
10877             /* offset is in (3,4), len in (5,6) and advice in 7 */
10878             arg2 = arg3;
10879             arg3 = arg4;
10880             arg4 = arg5;
10881             arg5 = arg6;
10882             arg6 = arg7;
10883         }
10884 #endif
10885         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10886                             target_offset64(arg4, arg5), arg6);
10887         return -host_to_target_errno(ret);
10888 #endif
10889 
10890 #ifdef TARGET_NR_fadvise64
10891     case TARGET_NR_fadvise64:
10892         /* 5 args: fd, offset (high, low), len, advice */
10893         if (regpairs_aligned(cpu_env, num)) {
10894             /* offset is in (3,4), len in 5 and advice in 6 */
10895             arg2 = arg3;
10896             arg3 = arg4;
10897             arg4 = arg5;
10898             arg5 = arg6;
10899         }
10900         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10901         return -host_to_target_errno(ret);
10902 #endif
10903 
10904 #else /* not a 32-bit ABI */
10905 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10906 #ifdef TARGET_NR_fadvise64_64
10907     case TARGET_NR_fadvise64_64:
10908 #endif
10909 #ifdef TARGET_NR_fadvise64
10910     case TARGET_NR_fadvise64:
10911 #endif
10912 #ifdef TARGET_S390X
10913         switch (arg4) {
10914         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10915         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10916         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10917         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10918         default: break;
10919         }
10920 #endif
10921         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10922 #endif
10923 #endif /* end of 64-bit ABI fadvise handling */
10924 
10925 #ifdef TARGET_NR_madvise
10926     case TARGET_NR_madvise:
10927         /* A straight passthrough may not be safe because qemu sometimes
10928            turns private file-backed mappings into anonymous mappings.
10929            This will break MADV_DONTNEED.
10930            This is a hint, so ignoring and returning success is ok.  */
10931         return 0;
10932 #endif
10933 #if TARGET_ABI_BITS == 32
10934     case TARGET_NR_fcntl64:
10935     {
10936 	int cmd;
10937 	struct flock64 fl;
10938         from_flock64_fn *copyfrom = copy_from_user_flock64;
10939         to_flock64_fn *copyto = copy_to_user_flock64;
10940 
10941 #ifdef TARGET_ARM
10942         if (!((CPUARMState *)cpu_env)->eabi) {
10943             copyfrom = copy_from_user_oabi_flock64;
10944             copyto = copy_to_user_oabi_flock64;
10945         }
10946 #endif
10947 
10948 	cmd = target_to_host_fcntl_cmd(arg2);
10949         if (cmd == -TARGET_EINVAL) {
10950             return cmd;
10951         }
10952 
10953         switch(arg2) {
10954         case TARGET_F_GETLK64:
10955             ret = copyfrom(&fl, arg3);
10956             if (ret) {
10957                 break;
10958             }
10959             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10960             if (ret == 0) {
10961                 ret = copyto(arg3, &fl);
10962             }
10963 	    break;
10964 
10965         case TARGET_F_SETLK64:
10966         case TARGET_F_SETLKW64:
10967             ret = copyfrom(&fl, arg3);
10968             if (ret) {
10969                 break;
10970             }
10971             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10972 	    break;
10973         default:
10974             ret = do_fcntl(arg1, arg2, arg3);
10975             break;
10976         }
10977         return ret;
10978     }
10979 #endif
10980 #ifdef TARGET_NR_cacheflush
10981     case TARGET_NR_cacheflush:
10982         /* self-modifying code is handled automatically, so nothing needed */
10983         return 0;
10984 #endif
10985 #ifdef TARGET_NR_getpagesize
10986     case TARGET_NR_getpagesize:
10987         return TARGET_PAGE_SIZE;
10988 #endif
10989     case TARGET_NR_gettid:
10990         return get_errno(sys_gettid());
10991 #ifdef TARGET_NR_readahead
10992     case TARGET_NR_readahead:
10993 #if TARGET_ABI_BITS == 32
10994         if (regpairs_aligned(cpu_env, num)) {
10995             arg2 = arg3;
10996             arg3 = arg4;
10997             arg4 = arg5;
10998         }
10999         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11000 #else
11001         ret = get_errno(readahead(arg1, arg2, arg3));
11002 #endif
11003         return ret;
11004 #endif
11005 #ifdef CONFIG_ATTR
11006 #ifdef TARGET_NR_setxattr
11007     case TARGET_NR_listxattr:
11008     case TARGET_NR_llistxattr:
11009     {
11010         void *p, *b = 0;
11011         if (arg2) {
11012             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11013             if (!b) {
11014                 return -TARGET_EFAULT;
11015             }
11016         }
11017         p = lock_user_string(arg1);
11018         if (p) {
11019             if (num == TARGET_NR_listxattr) {
11020                 ret = get_errno(listxattr(p, b, arg3));
11021             } else {
11022                 ret = get_errno(llistxattr(p, b, arg3));
11023             }
11024         } else {
11025             ret = -TARGET_EFAULT;
11026         }
11027         unlock_user(p, arg1, 0);
11028         unlock_user(b, arg2, arg3);
11029         return ret;
11030     }
11031     case TARGET_NR_flistxattr:
11032     {
11033         void *b = 0;
11034         if (arg2) {
11035             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11036             if (!b) {
11037                 return -TARGET_EFAULT;
11038             }
11039         }
11040         ret = get_errno(flistxattr(arg1, b, arg3));
11041         unlock_user(b, arg2, arg3);
11042         return ret;
11043     }
11044     case TARGET_NR_setxattr:
11045     case TARGET_NR_lsetxattr:
11046         {
11047             void *p, *n, *v = 0;
11048             if (arg3) {
11049                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11050                 if (!v) {
11051                     return -TARGET_EFAULT;
11052                 }
11053             }
11054             p = lock_user_string(arg1);
11055             n = lock_user_string(arg2);
11056             if (p && n) {
11057                 if (num == TARGET_NR_setxattr) {
11058                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11059                 } else {
11060                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11061                 }
11062             } else {
11063                 ret = -TARGET_EFAULT;
11064             }
11065             unlock_user(p, arg1, 0);
11066             unlock_user(n, arg2, 0);
11067             unlock_user(v, arg3, 0);
11068         }
11069         return ret;
11070     case TARGET_NR_fsetxattr:
11071         {
11072             void *n, *v = 0;
11073             if (arg3) {
11074                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11075                 if (!v) {
11076                     return -TARGET_EFAULT;
11077                 }
11078             }
11079             n = lock_user_string(arg2);
11080             if (n) {
11081                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11082             } else {
11083                 ret = -TARGET_EFAULT;
11084             }
11085             unlock_user(n, arg2, 0);
11086             unlock_user(v, arg3, 0);
11087         }
11088         return ret;
11089     case TARGET_NR_getxattr:
11090     case TARGET_NR_lgetxattr:
11091         {
11092             void *p, *n, *v = 0;
11093             if (arg3) {
11094                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11095                 if (!v) {
11096                     return -TARGET_EFAULT;
11097                 }
11098             }
11099             p = lock_user_string(arg1);
11100             n = lock_user_string(arg2);
11101             if (p && n) {
11102                 if (num == TARGET_NR_getxattr) {
11103                     ret = get_errno(getxattr(p, n, v, arg4));
11104                 } else {
11105                     ret = get_errno(lgetxattr(p, n, v, arg4));
11106                 }
11107             } else {
11108                 ret = -TARGET_EFAULT;
11109             }
11110             unlock_user(p, arg1, 0);
11111             unlock_user(n, arg2, 0);
11112             unlock_user(v, arg3, arg4);
11113         }
11114         return ret;
11115     case TARGET_NR_fgetxattr:
11116         {
11117             void *n, *v = 0;
11118             if (arg3) {
11119                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11120                 if (!v) {
11121                     return -TARGET_EFAULT;
11122                 }
11123             }
11124             n = lock_user_string(arg2);
11125             if (n) {
11126                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11127             } else {
11128                 ret = -TARGET_EFAULT;
11129             }
11130             unlock_user(n, arg2, 0);
11131             unlock_user(v, arg3, arg4);
11132         }
11133         return ret;
11134     case TARGET_NR_removexattr:
11135     case TARGET_NR_lremovexattr:
11136         {
11137             void *p, *n;
11138             p = lock_user_string(arg1);
11139             n = lock_user_string(arg2);
11140             if (p && n) {
11141                 if (num == TARGET_NR_removexattr) {
11142                     ret = get_errno(removexattr(p, n));
11143                 } else {
11144                     ret = get_errno(lremovexattr(p, n));
11145                 }
11146             } else {
11147                 ret = -TARGET_EFAULT;
11148             }
11149             unlock_user(p, arg1, 0);
11150             unlock_user(n, arg2, 0);
11151         }
11152         return ret;
11153     case TARGET_NR_fremovexattr:
11154         {
11155             void *n;
11156             n = lock_user_string(arg2);
11157             if (n) {
11158                 ret = get_errno(fremovexattr(arg1, n));
11159             } else {
11160                 ret = -TARGET_EFAULT;
11161             }
11162             unlock_user(n, arg2, 0);
11163         }
11164         return ret;
11165 #endif
11166 #endif /* CONFIG_ATTR */
11167 #ifdef TARGET_NR_set_thread_area
11168     case TARGET_NR_set_thread_area:
11169 #if defined(TARGET_MIPS)
11170       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11171       return 0;
11172 #elif defined(TARGET_CRIS)
11173       if (arg1 & 0xff)
11174           ret = -TARGET_EINVAL;
11175       else {
11176           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11177           ret = 0;
11178       }
11179       return ret;
11180 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11181       return do_set_thread_area(cpu_env, arg1);
11182 #elif defined(TARGET_M68K)
11183       {
11184           TaskState *ts = cpu->opaque;
11185           ts->tp_value = arg1;
11186           return 0;
11187       }
11188 #else
11189       return -TARGET_ENOSYS;
11190 #endif
11191 #endif
11192 #ifdef TARGET_NR_get_thread_area
11193     case TARGET_NR_get_thread_area:
11194 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11195         return do_get_thread_area(cpu_env, arg1);
11196 #elif defined(TARGET_M68K)
11197         {
11198             TaskState *ts = cpu->opaque;
11199             return ts->tp_value;
11200         }
11201 #else
11202         return -TARGET_ENOSYS;
11203 #endif
11204 #endif
11205 #ifdef TARGET_NR_getdomainname
11206     case TARGET_NR_getdomainname:
11207         return -TARGET_ENOSYS;
11208 #endif
11209 
11210 #ifdef TARGET_NR_clock_settime
11211     case TARGET_NR_clock_settime:
11212     {
11213         struct timespec ts;
11214 
11215         ret = target_to_host_timespec(&ts, arg2);
11216         if (!is_error(ret)) {
11217             ret = get_errno(clock_settime(arg1, &ts));
11218         }
11219         return ret;
11220     }
11221 #endif
11222 #ifdef TARGET_NR_clock_gettime
11223     case TARGET_NR_clock_gettime:
11224     {
11225         struct timespec ts;
11226         ret = get_errno(clock_gettime(arg1, &ts));
11227         if (!is_error(ret)) {
11228             ret = host_to_target_timespec(arg2, &ts);
11229         }
11230         return ret;
11231     }
11232 #endif
11233 #ifdef TARGET_NR_clock_getres
11234     case TARGET_NR_clock_getres:
11235     {
11236         struct timespec ts;
11237         ret = get_errno(clock_getres(arg1, &ts));
11238         if (!is_error(ret)) {
11239             host_to_target_timespec(arg2, &ts);
11240         }
11241         return ret;
11242     }
11243 #endif
11244 #ifdef TARGET_NR_clock_nanosleep
11245     case TARGET_NR_clock_nanosleep:
11246     {
11247         struct timespec ts;
11248         target_to_host_timespec(&ts, arg3);
11249         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11250                                              &ts, arg4 ? &ts : NULL));
11251         if (arg4)
11252             host_to_target_timespec(arg4, &ts);
11253 
11254 #if defined(TARGET_PPC)
11255         /* clock_nanosleep is odd in that it returns positive errno values.
11256          * On PPC, CR0 bit 3 should be set in such a situation. */
11257         if (ret && ret != -TARGET_ERESTARTSYS) {
11258             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11259         }
11260 #endif
11261         return ret;
11262     }
11263 #endif
11264 
11265 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11266     case TARGET_NR_set_tid_address:
11267         return get_errno(set_tid_address((int *)g2h(arg1)));
11268 #endif
11269 
11270     case TARGET_NR_tkill:
11271         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11272 
11273     case TARGET_NR_tgkill:
11274         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11275                          target_to_host_signal(arg3)));
11276 
11277 #ifdef TARGET_NR_set_robust_list
11278     case TARGET_NR_set_robust_list:
11279     case TARGET_NR_get_robust_list:
11280         /* The ABI for supporting robust futexes has userspace pass
11281          * the kernel a pointer to a linked list which is updated by
11282          * userspace after the syscall; the list is walked by the kernel
11283          * when the thread exits. Since the linked list in QEMU guest
11284          * memory isn't a valid linked list for the host and we have
11285          * no way to reliably intercept the thread-death event, we can't
11286          * support these. Silently return ENOSYS so that guest userspace
11287          * falls back to a non-robust futex implementation (which should
11288          * be OK except in the corner case of the guest crashing while
11289          * holding a mutex that is shared with another process via
11290          * shared memory).
11291          */
11292         return -TARGET_ENOSYS;
11293 #endif
11294 
11295 #if defined(TARGET_NR_utimensat)
11296     case TARGET_NR_utimensat:
11297         {
11298             struct timespec *tsp, ts[2];
11299             if (!arg3) {
11300                 tsp = NULL;
11301             } else {
11302                 target_to_host_timespec(ts, arg3);
11303                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11304                 tsp = ts;
11305             }
11306             if (!arg2)
11307                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11308             else {
11309                 if (!(p = lock_user_string(arg2))) {
11310                     return -TARGET_EFAULT;
11311                 }
11312                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11313                 unlock_user(p, arg2, 0);
11314             }
11315         }
11316         return ret;
11317 #endif
11318     case TARGET_NR_futex:
11319         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11320 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11321     case TARGET_NR_inotify_init:
11322         ret = get_errno(sys_inotify_init());
11323         if (ret >= 0) {
11324             fd_trans_register(ret, &target_inotify_trans);
11325         }
11326         return ret;
11327 #endif
11328 #ifdef CONFIG_INOTIFY1
11329 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11330     case TARGET_NR_inotify_init1:
11331         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11332                                           fcntl_flags_tbl)));
11333         if (ret >= 0) {
11334             fd_trans_register(ret, &target_inotify_trans);
11335         }
11336         return ret;
11337 #endif
11338 #endif
11339 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11340     case TARGET_NR_inotify_add_watch:
11341         p = lock_user_string(arg2);
11342         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11343         unlock_user(p, arg2, 0);
11344         return ret;
11345 #endif
11346 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11347     case TARGET_NR_inotify_rm_watch:
11348         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11349 #endif
11350 
11351 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11352     case TARGET_NR_mq_open:
11353         {
11354             struct mq_attr posix_mq_attr;
11355             struct mq_attr *pposix_mq_attr;
11356             int host_flags;
11357 
11358             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11359             pposix_mq_attr = NULL;
11360             if (arg4) {
11361                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11362                     return -TARGET_EFAULT;
11363                 }
11364                 pposix_mq_attr = &posix_mq_attr;
11365             }
11366             p = lock_user_string(arg1 - 1);
11367             if (!p) {
11368                 return -TARGET_EFAULT;
11369             }
11370             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11371             unlock_user (p, arg1, 0);
11372         }
11373         return ret;
11374 
11375     case TARGET_NR_mq_unlink:
11376         p = lock_user_string(arg1 - 1);
11377         if (!p) {
11378             return -TARGET_EFAULT;
11379         }
11380         ret = get_errno(mq_unlink(p));
11381         unlock_user (p, arg1, 0);
11382         return ret;
11383 
11384     case TARGET_NR_mq_timedsend:
11385         {
11386             struct timespec ts;
11387 
11388             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11389             if (arg5 != 0) {
11390                 target_to_host_timespec(&ts, arg5);
11391                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11392                 host_to_target_timespec(arg5, &ts);
11393             } else {
11394                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11395             }
11396             unlock_user (p, arg2, arg3);
11397         }
11398         return ret;
11399 
11400     case TARGET_NR_mq_timedreceive:
11401         {
11402             struct timespec ts;
11403             unsigned int prio;
11404 
11405             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11406             if (arg5 != 0) {
11407                 target_to_host_timespec(&ts, arg5);
11408                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11409                                                      &prio, &ts));
11410                 host_to_target_timespec(arg5, &ts);
11411             } else {
11412                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11413                                                      &prio, NULL));
11414             }
11415             unlock_user (p, arg2, arg3);
11416             if (arg4 != 0)
11417                 put_user_u32(prio, arg4);
11418         }
11419         return ret;
11420 
11421     /* Not implemented for now... */
11422 /*     case TARGET_NR_mq_notify: */
11423 /*         break; */
11424 
11425     case TARGET_NR_mq_getsetattr:
11426         {
11427             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11428             ret = 0;
11429             if (arg2 != 0) {
11430                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11431                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11432                                            &posix_mq_attr_out));
11433             } else if (arg3 != 0) {
11434                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11435             }
11436             if (ret == 0 && arg3 != 0) {
11437                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11438             }
11439         }
11440         return ret;
11441 #endif
11442 
11443 #ifdef CONFIG_SPLICE
11444 #ifdef TARGET_NR_tee
11445     case TARGET_NR_tee:
11446         {
11447             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11448         }
11449         return ret;
11450 #endif
11451 #ifdef TARGET_NR_splice
11452     case TARGET_NR_splice:
11453         {
11454             loff_t loff_in, loff_out;
11455             loff_t *ploff_in = NULL, *ploff_out = NULL;
11456             if (arg2) {
11457                 if (get_user_u64(loff_in, arg2)) {
11458                     return -TARGET_EFAULT;
11459                 }
11460                 ploff_in = &loff_in;
11461             }
11462             if (arg4) {
11463                 if (get_user_u64(loff_out, arg4)) {
11464                     return -TARGET_EFAULT;
11465                 }
11466                 ploff_out = &loff_out;
11467             }
11468             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11469             if (arg2) {
11470                 if (put_user_u64(loff_in, arg2)) {
11471                     return -TARGET_EFAULT;
11472                 }
11473             }
11474             if (arg4) {
11475                 if (put_user_u64(loff_out, arg4)) {
11476                     return -TARGET_EFAULT;
11477                 }
11478             }
11479         }
11480         return ret;
11481 #endif
11482 #ifdef TARGET_NR_vmsplice
11483 	case TARGET_NR_vmsplice:
11484         {
11485             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11486             if (vec != NULL) {
11487                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11488                 unlock_iovec(vec, arg2, arg3, 0);
11489             } else {
11490                 ret = -host_to_target_errno(errno);
11491             }
11492         }
11493         return ret;
11494 #endif
11495 #endif /* CONFIG_SPLICE */
11496 #ifdef CONFIG_EVENTFD
11497 #if defined(TARGET_NR_eventfd)
11498     case TARGET_NR_eventfd:
11499         ret = get_errno(eventfd(arg1, 0));
11500         if (ret >= 0) {
11501             fd_trans_register(ret, &target_eventfd_trans);
11502         }
11503         return ret;
11504 #endif
11505 #if defined(TARGET_NR_eventfd2)
11506     case TARGET_NR_eventfd2:
11507     {
11508         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11509         if (arg2 & TARGET_O_NONBLOCK) {
11510             host_flags |= O_NONBLOCK;
11511         }
11512         if (arg2 & TARGET_O_CLOEXEC) {
11513             host_flags |= O_CLOEXEC;
11514         }
11515         ret = get_errno(eventfd(arg1, host_flags));
11516         if (ret >= 0) {
11517             fd_trans_register(ret, &target_eventfd_trans);
11518         }
11519         return ret;
11520     }
11521 #endif
11522 #endif /* CONFIG_EVENTFD  */
11523 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11524     case TARGET_NR_fallocate:
11525 #if TARGET_ABI_BITS == 32
11526         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11527                                   target_offset64(arg5, arg6)));
11528 #else
11529         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11530 #endif
11531         return ret;
11532 #endif
11533 #if defined(CONFIG_SYNC_FILE_RANGE)
11534 #if defined(TARGET_NR_sync_file_range)
11535     case TARGET_NR_sync_file_range:
11536 #if TARGET_ABI_BITS == 32
11537 #if defined(TARGET_MIPS)
11538         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11539                                         target_offset64(arg5, arg6), arg7));
11540 #else
11541         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11542                                         target_offset64(arg4, arg5), arg6));
11543 #endif /* !TARGET_MIPS */
11544 #else
11545         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11546 #endif
11547         return ret;
11548 #endif
11549 #if defined(TARGET_NR_sync_file_range2)
11550     case TARGET_NR_sync_file_range2:
11551         /* This is like sync_file_range but the arguments are reordered */
11552 #if TARGET_ABI_BITS == 32
11553         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11554                                         target_offset64(arg5, arg6), arg2));
11555 #else
11556         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11557 #endif
11558         return ret;
11559 #endif
11560 #endif
11561 #if defined(TARGET_NR_signalfd4)
11562     case TARGET_NR_signalfd4:
11563         return do_signalfd4(arg1, arg2, arg4);
11564 #endif
11565 #if defined(TARGET_NR_signalfd)
11566     case TARGET_NR_signalfd:
11567         return do_signalfd4(arg1, arg2, 0);
11568 #endif
11569 #if defined(CONFIG_EPOLL)
11570 #if defined(TARGET_NR_epoll_create)
11571     case TARGET_NR_epoll_create:
11572         return get_errno(epoll_create(arg1));
11573 #endif
11574 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11575     case TARGET_NR_epoll_create1:
11576         return get_errno(epoll_create1(arg1));
11577 #endif
11578 #if defined(TARGET_NR_epoll_ctl)
11579     case TARGET_NR_epoll_ctl:
11580     {
11581         struct epoll_event ep;
11582         struct epoll_event *epp = 0;
11583         if (arg4) {
11584             struct target_epoll_event *target_ep;
11585             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11586                 return -TARGET_EFAULT;
11587             }
11588             ep.events = tswap32(target_ep->events);
11589             /* The epoll_data_t union is just opaque data to the kernel,
11590              * so we transfer all 64 bits across and need not worry what
11591              * actual data type it is.
11592              */
11593             ep.data.u64 = tswap64(target_ep->data.u64);
11594             unlock_user_struct(target_ep, arg4, 0);
11595             epp = &ep;
11596         }
11597         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11598     }
11599 #endif
11600 
11601 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11602 #if defined(TARGET_NR_epoll_wait)
11603     case TARGET_NR_epoll_wait:
11604 #endif
11605 #if defined(TARGET_NR_epoll_pwait)
11606     case TARGET_NR_epoll_pwait:
11607 #endif
11608     {
11609         struct target_epoll_event *target_ep;
11610         struct epoll_event *ep;
11611         int epfd = arg1;
11612         int maxevents = arg3;
11613         int timeout = arg4;
11614 
11615         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11616             return -TARGET_EINVAL;
11617         }
11618 
11619         target_ep = lock_user(VERIFY_WRITE, arg2,
11620                               maxevents * sizeof(struct target_epoll_event), 1);
11621         if (!target_ep) {
11622             return -TARGET_EFAULT;
11623         }
11624 
11625         ep = g_try_new(struct epoll_event, maxevents);
11626         if (!ep) {
11627             unlock_user(target_ep, arg2, 0);
11628             return -TARGET_ENOMEM;
11629         }
11630 
11631         switch (num) {
11632 #if defined(TARGET_NR_epoll_pwait)
11633         case TARGET_NR_epoll_pwait:
11634         {
11635             target_sigset_t *target_set;
11636             sigset_t _set, *set = &_set;
11637 
11638             if (arg5) {
11639                 if (arg6 != sizeof(target_sigset_t)) {
11640                     ret = -TARGET_EINVAL;
11641                     break;
11642                 }
11643 
11644                 target_set = lock_user(VERIFY_READ, arg5,
11645                                        sizeof(target_sigset_t), 1);
11646                 if (!target_set) {
11647                     ret = -TARGET_EFAULT;
11648                     break;
11649                 }
11650                 target_to_host_sigset(set, target_set);
11651                 unlock_user(target_set, arg5, 0);
11652             } else {
11653                 set = NULL;
11654             }
11655 
11656             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11657                                              set, SIGSET_T_SIZE));
11658             break;
11659         }
11660 #endif
11661 #if defined(TARGET_NR_epoll_wait)
11662         case TARGET_NR_epoll_wait:
11663             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11664                                              NULL, 0));
11665             break;
11666 #endif
11667         default:
11668             ret = -TARGET_ENOSYS;
11669         }
11670         if (!is_error(ret)) {
11671             int i;
11672             for (i = 0; i < ret; i++) {
11673                 target_ep[i].events = tswap32(ep[i].events);
11674                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11675             }
11676             unlock_user(target_ep, arg2,
11677                         ret * sizeof(struct target_epoll_event));
11678         } else {
11679             unlock_user(target_ep, arg2, 0);
11680         }
11681         g_free(ep);
11682         return ret;
11683     }
11684 #endif
11685 #endif
11686 #ifdef TARGET_NR_prlimit64
11687     case TARGET_NR_prlimit64:
11688     {
11689         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11690         struct target_rlimit64 *target_rnew, *target_rold;
11691         struct host_rlimit64 rnew, rold, *rnewp = 0;
11692         int resource = target_to_host_resource(arg2);
11693         if (arg3) {
11694             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11695                 return -TARGET_EFAULT;
11696             }
11697             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11698             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11699             unlock_user_struct(target_rnew, arg3, 0);
11700             rnewp = &rnew;
11701         }
11702 
11703         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11704         if (!is_error(ret) && arg4) {
11705             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11706                 return -TARGET_EFAULT;
11707             }
11708             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11709             target_rold->rlim_max = tswap64(rold.rlim_max);
11710             unlock_user_struct(target_rold, arg4, 1);
11711         }
11712         return ret;
11713     }
11714 #endif
11715 #ifdef TARGET_NR_gethostname
11716     case TARGET_NR_gethostname:
11717     {
11718         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11719         if (name) {
11720             ret = get_errno(gethostname(name, arg2));
11721             unlock_user(name, arg1, arg2);
11722         } else {
11723             ret = -TARGET_EFAULT;
11724         }
11725         return ret;
11726     }
11727 #endif
11728 #ifdef TARGET_NR_atomic_cmpxchg_32
11729     case TARGET_NR_atomic_cmpxchg_32:
11730     {
11731         /* should use start_exclusive from main.c */
11732         abi_ulong mem_value;
11733         if (get_user_u32(mem_value, arg6)) {
11734             target_siginfo_t info;
11735             info.si_signo = SIGSEGV;
11736             info.si_errno = 0;
11737             info.si_code = TARGET_SEGV_MAPERR;
11738             info._sifields._sigfault._addr = arg6;
11739             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11740                          QEMU_SI_FAULT, &info);
11741             ret = 0xdeadbeef;
11742 
11743         }
11744         if (mem_value == arg2)
11745             put_user_u32(arg1, arg6);
11746         return mem_value;
11747     }
11748 #endif
11749 #ifdef TARGET_NR_atomic_barrier
11750     case TARGET_NR_atomic_barrier:
11751         /* Like the kernel implementation and the
11752            qemu arm barrier, no-op this? */
11753         return 0;
11754 #endif
11755 
11756 #ifdef TARGET_NR_timer_create
11757     case TARGET_NR_timer_create:
11758     {
11759         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11760 
11761         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11762 
11763         int clkid = arg1;
11764         int timer_index = next_free_host_timer();
11765 
11766         if (timer_index < 0) {
11767             ret = -TARGET_EAGAIN;
11768         } else {
11769             timer_t *phtimer = g_posix_timers  + timer_index;
11770 
11771             if (arg2) {
11772                 phost_sevp = &host_sevp;
11773                 ret = target_to_host_sigevent(phost_sevp, arg2);
11774                 if (ret != 0) {
11775                     return ret;
11776                 }
11777             }
11778 
11779             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11780             if (ret) {
11781                 phtimer = NULL;
11782             } else {
11783                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11784                     return -TARGET_EFAULT;
11785                 }
11786             }
11787         }
11788         return ret;
11789     }
11790 #endif
11791 
11792 #ifdef TARGET_NR_timer_settime
11793     case TARGET_NR_timer_settime:
11794     {
11795         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11796          * struct itimerspec * old_value */
11797         target_timer_t timerid = get_timer_id(arg1);
11798 
11799         if (timerid < 0) {
11800             ret = timerid;
11801         } else if (arg3 == 0) {
11802             ret = -TARGET_EINVAL;
11803         } else {
11804             timer_t htimer = g_posix_timers[timerid];
11805             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11806 
11807             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11808                 return -TARGET_EFAULT;
11809             }
11810             ret = get_errno(
11811                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11812             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11813                 return -TARGET_EFAULT;
11814             }
11815         }
11816         return ret;
11817     }
11818 #endif
11819 
11820 #ifdef TARGET_NR_timer_gettime
11821     case TARGET_NR_timer_gettime:
11822     {
11823         /* args: timer_t timerid, struct itimerspec *curr_value */
11824         target_timer_t timerid = get_timer_id(arg1);
11825 
11826         if (timerid < 0) {
11827             ret = timerid;
11828         } else if (!arg2) {
11829             ret = -TARGET_EFAULT;
11830         } else {
11831             timer_t htimer = g_posix_timers[timerid];
11832             struct itimerspec hspec;
11833             ret = get_errno(timer_gettime(htimer, &hspec));
11834 
11835             if (host_to_target_itimerspec(arg2, &hspec)) {
11836                 ret = -TARGET_EFAULT;
11837             }
11838         }
11839         return ret;
11840     }
11841 #endif
11842 
11843 #ifdef TARGET_NR_timer_getoverrun
11844     case TARGET_NR_timer_getoverrun:
11845     {
11846         /* args: timer_t timerid */
11847         target_timer_t timerid = get_timer_id(arg1);
11848 
11849         if (timerid < 0) {
11850             ret = timerid;
11851         } else {
11852             timer_t htimer = g_posix_timers[timerid];
11853             ret = get_errno(timer_getoverrun(htimer));
11854         }
11855         return ret;
11856     }
11857 #endif
11858 
11859 #ifdef TARGET_NR_timer_delete
11860     case TARGET_NR_timer_delete:
11861     {
11862         /* args: timer_t timerid */
11863         target_timer_t timerid = get_timer_id(arg1);
11864 
11865         if (timerid < 0) {
11866             ret = timerid;
11867         } else {
11868             timer_t htimer = g_posix_timers[timerid];
11869             ret = get_errno(timer_delete(htimer));
11870             g_posix_timers[timerid] = 0;
11871         }
11872         return ret;
11873     }
11874 #endif
11875 
11876 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11877     case TARGET_NR_timerfd_create:
11878         return get_errno(timerfd_create(arg1,
11879                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11880 #endif
11881 
11882 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11883     case TARGET_NR_timerfd_gettime:
11884         {
11885             struct itimerspec its_curr;
11886 
11887             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11888 
11889             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11890                 return -TARGET_EFAULT;
11891             }
11892         }
11893         return ret;
11894 #endif
11895 
11896 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11897     case TARGET_NR_timerfd_settime:
11898         {
11899             struct itimerspec its_new, its_old, *p_new;
11900 
11901             if (arg3) {
11902                 if (target_to_host_itimerspec(&its_new, arg3)) {
11903                     return -TARGET_EFAULT;
11904                 }
11905                 p_new = &its_new;
11906             } else {
11907                 p_new = NULL;
11908             }
11909 
11910             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11911 
11912             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11913                 return -TARGET_EFAULT;
11914             }
11915         }
11916         return ret;
11917 #endif
11918 
11919 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11920     case TARGET_NR_ioprio_get:
11921         return get_errno(ioprio_get(arg1, arg2));
11922 #endif
11923 
11924 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11925     case TARGET_NR_ioprio_set:
11926         return get_errno(ioprio_set(arg1, arg2, arg3));
11927 #endif
11928 
11929 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11930     case TARGET_NR_setns:
11931         return get_errno(setns(arg1, arg2));
11932 #endif
11933 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11934     case TARGET_NR_unshare:
11935         return get_errno(unshare(arg1));
11936 #endif
11937 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11938     case TARGET_NR_kcmp:
11939         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11940 #endif
11941 #ifdef TARGET_NR_swapcontext
11942     case TARGET_NR_swapcontext:
11943         /* PowerPC specific.  */
11944         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11945 #endif
11946 #ifdef TARGET_NR_memfd_create
11947     case TARGET_NR_memfd_create:
11948         p = lock_user_string(arg1);
11949         if (!p) {
11950             return -TARGET_EFAULT;
11951         }
11952         ret = get_errno(memfd_create(p, arg2));
11953         fd_trans_unregister(ret);
11954         unlock_user(p, arg1, 0);
11955         return ret;
11956 #endif
11957 
11958     default:
11959         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11960         return -TARGET_ENOSYS;
11961     }
11962     return ret;
11963 }
11964 
11965 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11966                     abi_long arg2, abi_long arg3, abi_long arg4,
11967                     abi_long arg5, abi_long arg6, abi_long arg7,
11968                     abi_long arg8)
11969 {
11970     CPUState *cpu = env_cpu(cpu_env);
11971     abi_long ret;
11972 
11973 #ifdef DEBUG_ERESTARTSYS
11974     /* Debug-only code for exercising the syscall-restart code paths
11975      * in the per-architecture cpu main loops: restart every syscall
11976      * the guest makes once before letting it through.
11977      */
11978     {
11979         static bool flag;
11980         flag = !flag;
11981         if (flag) {
11982             return -TARGET_ERESTARTSYS;
11983         }
11984     }
11985 #endif
11986 
11987     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11988                              arg5, arg6, arg7, arg8);
11989 
11990     if (unlikely(do_strace)) {
11991         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11992         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11993                           arg5, arg6, arg7, arg8);
11994         print_syscall_ret(num, ret);
11995     } else {
11996         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11997                           arg5, arg6, arg7, arg8);
11998     }
11999 
12000     trace_guest_user_syscall_ret(cpu, num, ret);
12001     return ret;
12002 }
12003