xref: /openbmc/qemu/linux-user/syscall.c (revision 2a53cff4)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
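/* As an illustration of the macros above: an invocation such as
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data)
 * expands to a thin static wrapper
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 * which behaves like the libc syscall(): it returns the raw result, or -1
 * with the host errno set on failure.
 */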
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we emulate the guest getdents using the host getdents, if the host has it.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
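/* Each entry below is { target_mask, target_bits, host_mask, host_bits }:
 * when the masked target value equals target_bits, the corresponding
 * host_bits are set, and vice versa for the reverse direction.  The table
 * is consumed by the target_to_host_bitmask()/host_to_target_bitmask()
 * helpers defined elsewhere in this file. */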
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
362 
363 static int sys_getcwd1(char *buf, size_t size)
364 {
365   if (getcwd(buf, size) == NULL) {
366       /* getcwd() sets errno */
367       return (-1);
368   }
369   return strlen(buf)+1;
370 }
371 
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
376           const struct timespec *,tsp,int,flags)
377 #else
378 static int sys_utimensat(int dirfd, const char *pathname,
379                          const struct timespec times[2], int flags)
380 {
381     errno = ENOSYS;
382     return -1;
383 }
384 #endif
385 #endif /* TARGET_NR_utimensat */
386 
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
391           const char *, new, unsigned int, flags)
392 #else
393 static int sys_renameat2(int oldfd, const char *old,
394                          int newfd, const char *new, int flags)
395 {
396     if (flags == 0) {
397         return renameat(oldfd, old, newfd, new);
398     }
399     errno = ENOSYS;
400     return -1;
401 }
402 #endif
403 #endif /* TARGET_NR_renameat2 */
404 
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
407 
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
409 static int sys_inotify_init(void)
410 {
411   return (inotify_init());
412 }
413 #endif
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
415 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
416 {
417   return (inotify_add_watch(fd, pathname, mask));
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
421 static int sys_inotify_rm_watch(int fd, int32_t wd)
422 {
423   return (inotify_rm_watch(fd, wd));
424 }
425 #endif
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
428 static int sys_inotify_init1(int flags)
429 {
430   return (inotify_init1(flags));
431 }
432 #endif
433 #endif
434 #else
435 /* Userspace can usually survive at runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY  */
441 
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be the one used by the underlying syscall */
448 struct host_rlimit64 {
449     uint64_t rlim_cur;
450     uint64_t rlim_max;
451 };
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453           const struct host_rlimit64 *, new_limit,
454           struct host_rlimit64 *, old_limit)
455 #endif
456 
457 
458 #if defined(TARGET_NR_timer_create)
459 /* Maximum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
461 
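/* A slot containing NULL is free.  next_free_host_timer() claims a slot by
 * storing a placeholder non-zero value; the caller is expected to replace
 * it with the real host timer_t once timer_create() succeeds. */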
462 static inline int next_free_host_timer(void)
463 {
464     int k ;
465     /* FIXME: Does finding the next free slot require a lock? */
466     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467         if (g_posix_timers[k] == 0) {
468             g_posix_timers[k] = (timer_t) 1;
469             return k;
470         }
471     }
472     return -1;
473 }
474 #endif
475 
476 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
477 #ifdef TARGET_ARM
478 static inline int regpairs_aligned(void *cpu_env, int num)
479 {
480     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
481 }
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even register
486  * pairs, which translates to the same behaviour as ARM/MIPS, because we start with
487  * r3 as arg1 */
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env, int num)
492 {
493     switch (num) {
494     case TARGET_NR_pread64:
495     case TARGET_NR_pwrite64:
496         return 1;
497 
498     default:
499         return 0;
500     }
501 }
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #else
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
506 #endif
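/* regpairs_aligned() is consulted by the handlers for syscalls that pass a
 * 64-bit value in two consecutive argument registers (pread64/pwrite64,
 * truncate64 and friends): when it returns 1, a padding argument is skipped
 * so that the low/high halves land in an even/odd register pair. */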
507 
508 #define ERRNO_TABLE_SIZE 1200
509 
510 /* target_to_host_errno_table[] is initialized from
511  * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
513 };
514 
515 /*
516  * This list is the union of errno values overridden in asm-<arch>/errno.h
517  * minus the errnos that are not actually generic to all archs.
518  */
519 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
520     [EAGAIN]		= TARGET_EAGAIN,
521     [EIDRM]		= TARGET_EIDRM,
522     [ECHRNG]		= TARGET_ECHRNG,
523     [EL2NSYNC]		= TARGET_EL2NSYNC,
524     [EL3HLT]		= TARGET_EL3HLT,
525     [EL3RST]		= TARGET_EL3RST,
526     [ELNRNG]		= TARGET_ELNRNG,
527     [EUNATCH]		= TARGET_EUNATCH,
528     [ENOCSI]		= TARGET_ENOCSI,
529     [EL2HLT]		= TARGET_EL2HLT,
530     [EDEADLK]		= TARGET_EDEADLK,
531     [ENOLCK]		= TARGET_ENOLCK,
532     [EBADE]		= TARGET_EBADE,
533     [EBADR]		= TARGET_EBADR,
534     [EXFULL]		= TARGET_EXFULL,
535     [ENOANO]		= TARGET_ENOANO,
536     [EBADRQC]		= TARGET_EBADRQC,
537     [EBADSLT]		= TARGET_EBADSLT,
538     [EBFONT]		= TARGET_EBFONT,
539     [ENOSTR]		= TARGET_ENOSTR,
540     [ENODATA]		= TARGET_ENODATA,
541     [ETIME]		= TARGET_ETIME,
542     [ENOSR]		= TARGET_ENOSR,
543     [ENONET]		= TARGET_ENONET,
544     [ENOPKG]		= TARGET_ENOPKG,
545     [EREMOTE]		= TARGET_EREMOTE,
546     [ENOLINK]		= TARGET_ENOLINK,
547     [EADV]		= TARGET_EADV,
548     [ESRMNT]		= TARGET_ESRMNT,
549     [ECOMM]		= TARGET_ECOMM,
550     [EPROTO]		= TARGET_EPROTO,
551     [EDOTDOT]		= TARGET_EDOTDOT,
552     [EMULTIHOP]		= TARGET_EMULTIHOP,
553     [EBADMSG]		= TARGET_EBADMSG,
554     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
555     [EOVERFLOW]		= TARGET_EOVERFLOW,
556     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
557     [EBADFD]		= TARGET_EBADFD,
558     [EREMCHG]		= TARGET_EREMCHG,
559     [ELIBACC]		= TARGET_ELIBACC,
560     [ELIBBAD]		= TARGET_ELIBBAD,
561     [ELIBSCN]		= TARGET_ELIBSCN,
562     [ELIBMAX]		= TARGET_ELIBMAX,
563     [ELIBEXEC]		= TARGET_ELIBEXEC,
564     [EILSEQ]		= TARGET_EILSEQ,
565     [ENOSYS]		= TARGET_ENOSYS,
566     [ELOOP]		= TARGET_ELOOP,
567     [ERESTART]		= TARGET_ERESTART,
568     [ESTRPIPE]		= TARGET_ESTRPIPE,
569     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
570     [EUSERS]		= TARGET_EUSERS,
571     [ENOTSOCK]		= TARGET_ENOTSOCK,
572     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
573     [EMSGSIZE]		= TARGET_EMSGSIZE,
574     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
575     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
576     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
577     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
578     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
579     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
580     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
581     [EADDRINUSE]	= TARGET_EADDRINUSE,
582     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
583     [ENETDOWN]		= TARGET_ENETDOWN,
584     [ENETUNREACH]	= TARGET_ENETUNREACH,
585     [ENETRESET]		= TARGET_ENETRESET,
586     [ECONNABORTED]	= TARGET_ECONNABORTED,
587     [ECONNRESET]	= TARGET_ECONNRESET,
588     [ENOBUFS]		= TARGET_ENOBUFS,
589     [EISCONN]		= TARGET_EISCONN,
590     [ENOTCONN]		= TARGET_ENOTCONN,
591     [EUCLEAN]		= TARGET_EUCLEAN,
592     [ENOTNAM]		= TARGET_ENOTNAM,
593     [ENAVAIL]		= TARGET_ENAVAIL,
594     [EISNAM]		= TARGET_EISNAM,
595     [EREMOTEIO]		= TARGET_EREMOTEIO,
596     [EDQUOT]            = TARGET_EDQUOT,
597     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
598     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
599     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
600     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
601     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
602     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
603     [EALREADY]		= TARGET_EALREADY,
604     [EINPROGRESS]	= TARGET_EINPROGRESS,
605     [ESTALE]		= TARGET_ESTALE,
606     [ECANCELED]		= TARGET_ECANCELED,
607     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
608     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
609 #ifdef ENOKEY
610     [ENOKEY]		= TARGET_ENOKEY,
611 #endif
612 #ifdef EKEYEXPIRED
613     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
614 #endif
615 #ifdef EKEYREVOKED
616     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
617 #endif
618 #ifdef EKEYREJECTED
619     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
620 #endif
621 #ifdef EOWNERDEAD
622     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
623 #endif
624 #ifdef ENOTRECOVERABLE
625     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
626 #endif
627 #ifdef ENOMSG
628     [ENOMSG]            = TARGET_ENOMSG,
629 #endif
630 #ifdef ERFKILL
631     [ERFKILL]           = TARGET_ERFKILL,
632 #endif
633 #ifdef EHWPOISON
634     [EHWPOISON]         = TARGET_EHWPOISON,
635 #endif
636 };
637 
638 static inline int host_to_target_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         host_to_target_errno_table[err]) {
642         return host_to_target_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline int target_to_host_errno(int err)
648 {
649     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
650         target_to_host_errno_table[err]) {
651         return target_to_host_errno_table[err];
652     }
653     return err;
654 }
655 
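/* get_errno() converts the host libc convention (-1 with errno set) into
 * the convention used throughout this file: successful results are passed
 * through unchanged, failures become negative target errno values. */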
656 static inline abi_long get_errno(abi_long ret)
657 {
658     if (ret == -1)
659         return -host_to_target_errno(errno);
660     else
661         return ret;
662 }
663 
664 const char *target_strerror(int err)
665 {
666     if (err == TARGET_ERESTARTSYS) {
667         return "To be restarted";
668     }
669     if (err == TARGET_QEMU_ESIGRETURN) {
670         return "Successful exit from sigreturn";
671     }
672 
673     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
674         return NULL;
675     }
676     return strerror(target_to_host_errno(err));
677 }
678 
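/* The safe_syscallN() macros generate wrappers around the low-level
 * safe_syscall() helper, which (unlike a plain syscall()) guarantees that a
 * guest signal arriving just before the host syscall is entered is reported
 * via errno as TARGET_ERESTARTSYS rather than lost, so that the main loop
 * can restart the guest syscall.  They are used for host syscalls that may
 * block. */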
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
681 { \
682     return safe_syscall(__NR_##name); \
683 }
684 
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
687 { \
688     return safe_syscall(__NR_##name, arg1); \
689 }
690 
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2); \
695 }
696 
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
701 }
702 
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
708 }
709 
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
716 }
717 
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721     type5 arg5, type6 arg6) \
722 { \
723     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
724 }
725 
726 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
727 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
728 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
729               int, flags, mode_t, mode)
730 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
731               struct rusage *, rusage)
732 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
733               int, options, struct rusage *, rusage)
734 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
735 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
736               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
744               const struct timespec *,timeout,int *,uaddr2,int,val3)
745 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
746 safe_syscall2(int, kill, pid_t, pid, int, sig)
747 safe_syscall2(int, tkill, int, tid, int, sig)
748 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
749 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
750 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
752               unsigned long, pos_l, unsigned long, pos_h)
753 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
754               unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
756               socklen_t, addrlen)
757 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
758               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
759 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
760               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
761 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
762 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
763 safe_syscall2(int, flock, int, fd, int, operation)
764 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
765               const struct timespec *, uts, size_t, sigsetsize)
766 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
767               int, flags)
768 safe_syscall2(int, nanosleep, const struct timespec *, req,
769               struct timespec *, rem)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
772               const struct timespec *, req, struct timespec *, rem)
773 #endif
774 #ifdef __NR_msgsnd
775 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
776               int, flags)
777 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
778               long, msgtype, int, flags)
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780               unsigned, nsops, const struct timespec *, timeout)
781 #else
782 /* This host kernel architecture uses a single ipc syscall; fake up
783  * wrappers for the sub-operations to hide this implementation detail.
784  * Annoyingly we can't include linux/ipc.h to get the constant definitions
785  * for the call parameter because some structs in there conflict with the
786  * sys/ipc.h ones. So we just define them here, and rely on them being
787  * the same for all host architectures.
788  */
789 #define Q_SEMTIMEDOP 4
790 #define Q_MSGSND 11
791 #define Q_MSGRCV 12
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
793 
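/* safe_msgrcv() below uses IPC version 1, the MSGRCV variant where msgp is
 * passed directly in ptr and msgtyp in fifth, rather than bundled into a
 * kernel-internal struct as in the version 0 convention. */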
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
799 }
800 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
801 {
802     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
803 }
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805                            const struct timespec *timeout)
806 {
807     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808                     (long)timeout);
809 }
810 #endif
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818  * "third argument might be integer or pointer or not present" behaviour of
819  * the libc function.
820  */
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824  *  use the flock64 struct rather than unsuffixed flock
825  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
826  */
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
832 
833 static inline int host_to_target_sock_type(int host_type)
834 {
835     int target_type;
836 
837     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838     case SOCK_DGRAM:
839         target_type = TARGET_SOCK_DGRAM;
840         break;
841     case SOCK_STREAM:
842         target_type = TARGET_SOCK_STREAM;
843         break;
844     default:
845         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846         break;
847     }
848 
849 #if defined(SOCK_CLOEXEC)
850     if (host_type & SOCK_CLOEXEC) {
851         target_type |= TARGET_SOCK_CLOEXEC;
852     }
853 #endif
854 
855 #if defined(SOCK_NONBLOCK)
856     if (host_type & SOCK_NONBLOCK) {
857         target_type |= TARGET_SOCK_NONBLOCK;
858     }
859 #endif
860 
861     return target_type;
862 }
863 
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
867 
868 void target_set_brk(abi_ulong new_brk)
869 {
870     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871     brk_page = HOST_PAGE_ALIGN(target_brk);
872 }
873 
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
876 
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
879 {
880     abi_long mapped_addr;
881     abi_ulong new_alloc_size;
882 
883     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
884 
885     if (!new_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887         return target_brk;
888     }
889     if (new_brk < target_original_brk) {
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891                    target_brk);
892         return target_brk;
893     }
894 
895     /* If the new brk is less than the highest page reserved to the
896      * target heap allocation, set it and we're almost done...  */
897     if (new_brk <= brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  */
900         if (new_brk > target_brk) {
901             memset(g2h(target_brk), 0, new_brk - target_brk);
902         }
903         target_brk = new_brk;
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905         return target_brk;
906     }
907 
908     /* We need to allocate more memory after the brk... Note that
909      * we don't use MAP_FIXED because that will map over the top of
910      * any existing mapping (like the one with the host libc or qemu
911      * itself); instead we treat "mapped but at wrong address" as
912      * a failure and unmap again.
913      */
914     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916                                         PROT_READ|PROT_WRITE,
917                                         MAP_ANON|MAP_PRIVATE, 0, 0));
918 
919     if (mapped_addr == brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  Technically the new pages are already
922          * initialized to zero since they *are* anonymous mapped
923          * pages, however we have to take care with the contents that
924          * come from the remaining part of the previous page: it may
925          * contain garbage data due to previous heap usage (grown
926          * then shrunk).  */
927         memset(g2h(target_brk), 0, brk_page - target_brk);
928 
929         target_brk = new_brk;
930         brk_page = HOST_PAGE_ALIGN(target_brk);
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932             target_brk);
933         return target_brk;
934     } else if (mapped_addr != -1) {
935         /* Mapped but at wrong address, meaning there wasn't actually
936          * enough space for this brk.
937          */
938         target_munmap(mapped_addr, new_alloc_size);
939         mapped_addr = -1;
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
941     }
942     else {
943         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
944     }
945 
946 #if defined(TARGET_ALPHA)
947     /* We (partially) emulate OSF/1 on Alpha, which requires we
948        return a proper errno, not an unchanged brk value.  */
949     return -TARGET_ENOMEM;
950 #endif
951     /* For everything else, return the previous break. */
952     return target_brk;
953 }
954 
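/* A guest fd_set is an array of abi_ulong words with one bit per file
 * descriptor, so the conversion to and from the host fd_set has to be done
 * bit by bit; a plain memcpy would be wrong whenever guest and host differ
 * in word size or byte order. */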
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956                                             abi_ulong target_fds_addr,
957                                             int n)
958 {
959     int i, nw, j, k;
960     abi_ulong b, *target_fds;
961 
962     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963     if (!(target_fds = lock_user(VERIFY_READ,
964                                  target_fds_addr,
965                                  sizeof(abi_ulong) * nw,
966                                  1)))
967         return -TARGET_EFAULT;
968 
969     FD_ZERO(fds);
970     k = 0;
971     for (i = 0; i < nw; i++) {
972         /* grab the abi_ulong */
973         __get_user(b, &target_fds[i]);
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             /* check the bit inside the abi_ulong */
976             if ((b >> j) & 1)
977                 FD_SET(k, fds);
978             k++;
979         }
980     }
981 
982     unlock_user(target_fds, target_fds_addr, 0);
983 
984     return 0;
985 }
986 
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988                                                  abi_ulong target_fds_addr,
989                                                  int n)
990 {
991     if (target_fds_addr) {
992         if (copy_from_user_fdset(fds, target_fds_addr, n))
993             return -TARGET_EFAULT;
994         *fds_ptr = fds;
995     } else {
996         *fds_ptr = NULL;
997     }
998     return 0;
999 }
1000 
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002                                           const fd_set *fds,
1003                                           int n)
1004 {
1005     int i, nw, j, k;
1006     abi_long v;
1007     abi_ulong *target_fds;
1008 
1009     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010     if (!(target_fds = lock_user(VERIFY_WRITE,
1011                                  target_fds_addr,
1012                                  sizeof(abi_ulong) * nw,
1013                                  0)))
1014         return -TARGET_EFAULT;
1015 
1016     k = 0;
1017     for (i = 0; i < nw; i++) {
1018         v = 0;
1019         for (j = 0; j < TARGET_ABI_BITS; j++) {
1020             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021             k++;
1022         }
1023         __put_user(v, &target_fds[i]);
1024     }
1025 
1026     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1027 
1028     return 0;
1029 }
1030 
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1036 
1037 static inline abi_long host_to_target_clock_t(long ticks)
1038 {
1039 #if HOST_HZ == TARGET_HZ
1040     return ticks;
1041 #else
1042     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1044 }
1045 
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047                                              const struct rusage *rusage)
1048 {
1049     struct target_rusage *target_rusage;
1050 
1051     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052         return -TARGET_EFAULT;
1053     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071     unlock_user_struct(target_rusage, target_addr, 1);
1072 
1073     return 0;
1074 }
1075 
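/* rlimit conversions saturate: a value that equals the respective infinity
 * constant, or that cannot be represented on the other side, is mapped to
 * RLIM_INFINITY / TARGET_RLIM_INFINITY. */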
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1093 {
1094     abi_ulong target_rlim_swap;
1095     abi_ulong result;
1096 
1097     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098         target_rlim_swap = TARGET_RLIM_INFINITY;
1099     else
1100         target_rlim_swap = rlim;
1101     result = tswapal(target_rlim_swap);
1102 
1103     return result;
1104 }
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150         return -TARGET_EFAULT;
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161                                             const struct timeval *tv)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166         return -TARGET_EFAULT;
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177                                                abi_ulong target_tz_addr)
1178 {
1179     struct target_timezone *target_tz;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184 
1185     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1187 
1188     unlock_user_struct(target_tz, target_tz_addr, 0);
1189 
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1195 
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197                                               abi_ulong target_mq_attr_addr)
1198 {
1199     struct target_mq_attr *target_mq_attr;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202                           target_mq_attr_addr, 1))
1203         return -TARGET_EFAULT;
1204 
1205     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1209 
1210     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1211 
1212     return 0;
1213 }
1214 
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216                                             const struct mq_attr *attr)
1217 {
1218     struct target_mq_attr *target_mq_attr;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221                           target_mq_attr_addr, 0))
1222         return -TARGET_EFAULT;
1223 
1224     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1228 
1229     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1239                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1240 {
1241     fd_set rfds, wfds, efds;
1242     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243     struct timeval tv;
1244     struct timespec ts, *ts_ptr;
1245     abi_long ret;
1246 
1247     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256     if (ret) {
1257         return ret;
1258     }
1259 
1260     if (target_tv_addr) {
1261         if (copy_from_user_timeval(&tv, target_tv_addr))
1262             return -TARGET_EFAULT;
1263         ts.tv_sec = tv.tv_sec;
1264         ts.tv_nsec = tv.tv_usec * 1000;
1265         ts_ptr = &ts;
1266     } else {
1267         ts_ptr = NULL;
1268     }
1269 
1270     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271                                   ts_ptr, NULL));
1272 
1273     if (!is_error(ret)) {
1274         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275             return -TARGET_EFAULT;
1276         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277             return -TARGET_EFAULT;
1278         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279             return -TARGET_EFAULT;
1280 
1281         if (target_tv_addr) {
1282             tv.tv_sec = ts.tv_sec;
1283             tv.tv_usec = ts.tv_nsec / 1000;
1284             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285                 return -TARGET_EFAULT;
1286             }
1287         }
1288     }
1289 
1290     return ret;
1291 }
1292 
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1295 {
1296     struct target_sel_arg_struct *sel;
1297     abi_ulong inp, outp, exp, tvp;
1298     long nsel;
1299 
1300     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     nsel = tswapal(sel->n);
1305     inp = tswapal(sel->inp);
1306     outp = tswapal(sel->outp);
1307     exp = tswapal(sel->exp);
1308     tvp = tswapal(sel->tvp);
1309 
1310     unlock_user_struct(sel, arg1, 0);
1311 
1312     return do_select(nsel, inp, outp, exp, tvp);
1313 }
1314 #endif
1315 #endif
1316 
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1318 {
1319 #ifdef CONFIG_PIPE2
1320     return pipe2(host_pipe, flags);
1321 #else
1322     return -ENOSYS;
1323 #endif
1324 }
1325 
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327                         int flags, int is_pipe2)
1328 {
1329     int host_pipe[2];
1330     abi_long ret;
1331     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1332 
1333     if (is_error(ret))
1334         return get_errno(ret);
1335 
1336     /* Several targets have special calling conventions for the original
1337        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1338     if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341         return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344         return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347         return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350         return host_pipe[0];
1351 #endif
1352     }
1353 
1354     if (put_user_s32(host_pipe[0], pipedes)
1355         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356         return -TARGET_EFAULT;
1357     return get_errno(ret);
1358 }
1359 
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361                                               abi_ulong target_addr,
1362                                               socklen_t len)
1363 {
1364     struct target_ip_mreqn *target_smreqn;
1365 
1366     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367     if (!target_smreqn)
1368         return -TARGET_EFAULT;
1369     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371     if (len == sizeof(struct target_ip_mreqn))
1372         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373     unlock_user(target_smreqn, target_addr, 0);
1374 
1375     return 0;
1376 }
1377 
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379                                                abi_ulong target_addr,
1380                                                socklen_t len)
1381 {
1382     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383     sa_family_t sa_family;
1384     struct target_sockaddr *target_saddr;
1385 
1386     if (fd_trans_target_to_host_addr(fd)) {
1387         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1388     }
1389 
1390     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391     if (!target_saddr)
1392         return -TARGET_EFAULT;
1393 
1394     sa_family = tswap16(target_saddr->sa_family);
1395 
1396     /* Oops. The caller might send an incomplete sun_path; sun_path
1397      * must be terminated by \0 (see the manual page), but
1398      * unfortunately it is quite common to specify the sockaddr_un
1399      * length as "strlen(x->sun_path)" when it should be
1400      * "strlen(...) + 1". We fix that up here if needed.
1401      * The Linux kernel applies a similar fixup.
1402      */
1403 
1404     if (sa_family == AF_UNIX) {
1405         if (len < unix_maxlen && len > 0) {
1406             char *cp = (char*)target_saddr;
1407 
1408             if ( cp[len-1] && !cp[len] )
1409                 len++;
1410         }
1411         if (len > unix_maxlen)
1412             len = unix_maxlen;
1413     }
1414 
1415     memcpy(addr, target_saddr, len);
1416     addr->sa_family = sa_family;
1417     if (sa_family == AF_NETLINK) {
1418         struct sockaddr_nl *nladdr;
1419 
1420         nladdr = (struct sockaddr_nl *)addr;
1421         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423     } else if (sa_family == AF_PACKET) {
1424         struct target_sockaddr_ll *lladdr;
1425 
1426         lladdr = (struct target_sockaddr_ll *)addr;
1427         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1429     }
1430     unlock_user(target_saddr, target_addr, 0);
1431 
1432     return 0;
1433 }
1434 
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436                                                struct sockaddr *addr,
1437                                                socklen_t len)
1438 {
1439     struct target_sockaddr *target_saddr;
1440 
1441     if (len == 0) {
1442         return 0;
1443     }
1444     assert(addr);
1445 
1446     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447     if (!target_saddr)
1448         return -TARGET_EFAULT;
1449     memcpy(target_saddr, addr, len);
1450     if (len >= offsetof(struct target_sockaddr, sa_family) +
1451         sizeof(target_saddr->sa_family)) {
1452         target_saddr->sa_family = tswap16(addr->sa_family);
1453     }
1454     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458     } else if (addr->sa_family == AF_PACKET) {
1459         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462     } else if (addr->sa_family == AF_INET6 &&
1463                len >= sizeof(struct target_sockaddr_in6)) {
1464         struct target_sockaddr_in6 *target_in6 =
1465                (struct target_sockaddr_in6 *)target_saddr;
1466         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1467     }
1468     unlock_user(target_saddr, target_addr, len);
1469 
1470     return 0;
1471 }
1472 
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474                                            struct target_msghdr *target_msgh)
1475 {
1476     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477     abi_long msg_controllen;
1478     abi_ulong target_cmsg_addr;
1479     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480     socklen_t space = 0;
1481 
1482     msg_controllen = tswapal(target_msgh->msg_controllen);
1483     if (msg_controllen < sizeof (struct target_cmsghdr))
1484         goto the_end;
1485     target_cmsg_addr = tswapal(target_msgh->msg_control);
1486     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487     target_cmsg_start = target_cmsg;
1488     if (!target_cmsg)
1489         return -TARGET_EFAULT;
1490 
1491     while (cmsg && target_cmsg) {
1492         void *data = CMSG_DATA(cmsg);
1493         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1494 
1495         int len = tswapal(target_cmsg->cmsg_len)
1496             - sizeof(struct target_cmsghdr);
1497 
1498         space += CMSG_SPACE(len);
1499         if (space > msgh->msg_controllen) {
1500             space -= CMSG_SPACE(len);
1501             /* This is a QEMU bug, since we allocated the payload
1502              * area ourselves (unlike overflow in host-to-target
1503              * conversion, which is just the guest giving us a buffer
1504              * that's too small). It can't happen for the payload types
1505              * we currently support; if it becomes an issue in future
1506              * we would need to improve our allocation strategy to
1507              * something more intelligent than "twice the size of the
1508              * target buffer we're reading from".
1509              */
1510             gemu_log("Host cmsg overflow\n");
1511             break;
1512         }
1513 
1514         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515             cmsg->cmsg_level = SOL_SOCKET;
1516         } else {
1517             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1518         }
1519         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520         cmsg->cmsg_len = CMSG_LEN(len);
1521 
1522         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523             int *fd = (int *)data;
1524             int *target_fd = (int *)target_data;
1525             int i, numfds = len / sizeof(int);
1526 
1527             for (i = 0; i < numfds; i++) {
1528                 __get_user(fd[i], target_fd + i);
1529             }
1530         } else if (cmsg->cmsg_level == SOL_SOCKET
1531                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1532             struct ucred *cred = (struct ucred *)data;
1533             struct target_ucred *target_cred =
1534                 (struct target_ucred *)target_data;
1535 
1536             __get_user(cred->pid, &target_cred->pid);
1537             __get_user(cred->uid, &target_cred->uid);
1538             __get_user(cred->gid, &target_cred->gid);
1539         } else {
1540             gemu_log("Unsupported ancillary data: %d/%d\n",
1541                                         cmsg->cmsg_level, cmsg->cmsg_type);
1542             memcpy(data, target_data, len);
1543         }
1544 
1545         cmsg = CMSG_NXTHDR(msgh, cmsg);
1546         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547                                          target_cmsg_start);
1548     }
1549     unlock_user(target_cmsg, target_cmsg_addr, 0);
1550  the_end:
1551     msgh->msg_controllen = space;
1552     return 0;
1553 }
1554 
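/* Convert host ancillary data in msgh back into the guest's target_msghdr.
 * SCM_RIGHTS, SCM_CREDENTIALS, SO_TIMESTAMP, IP_TTL/IP_RECVERR and
 * IPV6_HOPLIMIT/IPV6_RECVERR payloads are converted; anything else is
 * byte-copied with a warning.  Truncation is reported to the guest via
 * MSG_CTRUNC, mirroring the kernel's put_cmsg() behaviour.
 */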
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556                                            struct msghdr *msgh)
1557 {
1558     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559     abi_long msg_controllen;
1560     abi_ulong target_cmsg_addr;
1561     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562     socklen_t space = 0;
1563 
1564     msg_controllen = tswapal(target_msgh->msg_controllen);
1565     if (msg_controllen < sizeof (struct target_cmsghdr))
1566         goto the_end;
1567     target_cmsg_addr = tswapal(target_msgh->msg_control);
1568     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569     target_cmsg_start = target_cmsg;
1570     if (!target_cmsg)
1571         return -TARGET_EFAULT;
1572 
1573     while (cmsg && target_cmsg) {
1574         void *data = CMSG_DATA(cmsg);
1575         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1576 
1577         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578         int tgt_len, tgt_space;
1579 
1580         /* We never copy a half-header but may copy half-data;
1581          * this is Linux's behaviour in put_cmsg(). Note that
1582          * truncation here is a guest problem (which we report
1583          * to the guest via the CTRUNC bit), unlike truncation
1584          * in target_to_host_cmsg, which is a QEMU bug.
1585          */
1586         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             break;
1589         }
1590 
1591         if (cmsg->cmsg_level == SOL_SOCKET) {
1592             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593         } else {
1594             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1595         }
1596         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1597 
1598         /* Payload types which need a different size of payload on
1599          * the target must adjust tgt_len here.
1600          */
1601         tgt_len = len;
1602         switch (cmsg->cmsg_level) {
1603         case SOL_SOCKET:
1604             switch (cmsg->cmsg_type) {
1605             case SO_TIMESTAMP:
1606                 tgt_len = sizeof(struct target_timeval);
1607                 break;
1608             default:
1609                 break;
1610             }
1611             break;
1612         default:
1613             break;
1614         }
1615 
1616         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1619         }
1620 
1621         /* We must now copy-and-convert len bytes of payload
1622          * into tgt_len bytes of destination space. Bear in mind
1623          * that in both source and destination we may be dealing
1624          * with a truncated value!
1625          */
1626         switch (cmsg->cmsg_level) {
1627         case SOL_SOCKET:
1628             switch (cmsg->cmsg_type) {
1629             case SCM_RIGHTS:
1630             {
1631                 int *fd = (int *)data;
1632                 int *target_fd = (int *)target_data;
1633                 int i, numfds = tgt_len / sizeof(int);
1634 
1635                 for (i = 0; i < numfds; i++) {
1636                     __put_user(fd[i], target_fd + i);
1637                 }
1638                 break;
1639             }
1640             case SO_TIMESTAMP:
1641             {
1642                 struct timeval *tv = (struct timeval *)data;
1643                 struct target_timeval *target_tv =
1644                     (struct target_timeval *)target_data;
1645 
1646                 if (len != sizeof(struct timeval) ||
1647                     tgt_len != sizeof(struct target_timeval)) {
1648                     goto unimplemented;
1649                 }
1650 
1651                 /* copy struct timeval to target */
1652                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654                 break;
1655             }
1656             case SCM_CREDENTIALS:
1657             {
1658                 struct ucred *cred = (struct ucred *)data;
1659                 struct target_ucred *target_cred =
1660                     (struct target_ucred *)target_data;
1661 
1662                 __put_user(cred->pid, &target_cred->pid);
1663                 __put_user(cred->uid, &target_cred->uid);
1664                 __put_user(cred->gid, &target_cred->gid);
1665                 break;
1666             }
1667             default:
1668                 goto unimplemented;
1669             }
1670             break;
1671 
1672         case SOL_IP:
1673             switch (cmsg->cmsg_type) {
1674             case IP_TTL:
1675             {
1676                 uint32_t *v = (uint32_t *)data;
1677                 uint32_t *t_int = (uint32_t *)target_data;
1678 
1679                 if (len != sizeof(uint32_t) ||
1680                     tgt_len != sizeof(uint32_t)) {
1681                     goto unimplemented;
1682                 }
1683                 __put_user(*v, t_int);
1684                 break;
1685             }
1686             case IP_RECVERR:
1687             {
1688                 struct errhdr_t {
1689                    struct sock_extended_err ee;
1690                    struct sockaddr_in offender;
1691                 };
1692                 struct errhdr_t *errh = (struct errhdr_t *)data;
1693                 struct errhdr_t *target_errh =
1694                     (struct errhdr_t *)target_data;
1695 
1696                 if (len != sizeof(struct errhdr_t) ||
1697                     tgt_len != sizeof(struct errhdr_t)) {
1698                     goto unimplemented;
1699                 }
1700                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1703                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708                     (void *) &errh->offender, sizeof(errh->offender));
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IPV6:
1717             switch (cmsg->cmsg_type) {
1718             case IPV6_HOPLIMIT:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IPV6_RECVERR:
1731             {
1732                 struct errhdr6_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in6 offender;
1735                 };
1736                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737                 struct errhdr6_t *target_errh =
1738                     (struct errhdr6_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr6_t) ||
1741                     tgt_len != sizeof(struct errhdr6_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         default:
1761         unimplemented:
1762             gemu_log("Unsupported ancillary data: %d/%d\n",
1763                                         cmsg->cmsg_level, cmsg->cmsg_type);
1764             memcpy(target_data, data, MIN(len, tgt_len));
1765             if (tgt_len > len) {
1766                 memset(target_data + len, 0, tgt_len - len);
1767             }
1768         }
1769 
1770         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772         if (msg_controllen < tgt_space) {
1773             tgt_space = msg_controllen;
1774         }
1775         msg_controllen -= tgt_space;
1776         space += tgt_space;
1777         cmsg = CMSG_NXTHDR(msgh, cmsg);
1778         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779                                          target_cmsg_start);
1780     }
1781     unlock_user(target_cmsg, target_cmsg_addr, space);
1782  the_end:
1783     target_msgh->msg_controllen = tswapal(space);
1784     return 0;
1785 }
1786 
1787 /* do_setsockopt() must return target values and target errnos. */
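/* Note on the setsockopt/getsockopt helpers: SOL_SOCKET and the SO_*
 * option names differ between target architectures, so they are translated
 * from TARGET_SOL_SOCKET/TARGET_SO_* before calling the host, while the
 * IP/IPv6/TCP option names share the same values on all Linux
 * architectures and are passed through unchanged.
 */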
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789                               abi_ulong optval_addr, socklen_t optlen)
1790 {
1791     abi_long ret;
1792     int val;
1793     struct ip_mreqn *ip_mreq;
1794     struct ip_mreq_source *ip_mreq_source;
1795 
1796     switch(level) {
1797     case SOL_TCP:
1798         /* TCP options all take an 'int' value.  */
1799         if (optlen < sizeof(uint32_t))
1800             return -TARGET_EINVAL;
1801 
1802         if (get_user_u32(val, optval_addr))
1803             return -TARGET_EFAULT;
1804         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805         break;
1806     case SOL_IP:
1807         switch(optname) {
1808         case IP_TOS:
1809         case IP_TTL:
1810         case IP_HDRINCL:
1811         case IP_ROUTER_ALERT:
1812         case IP_RECVOPTS:
1813         case IP_RETOPTS:
1814         case IP_PKTINFO:
1815         case IP_MTU_DISCOVER:
1816         case IP_RECVERR:
1817         case IP_RECVTTL:
1818         case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820         case IP_FREEBIND:
1821 #endif
1822         case IP_MULTICAST_TTL:
1823         case IP_MULTICAST_LOOP:
1824             val = 0;
1825             if (optlen >= sizeof(uint32_t)) {
1826                 if (get_user_u32(val, optval_addr))
1827                     return -TARGET_EFAULT;
1828             } else if (optlen >= 1) {
1829                 if (get_user_u8(val, optval_addr))
1830                     return -TARGET_EFAULT;
1831             }
1832             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833             break;
1834         case IP_ADD_MEMBERSHIP:
1835         case IP_DROP_MEMBERSHIP:
1836             if (optlen < sizeof (struct target_ip_mreq) ||
1837                 optlen > sizeof (struct target_ip_mreqn))
1838                 return -TARGET_EINVAL;
1839 
1840             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843             break;
1844 
1845         case IP_BLOCK_SOURCE:
1846         case IP_UNBLOCK_SOURCE:
1847         case IP_ADD_SOURCE_MEMBERSHIP:
1848         case IP_DROP_SOURCE_MEMBERSHIP:
1849             if (optlen != sizeof (struct target_ip_mreq_source))
1850                 return -TARGET_EINVAL;
1851 
1852             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
1853             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854             unlock_user (ip_mreq_source, optval_addr, 0);
1855             break;
1856 
1857         default:
1858             goto unimplemented;
1859         }
1860         break;
1861     case SOL_IPV6:
1862         switch (optname) {
1863         case IPV6_MTU_DISCOVER:
1864         case IPV6_MTU:
1865         case IPV6_V6ONLY:
1866         case IPV6_RECVPKTINFO:
1867         case IPV6_UNICAST_HOPS:
1868         case IPV6_MULTICAST_HOPS:
1869         case IPV6_MULTICAST_LOOP:
1870         case IPV6_RECVERR:
1871         case IPV6_RECVHOPLIMIT:
1872         case IPV6_2292HOPLIMIT:
1873         case IPV6_CHECKSUM:
1874             val = 0;
1875             if (optlen < sizeof(uint32_t)) {
1876                 return -TARGET_EINVAL;
1877             }
1878             if (get_user_u32(val, optval_addr)) {
1879                 return -TARGET_EFAULT;
1880             }
1881             ret = get_errno(setsockopt(sockfd, level, optname,
1882                                        &val, sizeof(val)));
1883             break;
1884         case IPV6_PKTINFO:
1885         {
1886             struct in6_pktinfo pki;
1887 
1888             if (optlen < sizeof(pki)) {
1889                 return -TARGET_EINVAL;
1890             }
1891 
1892             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893                 return -TARGET_EFAULT;
1894             }
1895 
1896             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1897 
1898             ret = get_errno(setsockopt(sockfd, level, optname,
1899                                        &pki, sizeof(pki)));
1900             break;
1901         }
1902         default:
1903             goto unimplemented;
1904         }
1905         break;
1906     case SOL_ICMPV6:
1907         switch (optname) {
1908         case ICMPV6_FILTER:
1909         {
1910             struct icmp6_filter icmp6f;
1911 
1912             if (optlen > sizeof(icmp6f)) {
1913                 optlen = sizeof(icmp6f);
1914             }
1915 
1916             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917                 return -TARGET_EFAULT;
1918             }
1919 
1920             for (val = 0; val < 8; val++) {
1921                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1922             }
1923 
1924             ret = get_errno(setsockopt(sockfd, level, optname,
1925                                        &icmp6f, optlen));
1926             break;
1927         }
1928         default:
1929             goto unimplemented;
1930         }
1931         break;
1932     case SOL_RAW:
1933         switch (optname) {
1934         case ICMP_FILTER:
1935         case IPV6_CHECKSUM:
1936             /* These options take a u32 value. */
1937             if (optlen < sizeof(uint32_t)) {
1938                 return -TARGET_EINVAL;
1939             }
1940 
1941             if (get_user_u32(val, optval_addr)) {
1942                 return -TARGET_EFAULT;
1943             }
1944             ret = get_errno(setsockopt(sockfd, level, optname,
1945                                        &val, sizeof(val)));
1946             break;
1947 
1948         default:
1949             goto unimplemented;
1950         }
1951         break;
1952     case TARGET_SOL_SOCKET:
1953         switch (optname) {
1954         case TARGET_SO_RCVTIMEO:
1955         {
1956                 struct timeval tv;
1957 
1958                 optname = SO_RCVTIMEO;
1959 
1960 set_timeout:
1961                 if (optlen != sizeof(struct target_timeval)) {
1962                     return -TARGET_EINVAL;
1963                 }
1964 
1965                 if (copy_from_user_timeval(&tv, optval_addr)) {
1966                     return -TARGET_EFAULT;
1967                 }
1968 
1969                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970                                 &tv, sizeof(tv)));
1971                 return ret;
1972         }
1973         case TARGET_SO_SNDTIMEO:
1974                 optname = SO_SNDTIMEO;
1975                 goto set_timeout;
1976         case TARGET_SO_ATTACH_FILTER:
1977         {
1978                 struct target_sock_fprog *tfprog;
1979                 struct target_sock_filter *tfilter;
1980                 struct sock_fprog fprog;
1981                 struct sock_filter *filter;
1982                 int i;
1983 
1984                 if (optlen != sizeof(*tfprog)) {
1985                     return -TARGET_EINVAL;
1986                 }
1987                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988                     return -TARGET_EFAULT;
1989                 }
1990                 if (!lock_user_struct(VERIFY_READ, tfilter,
1991                                       tswapal(tfprog->filter), 0)) {
1992                     unlock_user_struct(tfprog, optval_addr, 1);
1993                     return -TARGET_EFAULT;
1994                 }
1995 
1996                 fprog.len = tswap16(tfprog->len);
1997                 filter = g_try_new(struct sock_filter, fprog.len);
1998                 if (filter == NULL) {
1999                     unlock_user_struct(tfilter, tfprog->filter, 1);
2000                     unlock_user_struct(tfprog, optval_addr, 1);
2001                     return -TARGET_ENOMEM;
2002                 }
2003                 for (i = 0; i < fprog.len; i++) {
2004                     filter[i].code = tswap16(tfilter[i].code);
2005                     filter[i].jt = tfilter[i].jt;
2006                     filter[i].jf = tfilter[i].jf;
2007                     filter[i].k = tswap32(tfilter[i].k);
2008                 }
2009                 fprog.filter = filter;
2010 
2011                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013                 g_free(filter);
2014 
2015                 unlock_user_struct(tfilter, tfprog->filter, 1);
2016                 unlock_user_struct(tfprog, optval_addr, 1);
2017                 return ret;
2018         }
2019 	case TARGET_SO_BINDTODEVICE:
2020 	{
2021 		char *dev_ifname, *addr_ifname;
2022 
2023 		if (optlen > IFNAMSIZ - 1) {
2024 		    optlen = IFNAMSIZ - 1;
2025 		}
2026 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027 		if (!dev_ifname) {
2028 		    return -TARGET_EFAULT;
2029 		}
2030 		optname = SO_BINDTODEVICE;
2031 		addr_ifname = alloca(IFNAMSIZ);
2032 		memcpy(addr_ifname, dev_ifname, optlen);
2033 		addr_ifname[optlen] = 0;
2034 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035                                            addr_ifname, optlen));
2036 		unlock_user (dev_ifname, optval_addr, 0);
2037 		return ret;
2038 	}
2039         case TARGET_SO_LINGER:
2040         {
2041                 struct linger lg;
2042                 struct target_linger *tlg;
2043 
2044                 if (optlen != sizeof(struct target_linger)) {
2045                     return -TARGET_EINVAL;
2046                 }
2047                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048                     return -TARGET_EFAULT;
2049                 }
2050                 __get_user(lg.l_onoff, &tlg->l_onoff);
2051                 __get_user(lg.l_linger, &tlg->l_linger);
2052                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053                                 &lg, sizeof(lg)));
2054                 unlock_user_struct(tlg, optval_addr, 0);
2055                 return ret;
2056         }
2057             /* Options with 'int' argument.  */
2058         case TARGET_SO_DEBUG:
2059 		optname = SO_DEBUG;
2060 		break;
2061         case TARGET_SO_REUSEADDR:
2062 		optname = SO_REUSEADDR;
2063 		break;
2064 #ifdef SO_REUSEPORT
2065         case TARGET_SO_REUSEPORT:
2066                 optname = SO_REUSEPORT;
2067                 break;
2068 #endif
2069         case TARGET_SO_TYPE:
2070 		optname = SO_TYPE;
2071 		break;
2072         case TARGET_SO_ERROR:
2073 		optname = SO_ERROR;
2074 		break;
2075         case TARGET_SO_DONTROUTE:
2076 		optname = SO_DONTROUTE;
2077 		break;
2078         case TARGET_SO_BROADCAST:
2079 		optname = SO_BROADCAST;
2080 		break;
2081         case TARGET_SO_SNDBUF:
2082 		optname = SO_SNDBUF;
2083 		break;
2084         case TARGET_SO_SNDBUFFORCE:
2085                 optname = SO_SNDBUFFORCE;
2086                 break;
2087         case TARGET_SO_RCVBUF:
2088 		optname = SO_RCVBUF;
2089 		break;
2090         case TARGET_SO_RCVBUFFORCE:
2091                 optname = SO_RCVBUFFORCE;
2092                 break;
2093         case TARGET_SO_KEEPALIVE:
2094 		optname = SO_KEEPALIVE;
2095 		break;
2096         case TARGET_SO_OOBINLINE:
2097 		optname = SO_OOBINLINE;
2098 		break;
2099         case TARGET_SO_NO_CHECK:
2100 		optname = SO_NO_CHECK;
2101 		break;
2102         case TARGET_SO_PRIORITY:
2103 		optname = SO_PRIORITY;
2104 		break;
2105 #ifdef SO_BSDCOMPAT
2106         case TARGET_SO_BSDCOMPAT:
2107 		optname = SO_BSDCOMPAT;
2108 		break;
2109 #endif
2110         case TARGET_SO_PASSCRED:
2111 		optname = SO_PASSCRED;
2112 		break;
2113         case TARGET_SO_PASSSEC:
2114                 optname = SO_PASSSEC;
2115                 break;
2116         case TARGET_SO_TIMESTAMP:
2117 		optname = SO_TIMESTAMP;
2118 		break;
2119         case TARGET_SO_RCVLOWAT:
2120 		optname = SO_RCVLOWAT;
2121 		break;
2122         default:
2123             goto unimplemented;
2124         }
2125 	if (optlen < sizeof(uint32_t))
2126             return -TARGET_EINVAL;
2127 
2128 	if (get_user_u32(val, optval_addr))
2129             return -TARGET_EFAULT;
2130 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2131         break;
2132     default:
2133     unimplemented:
2134         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2135         ret = -TARGET_ENOPROTOOPT;
2136     }
2137     return ret;
2138 }
2139 
2140 /* do_getsockopt() must return target values and target errnos. */
2141 static abi_long do_getsockopt(int sockfd, int level, int optname,
2142                               abi_ulong optval_addr, abi_ulong optlen)
2143 {
2144     abi_long ret;
2145     int len, val;
2146     socklen_t lv;
2147 
2148     switch(level) {
2149     case TARGET_SOL_SOCKET:
2150         level = SOL_SOCKET;
2151         switch (optname) {
2152         /* These don't just return a single integer */
2153         case TARGET_SO_RCVTIMEO:
2154         case TARGET_SO_SNDTIMEO:
2155         case TARGET_SO_PEERNAME:
2156             goto unimplemented;
2157         case TARGET_SO_PEERCRED: {
2158             struct ucred cr;
2159             socklen_t crlen;
2160             struct target_ucred *tcr;
2161 
2162             if (get_user_u32(len, optlen)) {
2163                 return -TARGET_EFAULT;
2164             }
2165             if (len < 0) {
2166                 return -TARGET_EINVAL;
2167             }
2168 
2169             crlen = sizeof(cr);
2170             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2171                                        &cr, &crlen));
2172             if (ret < 0) {
2173                 return ret;
2174             }
2175             if (len > crlen) {
2176                 len = crlen;
2177             }
2178             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2179                 return -TARGET_EFAULT;
2180             }
2181             __put_user(cr.pid, &tcr->pid);
2182             __put_user(cr.uid, &tcr->uid);
2183             __put_user(cr.gid, &tcr->gid);
2184             unlock_user_struct(tcr, optval_addr, 1);
2185             if (put_user_u32(len, optlen)) {
2186                 return -TARGET_EFAULT;
2187             }
2188             break;
2189         }
2190         case TARGET_SO_LINGER:
2191         {
2192             struct linger lg;
2193             socklen_t lglen;
2194             struct target_linger *tlg;
2195 
2196             if (get_user_u32(len, optlen)) {
2197                 return -TARGET_EFAULT;
2198             }
2199             if (len < 0) {
2200                 return -TARGET_EINVAL;
2201             }
2202 
2203             lglen = sizeof(lg);
2204             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2205                                        &lg, &lglen));
2206             if (ret < 0) {
2207                 return ret;
2208             }
2209             if (len > lglen) {
2210                 len = lglen;
2211             }
2212             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2213                 return -TARGET_EFAULT;
2214             }
2215             __put_user(lg.l_onoff, &tlg->l_onoff);
2216             __put_user(lg.l_linger, &tlg->l_linger);
2217             unlock_user_struct(tlg, optval_addr, 1);
2218             if (put_user_u32(len, optlen)) {
2219                 return -TARGET_EFAULT;
2220             }
2221             break;
2222         }
2223         /* Options with 'int' argument.  */
2224         case TARGET_SO_DEBUG:
2225             optname = SO_DEBUG;
2226             goto int_case;
2227         case TARGET_SO_REUSEADDR:
2228             optname = SO_REUSEADDR;
2229             goto int_case;
2230 #ifdef SO_REUSEPORT
2231         case TARGET_SO_REUSEPORT:
2232             optname = SO_REUSEPORT;
2233             goto int_case;
2234 #endif
2235         case TARGET_SO_TYPE:
2236             optname = SO_TYPE;
2237             goto int_case;
2238         case TARGET_SO_ERROR:
2239             optname = SO_ERROR;
2240             goto int_case;
2241         case TARGET_SO_DONTROUTE:
2242             optname = SO_DONTROUTE;
2243             goto int_case;
2244         case TARGET_SO_BROADCAST:
2245             optname = SO_BROADCAST;
2246             goto int_case;
2247         case TARGET_SO_SNDBUF:
2248             optname = SO_SNDBUF;
2249             goto int_case;
2250         case TARGET_SO_RCVBUF:
2251             optname = SO_RCVBUF;
2252             goto int_case;
2253         case TARGET_SO_KEEPALIVE:
2254             optname = SO_KEEPALIVE;
2255             goto int_case;
2256         case TARGET_SO_OOBINLINE:
2257             optname = SO_OOBINLINE;
2258             goto int_case;
2259         case TARGET_SO_NO_CHECK:
2260             optname = SO_NO_CHECK;
2261             goto int_case;
2262         case TARGET_SO_PRIORITY:
2263             optname = SO_PRIORITY;
2264             goto int_case;
2265 #ifdef SO_BSDCOMPAT
2266         case TARGET_SO_BSDCOMPAT:
2267             optname = SO_BSDCOMPAT;
2268             goto int_case;
2269 #endif
2270         case TARGET_SO_PASSCRED:
2271             optname = SO_PASSCRED;
2272             goto int_case;
2273         case TARGET_SO_TIMESTAMP:
2274             optname = SO_TIMESTAMP;
2275             goto int_case;
2276         case TARGET_SO_RCVLOWAT:
2277             optname = SO_RCVLOWAT;
2278             goto int_case;
2279         case TARGET_SO_ACCEPTCONN:
2280             optname = SO_ACCEPTCONN;
2281             goto int_case;
2282         default:
2283             goto int_case;
2284         }
2285         break;
2286     case SOL_TCP:
2287         /* TCP options all take an 'int' value.  */
2288     int_case:
2289         if (get_user_u32(len, optlen))
2290             return -TARGET_EFAULT;
2291         if (len < 0)
2292             return -TARGET_EINVAL;
2293         lv = sizeof(lv);
2294         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2295         if (ret < 0)
2296             return ret;
2297         if (optname == SO_TYPE) {
2298             val = host_to_target_sock_type(val);
2299         }
2300         if (len > lv)
2301             len = lv;
2302         if (len == 4) {
2303             if (put_user_u32(val, optval_addr))
2304                 return -TARGET_EFAULT;
2305         } else {
2306             if (put_user_u8(val, optval_addr))
2307                 return -TARGET_EFAULT;
2308         }
2309         if (put_user_u32(len, optlen))
2310             return -TARGET_EFAULT;
2311         break;
2312     case SOL_IP:
2313         switch(optname) {
2314         case IP_TOS:
2315         case IP_TTL:
2316         case IP_HDRINCL:
2317         case IP_ROUTER_ALERT:
2318         case IP_RECVOPTS:
2319         case IP_RETOPTS:
2320         case IP_PKTINFO:
2321         case IP_MTU_DISCOVER:
2322         case IP_RECVERR:
2323         case IP_RECVTOS:
2324 #ifdef IP_FREEBIND
2325         case IP_FREEBIND:
2326 #endif
2327         case IP_MULTICAST_TTL:
2328         case IP_MULTICAST_LOOP:
2329             if (get_user_u32(len, optlen))
2330                 return -TARGET_EFAULT;
2331             if (len < 0)
2332                 return -TARGET_EINVAL;
2333             lv = sizeof(lv);
2334             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2335             if (ret < 0)
2336                 return ret;
2337             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2338                 len = 1;
2339                 if (put_user_u32(len, optlen)
2340                     || put_user_u8(val, optval_addr))
2341                     return -TARGET_EFAULT;
2342             } else {
2343                 if (len > sizeof(int))
2344                     len = sizeof(int);
2345                 if (put_user_u32(len, optlen)
2346                     || put_user_u32(val, optval_addr))
2347                     return -TARGET_EFAULT;
2348             }
2349             break;
2350         default:
2351             ret = -TARGET_ENOPROTOOPT;
2352             break;
2353         }
2354         break;
2355     case SOL_IPV6:
2356         switch (optname) {
2357         case IPV6_MTU_DISCOVER:
2358         case IPV6_MTU:
2359         case IPV6_V6ONLY:
2360         case IPV6_RECVPKTINFO:
2361         case IPV6_UNICAST_HOPS:
2362         case IPV6_MULTICAST_HOPS:
2363         case IPV6_MULTICAST_LOOP:
2364         case IPV6_RECVERR:
2365         case IPV6_RECVHOPLIMIT:
2366         case IPV6_2292HOPLIMIT:
2367         case IPV6_CHECKSUM:
2368             if (get_user_u32(len, optlen))
2369                 return -TARGET_EFAULT;
2370             if (len < 0)
2371                 return -TARGET_EINVAL;
2372             lv = sizeof(lv);
2373             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2374             if (ret < 0)
2375                 return ret;
2376             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2377                 len = 1;
2378                 if (put_user_u32(len, optlen)
2379                     || put_user_u8(val, optval_addr))
2380                     return -TARGET_EFAULT;
2381             } else {
2382                 if (len > sizeof(int))
2383                     len = sizeof(int);
2384                 if (put_user_u32(len, optlen)
2385                     || put_user_u32(val, optval_addr))
2386                     return -TARGET_EFAULT;
2387             }
2388             break;
2389         default:
2390             ret = -TARGET_ENOPROTOOPT;
2391             break;
2392         }
2393         break;
2394     default:
2395     unimplemented:
2396         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2397                  level, optname);
2398         ret = -TARGET_EOPNOTSUPP;
2399         break;
2400     }
2401     return ret;
2402 }
2403 
2404 /* Convert a target low/high pair representing a file offset into the host
2405  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2406  * as the kernel doesn't handle them either.
2407  */
2408 static void target_to_host_low_high(abi_ulong tlow,
2409                                     abi_ulong thigh,
2410                                     unsigned long *hlow,
2411                                     unsigned long *hhigh)
2412 {
2413     uint64_t off = tlow |
2414         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2415         TARGET_LONG_BITS / 2;
2416 
2417     *hlow = off;
2418     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2419 }
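/* For example (illustrative, assuming a 32-bit target and a 64-bit host):
 * tlow = 0x00001000 and thigh = 0x1 combine into off = 0x100001000,
 * which is returned as *hlow = 0x100001000 and *hhigh = 0.
 */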
2420 
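/* Lock a guest iovec array into host memory.  Returns a host iovec array
 * on success; on failure returns NULL with errno set (0 for an empty
 * vector, otherwise EINVAL, EFAULT or ENOMEM).  The result must be
 * released with unlock_iovec().
 */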
2421 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2422                                 abi_ulong count, int copy)
2423 {
2424     struct target_iovec *target_vec;
2425     struct iovec *vec;
2426     abi_ulong total_len, max_len;
2427     int i;
2428     int err = 0;
2429     bool bad_address = false;
2430 
2431     if (count == 0) {
2432         errno = 0;
2433         return NULL;
2434     }
2435     if (count > IOV_MAX) {
2436         errno = EINVAL;
2437         return NULL;
2438     }
2439 
2440     vec = g_try_new0(struct iovec, count);
2441     if (vec == NULL) {
2442         errno = ENOMEM;
2443         return NULL;
2444     }
2445 
2446     target_vec = lock_user(VERIFY_READ, target_addr,
2447                            count * sizeof(struct target_iovec), 1);
2448     if (target_vec == NULL) {
2449         err = EFAULT;
2450         goto fail2;
2451     }
2452 
2453     /* ??? If host page size > target page size, this will result in a
2454        value larger than what we can actually support.  */
2455     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2456     total_len = 0;
2457 
2458     for (i = 0; i < count; i++) {
2459         abi_ulong base = tswapal(target_vec[i].iov_base);
2460         abi_long len = tswapal(target_vec[i].iov_len);
2461 
2462         if (len < 0) {
2463             err = EINVAL;
2464             goto fail;
2465         } else if (len == 0) {
2466             /* A zero-length entry is ignored.  */
2467             vec[i].iov_base = 0;
2468         } else {
2469             vec[i].iov_base = lock_user(type, base, len, copy);
2470             /* If the first buffer pointer is bad, this is a fault.  But
2471              * subsequent bad buffers will result in a partial write; this
2472              * is realized by filling the vector with null pointers and
2473              * zero lengths. */
2474             if (!vec[i].iov_base) {
2475                 if (i == 0) {
2476                     err = EFAULT;
2477                     goto fail;
2478                 } else {
2479                     bad_address = true;
2480                 }
2481             }
2482             if (bad_address) {
2483                 len = 0;
2484             }
2485             if (len > max_len - total_len) {
2486                 len = max_len - total_len;
2487             }
2488         }
2489         vec[i].iov_len = len;
2490         total_len += len;
2491     }
2492 
2493     unlock_user(target_vec, target_addr, 0);
2494     return vec;
2495 
2496  fail:
2497     while (--i >= 0) {
2498         if (tswapal(target_vec[i].iov_len) > 0) {
2499             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2500         }
2501     }
2502     unlock_user(target_vec, target_addr, 0);
2503  fail2:
2504     g_free(vec);
2505     errno = err;
2506     return NULL;
2507 }
2508 
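/* Release an iovec obtained from lock_iovec().  When 'copy' is set the
 * buffer contents are copied back to guest memory (the receive/read
 * direction); otherwise the user locks are simply dropped.
 */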
2509 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2510                          abi_ulong count, int copy)
2511 {
2512     struct target_iovec *target_vec;
2513     int i;
2514 
2515     target_vec = lock_user(VERIFY_READ, target_addr,
2516                            count * sizeof(struct target_iovec), 1);
2517     if (target_vec) {
2518         for (i = 0; i < count; i++) {
2519             abi_ulong base = tswapal(target_vec[i].iov_base);
2520             abi_long len = tswapal(target_vec[i].iov_len);
2521             if (len < 0) {
2522                 break;
2523             }
2524             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2525         }
2526         unlock_user(target_vec, target_addr, 0);
2527     }
2528 
2529     g_free(vec);
2530 }
2531 
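/* Translate the guest socket type, including the SOCK_CLOEXEC and
 * SOCK_NONBLOCK flags packed into it, to host values.  Returns 0 on
 * success or -TARGET_EINVAL if a flag cannot be expressed on this host.
 */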
2532 static inline int target_to_host_sock_type(int *type)
2533 {
2534     int host_type = 0;
2535     int target_type = *type;
2536 
2537     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2538     case TARGET_SOCK_DGRAM:
2539         host_type = SOCK_DGRAM;
2540         break;
2541     case TARGET_SOCK_STREAM:
2542         host_type = SOCK_STREAM;
2543         break;
2544     default:
2545         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2546         break;
2547     }
2548     if (target_type & TARGET_SOCK_CLOEXEC) {
2549 #if defined(SOCK_CLOEXEC)
2550         host_type |= SOCK_CLOEXEC;
2551 #else
2552         return -TARGET_EINVAL;
2553 #endif
2554     }
2555     if (target_type & TARGET_SOCK_NONBLOCK) {
2556 #if defined(SOCK_NONBLOCK)
2557         host_type |= SOCK_NONBLOCK;
2558 #elif !defined(O_NONBLOCK)
2559         return -TARGET_EINVAL;
2560 #endif
2561     }
2562     *type = host_type;
2563     return 0;
2564 }
2565 
2566 /* Try to emulate socket type flags after socket creation.  */
2567 static int sock_flags_fixup(int fd, int target_type)
2568 {
2569 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2570     if (target_type & TARGET_SOCK_NONBLOCK) {
2571         int flags = fcntl(fd, F_GETFL);
2572         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2573             close(fd);
2574             return -TARGET_EINVAL;
2575         }
2576     }
2577 #endif
2578     return fd;
2579 }
2580 
2581 /* do_socket() must return target values and target errnos. */
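/* For PF_NETLINK only NETLINK_ROUTE (when CONFIG_RTNETLINK is set),
 * NETLINK_KOBJECT_UEVENT and NETLINK_AUDIT are accepted, since those are
 * the protocols the fd translators know how to convert.  For AF_PACKET
 * the protocol number is in network byte order, so it is byte-swapped
 * for cross-endian targets.
 */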
2582 static abi_long do_socket(int domain, int type, int protocol)
2583 {
2584     int target_type = type;
2585     int ret;
2586 
2587     ret = target_to_host_sock_type(&type);
2588     if (ret) {
2589         return ret;
2590     }
2591 
2592     if (domain == PF_NETLINK && !(
2593 #ifdef CONFIG_RTNETLINK
2594          protocol == NETLINK_ROUTE ||
2595 #endif
2596          protocol == NETLINK_KOBJECT_UEVENT ||
2597          protocol == NETLINK_AUDIT)) {
2598         return -EPFNOSUPPORT;
2599     }
2600 
2601     if (domain == AF_PACKET ||
2602         (domain == AF_INET && type == SOCK_PACKET)) {
2603         protocol = tswap16(protocol);
2604     }
2605 
2606     ret = get_errno(socket(domain, type, protocol));
2607     if (ret >= 0) {
2608         ret = sock_flags_fixup(ret, target_type);
2609         if (type == SOCK_PACKET) {
2610             /* Handle the obsolete case: if the socket type is
2611              * SOCK_PACKET, bind by name.
2612              */
2613             fd_trans_register(ret, &target_packet_trans);
2614         } else if (domain == PF_NETLINK) {
2615             switch (protocol) {
2616 #ifdef CONFIG_RTNETLINK
2617             case NETLINK_ROUTE:
2618                 fd_trans_register(ret, &target_netlink_route_trans);
2619                 break;
2620 #endif
2621             case NETLINK_KOBJECT_UEVENT:
2622                 /* nothing to do: messages are strings */
2623                 break;
2624             case NETLINK_AUDIT:
2625                 fd_trans_register(ret, &target_netlink_audit_trans);
2626                 break;
2627             default:
2628                 g_assert_not_reached();
2629             }
2630         }
2631     }
2632     return ret;
2633 }
2634 
2635 /* do_bind() must return target values and target errnos. */
2636 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2637                         socklen_t addrlen)
2638 {
2639     void *addr;
2640     abi_long ret;
2641 
2642     if ((int)addrlen < 0) {
2643         return -TARGET_EINVAL;
2644     }
2645 
2646     addr = alloca(addrlen+1);
2647 
2648     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2649     if (ret)
2650         return ret;
2651 
2652     return get_errno(bind(sockfd, addr, addrlen));
2653 }
2654 
2655 /* do_connect() must return target values and target errnos. */
2656 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2657                            socklen_t addrlen)
2658 {
2659     void *addr;
2660     abi_long ret;
2661 
2662     if ((int)addrlen < 0) {
2663         return -TARGET_EINVAL;
2664     }
2665 
2666     addr = alloca(addrlen+1);
2667 
2668     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2669     if (ret)
2670         return ret;
2671 
2672     return get_errno(safe_connect(sockfd, addr, addrlen));
2673 }
2674 
2675 /* do_sendrecvmsg_locked() must return target values and target errnos. */
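/* The host control-message buffer below is allocated at twice the guest's
 * msg_controllen, since host cmsg headers and alignment may be larger than
 * the target's; this is the allocation strategy referred to by the
 * overflow comment in target_to_host_cmsg().
 */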
2676 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2677                                       int flags, int send)
2678 {
2679     abi_long ret, len;
2680     struct msghdr msg;
2681     abi_ulong count;
2682     struct iovec *vec;
2683     abi_ulong target_vec;
2684 
2685     if (msgp->msg_name) {
2686         msg.msg_namelen = tswap32(msgp->msg_namelen);
2687         msg.msg_name = alloca(msg.msg_namelen+1);
2688         ret = target_to_host_sockaddr(fd, msg.msg_name,
2689                                       tswapal(msgp->msg_name),
2690                                       msg.msg_namelen);
2691         if (ret == -TARGET_EFAULT) {
2692             /* For connected sockets msg_name and msg_namelen must
2693              * be ignored, so returning EFAULT immediately is wrong.
2694              * Instead, pass a bad msg_name to the host kernel, and
2695              * let it decide whether to return EFAULT or not.
2696              */
2697             msg.msg_name = (void *)-1;
2698         } else if (ret) {
2699             goto out2;
2700         }
2701     } else {
2702         msg.msg_name = NULL;
2703         msg.msg_namelen = 0;
2704     }
2705     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2706     msg.msg_control = alloca(msg.msg_controllen);
2707     memset(msg.msg_control, 0, msg.msg_controllen);
2708 
2709     msg.msg_flags = tswap32(msgp->msg_flags);
2710 
2711     count = tswapal(msgp->msg_iovlen);
2712     target_vec = tswapal(msgp->msg_iov);
2713 
2714     if (count > IOV_MAX) {
2715         /* sendmsg/recvmsg return a different errno for this condition than
2716          * readv/writev, so we must catch it here before lock_iovec() does.
2717          */
2718         ret = -TARGET_EMSGSIZE;
2719         goto out2;
2720     }
2721 
2722     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2723                      target_vec, count, send);
2724     if (vec == NULL) {
2725         ret = -host_to_target_errno(errno);
2726         goto out2;
2727     }
2728     msg.msg_iovlen = count;
2729     msg.msg_iov = vec;
2730 
2731     if (send) {
2732         if (fd_trans_target_to_host_data(fd)) {
2733             void *host_msg;
2734 
2735             host_msg = g_malloc(msg.msg_iov->iov_len);
2736             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2737             ret = fd_trans_target_to_host_data(fd)(host_msg,
2738                                                    msg.msg_iov->iov_len);
2739             if (ret >= 0) {
2740                 msg.msg_iov->iov_base = host_msg;
2741                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2742             }
2743             g_free(host_msg);
2744         } else {
2745             ret = target_to_host_cmsg(&msg, msgp);
2746             if (ret == 0) {
2747                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2748             }
2749         }
2750     } else {
2751         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2752         if (!is_error(ret)) {
2753             len = ret;
2754             if (fd_trans_host_to_target_data(fd)) {
2755                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2756                                                MIN(msg.msg_iov->iov_len, len));
2757             } else {
2758                 ret = host_to_target_cmsg(msgp, &msg);
2759             }
2760             if (!is_error(ret)) {
2761                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2762                 msgp->msg_flags = tswap32(msg.msg_flags);
2763                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2764                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2765                                     msg.msg_name, msg.msg_namelen);
2766                     if (ret) {
2767                         goto out;
2768                     }
2769                 }
2770 
2771                 ret = len;
2772             }
2773         }
2774     }
2775 
2776 out:
2777     unlock_iovec(vec, target_vec, count, !send);
2778 out2:
2779     return ret;
2780 }
2781 
2782 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2783                                int flags, int send)
2784 {
2785     abi_long ret;
2786     struct target_msghdr *msgp;
2787 
2788     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2789                           msgp,
2790                           target_msg,
2791                           send ? 1 : 0)) {
2792         return -TARGET_EFAULT;
2793     }
2794     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2795     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2796     return ret;
2797 }
2798 
2799 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2800  * so it might not have this *mmsg-specific flag either.
2801  */
2802 #ifndef MSG_WAITFORONE
2803 #define MSG_WAITFORONE 0x10000
2804 #endif
2805 
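/* Emulate sendmmsg()/recvmmsg() as a loop of single sendmsg()/recvmsg()
 * operations.  If at least one message was transferred, the count is
 * returned; otherwise the error from the first message is returned.
 */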
2806 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2807                                 unsigned int vlen, unsigned int flags,
2808                                 int send)
2809 {
2810     struct target_mmsghdr *mmsgp;
2811     abi_long ret = 0;
2812     int i;
2813 
2814     if (vlen > UIO_MAXIOV) {
2815         vlen = UIO_MAXIOV;
2816     }
2817 
2818     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2819     if (!mmsgp) {
2820         return -TARGET_EFAULT;
2821     }
2822 
2823     for (i = 0; i < vlen; i++) {
2824         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2825         if (is_error(ret)) {
2826             break;
2827         }
2828         mmsgp[i].msg_len = tswap32(ret);
2829         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2830         if (flags & MSG_WAITFORONE) {
2831             flags |= MSG_DONTWAIT;
2832         }
2833     }
2834 
2835     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2836 
2837     /* Return the number of datagrams sent (or received) if we handled
2838      * any at all; otherwise return the error.
2839      */
2840     if (i) {
2841         return i;
2842     }
2843     return ret;
2844 }
2845 
2846 /* do_accept4() must return target values and target errnos. */
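/* Handles both accept() and accept4(); plain accept is dispatched here
 * with flags == 0.  The peer address is converted back with
 * host_to_target_sockaddr() and the length reported by the kernel is
 * written back to the guest.
 */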
2847 static abi_long do_accept4(int fd, abi_ulong target_addr,
2848                            abi_ulong target_addrlen_addr, int flags)
2849 {
2850     socklen_t addrlen, ret_addrlen;
2851     void *addr;
2852     abi_long ret;
2853     int host_flags;
2854 
2855     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2856 
2857     if (target_addr == 0) {
2858         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2859     }
2860 
2861     /* Linux returns EINVAL if the addrlen pointer is invalid */
2862     if (get_user_u32(addrlen, target_addrlen_addr))
2863         return -TARGET_EINVAL;
2864 
2865     if ((int)addrlen < 0) {
2866         return -TARGET_EINVAL;
2867     }
2868 
2869     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2870         return -TARGET_EINVAL;
2871 
2872     addr = alloca(addrlen);
2873 
2874     ret_addrlen = addrlen;
2875     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2876     if (!is_error(ret)) {
2877         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2878         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2879             ret = -TARGET_EFAULT;
2880         }
2881     }
2882     return ret;
2883 }
2884 
2885 /* do_getpeername() must return target values and target errnos. */
2886 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2887                                abi_ulong target_addrlen_addr)
2888 {
2889     socklen_t addrlen, ret_addrlen;
2890     void *addr;
2891     abi_long ret;
2892 
2893     if (get_user_u32(addrlen, target_addrlen_addr))
2894         return -TARGET_EFAULT;
2895 
2896     if ((int)addrlen < 0) {
2897         return -TARGET_EINVAL;
2898     }
2899 
2900     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2901         return -TARGET_EFAULT;
2902 
2903     addr = alloca(addrlen);
2904 
2905     ret_addrlen = addrlen;
2906     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2907     if (!is_error(ret)) {
2908         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2909         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2910             ret = -TARGET_EFAULT;
2911         }
2912     }
2913     return ret;
2914 }
2915 
2916 /* do_getsockname() must return target values and target errnos. */
2917 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2918                                abi_ulong target_addrlen_addr)
2919 {
2920     socklen_t addrlen, ret_addrlen;
2921     void *addr;
2922     abi_long ret;
2923 
2924     if (get_user_u32(addrlen, target_addrlen_addr))
2925         return -TARGET_EFAULT;
2926 
2927     if ((int)addrlen < 0) {
2928         return -TARGET_EINVAL;
2929     }
2930 
2931     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2932         return -TARGET_EFAULT;
2933 
2934     addr = alloca(addrlen);
2935 
2936     ret_addrlen = addrlen;
2937     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2938     if (!is_error(ret)) {
2939         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2940         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2941             ret = -TARGET_EFAULT;
2942         }
2943     }
2944     return ret;
2945 }
2946 
2947 /* do_socketpair() must return target values and target errnos. */
2948 static abi_long do_socketpair(int domain, int type, int protocol,
2949                               abi_ulong target_tab_addr)
2950 {
2951     int tab[2];
2952     abi_long ret;
2953 
2954     target_to_host_sock_type(&type);
2955 
2956     ret = get_errno(socketpair(domain, type, protocol, tab));
2957     if (!is_error(ret)) {
2958         if (put_user_s32(tab[0], target_tab_addr)
2959             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2960             ret = -TARGET_EFAULT;
2961     }
2962     return ret;
2963 }
2964 
2965 /* do_sendto() must return target values and target errnos. */
2966 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2967                           abi_ulong target_addr, socklen_t addrlen)
2968 {
2969     void *addr;
2970     void *host_msg;
2971     void *copy_msg = NULL;
2972     abi_long ret;
2973 
2974     if ((int)addrlen < 0) {
2975         return -TARGET_EINVAL;
2976     }
2977 
2978     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2979     if (!host_msg)
2980         return -TARGET_EFAULT;
2981     if (fd_trans_target_to_host_data(fd)) {
2982         copy_msg = host_msg;
2983         host_msg = g_malloc(len);
2984         memcpy(host_msg, copy_msg, len);
2985         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2986         if (ret < 0) {
2987             goto fail;
2988         }
2989     }
2990     if (target_addr) {
2991         addr = alloca(addrlen+1);
2992         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2993         if (ret) {
2994             goto fail;
2995         }
2996         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2997     } else {
2998         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2999     }
3000 fail:
3001     if (copy_msg) {
3002         g_free(host_msg);
3003         host_msg = copy_msg;
3004     }
3005     unlock_user(host_msg, msg, 0);
3006     return ret;
3007 }
3008 
3009 /* do_recvfrom() must return target values and target errnos. */
3010 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3011                             abi_ulong target_addr,
3012                             abi_ulong target_addrlen)
3013 {
3014     socklen_t addrlen, ret_addrlen;
3015     void *addr;
3016     void *host_msg;
3017     abi_long ret;
3018 
3019     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3020     if (!host_msg)
3021         return -TARGET_EFAULT;
3022     if (target_addr) {
3023         if (get_user_u32(addrlen, target_addrlen)) {
3024             ret = -TARGET_EFAULT;
3025             goto fail;
3026         }
3027         if ((int)addrlen < 0) {
3028             ret = -TARGET_EINVAL;
3029             goto fail;
3030         }
3031         addr = alloca(addrlen);
3032         ret_addrlen = addrlen;
3033         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3034                                       addr, &ret_addrlen));
3035     } else {
3036         addr = NULL; /* To keep compiler quiet.  */
3037         addrlen = 0; /* To keep compiler quiet.  */
3038         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3039     }
3040     if (!is_error(ret)) {
3041         if (fd_trans_host_to_target_data(fd)) {
3042             abi_long trans;
3043             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3044             if (is_error(trans)) {
3045                 ret = trans;
3046                 goto fail;
3047             }
3048         }
3049         if (target_addr) {
3050             host_to_target_sockaddr(target_addr, addr,
3051                                     MIN(addrlen, ret_addrlen));
3052             if (put_user_u32(ret_addrlen, target_addrlen)) {
3053                 ret = -TARGET_EFAULT;
3054                 goto fail;
3055             }
3056         }
3057         unlock_user(host_msg, msg, len);
3058     } else {
3059 fail:
3060         unlock_user(host_msg, msg, 0);
3061     }
3062     return ret;
3063 }
3064 
3065 #ifdef TARGET_NR_socketcall
3066 /* do_socketcall() must return target values and target errnos. */
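/* Architectures such as i386 multiplex all socket operations through a
 * single socketcall(num, args) syscall; vptr is the guest address of the
 * packed argument array, which is unpacked according to nargs[] before
 * dispatching to the helpers above.
 */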
3067 static abi_long do_socketcall(int num, abi_ulong vptr)
3068 {
3069     static const unsigned nargs[] = { /* number of arguments per operation */
3070         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3071         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3072         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3073         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3074         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3075         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3076         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3077         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3078         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3079         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3080         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3081         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3082         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3083         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3084         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3085         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3086         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3087         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3088         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3089         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3090     };
3091     abi_long a[6]; /* max 6 args */
3092     unsigned i;
3093 
3094     /* check the range of the first argument num */
3095     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3096     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3097         return -TARGET_EINVAL;
3098     }
3099     /* ensure we have space for args */
3100     if (nargs[num] > ARRAY_SIZE(a)) {
3101         return -TARGET_EINVAL;
3102     }
3103     /* collect the arguments in a[] according to nargs[] */
3104     for (i = 0; i < nargs[num]; ++i) {
3105         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3106             return -TARGET_EFAULT;
3107         }
3108     }
3109     /* now that we have the args, invoke the appropriate underlying function */
3110     switch (num) {
3111     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3112         return do_socket(a[0], a[1], a[2]);
3113     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3114         return do_bind(a[0], a[1], a[2]);
3115     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3116         return do_connect(a[0], a[1], a[2]);
3117     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3118         return get_errno(listen(a[0], a[1]));
3119     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3120         return do_accept4(a[0], a[1], a[2], 0);
3121     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3122         return do_getsockname(a[0], a[1], a[2]);
3123     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3124         return do_getpeername(a[0], a[1], a[2]);
3125     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3126         return do_socketpair(a[0], a[1], a[2], a[3]);
3127     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3128         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3129     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3130         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3131     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3132         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3133     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3134         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3135     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3136         return get_errno(shutdown(a[0], a[1]));
3137     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3138         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3139     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3140         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3141     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3142         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3143     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3144         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3145     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3146         return do_accept4(a[0], a[1], a[2], a[3]);
3147     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3148         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3149     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3150         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3151     default:
3152         gemu_log("Unsupported socketcall: %d\n", num);
3153         return -TARGET_EINVAL;
3154     }
3155 }
3156 #endif
3157 
3158 #define N_SHM_REGIONS	32
3159 
3160 static struct shm_region {
3161     abi_ulong start;
3162     abi_ulong size;
3163     bool in_use;
3164 } shm_regions[N_SHM_REGIONS];
3165 
3166 #ifndef TARGET_SEMID64_DS
3167 /* asm-generic version of this struct */
3168 struct target_semid64_ds
3169 {
3170   struct target_ipc_perm sem_perm;
3171   abi_ulong sem_otime;
3172 #if TARGET_ABI_BITS == 32
3173   abi_ulong __unused1;
3174 #endif
3175   abi_ulong sem_ctime;
3176 #if TARGET_ABI_BITS == 32
3177   abi_ulong __unused2;
3178 #endif
3179   abi_ulong sem_nsems;
3180   abi_ulong __unused3;
3181   abi_ulong __unused4;
3182 };
3183 #endif
3184 
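/* Copy the guest ipc_perm embedded in a semid64_ds at target_addr into
 * *host_ip, byteswapping each field.  Note that the mode and __seq fields
 * are 32 bits wide on some targets and 16 bits on others.
 */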
3185 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3186                                                abi_ulong target_addr)
3187 {
3188     struct target_ipc_perm *target_ip;
3189     struct target_semid64_ds *target_sd;
3190 
3191     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3192         return -TARGET_EFAULT;
3193     target_ip = &(target_sd->sem_perm);
3194     host_ip->__key = tswap32(target_ip->__key);
3195     host_ip->uid = tswap32(target_ip->uid);
3196     host_ip->gid = tswap32(target_ip->gid);
3197     host_ip->cuid = tswap32(target_ip->cuid);
3198     host_ip->cgid = tswap32(target_ip->cgid);
3199 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3200     host_ip->mode = tswap32(target_ip->mode);
3201 #else
3202     host_ip->mode = tswap16(target_ip->mode);
3203 #endif
3204 #if defined(TARGET_PPC)
3205     host_ip->__seq = tswap32(target_ip->__seq);
3206 #else
3207     host_ip->__seq = tswap16(target_ip->__seq);
3208 #endif
3209     unlock_user_struct(target_sd, target_addr, 0);
3210     return 0;
3211 }
3212 
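/* The inverse of target_to_host_ipc_perm(): write *host_ip back into the
 * guest ipc_perm embedded in the semid64_ds at target_addr.
 */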
3213 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3214                                                struct ipc_perm *host_ip)
3215 {
3216     struct target_ipc_perm *target_ip;
3217     struct target_semid64_ds *target_sd;
3218 
3219     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3220         return -TARGET_EFAULT;
3221     target_ip = &(target_sd->sem_perm);
3222     target_ip->__key = tswap32(host_ip->__key);
3223     target_ip->uid = tswap32(host_ip->uid);
3224     target_ip->gid = tswap32(host_ip->gid);
3225     target_ip->cuid = tswap32(host_ip->cuid);
3226     target_ip->cgid = tswap32(host_ip->cgid);
3227 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3228     target_ip->mode = tswap32(host_ip->mode);
3229 #else
3230     target_ip->mode = tswap16(host_ip->mode);
3231 #endif
3232 #if defined(TARGET_PPC)
3233     target_ip->__seq = tswap32(host_ip->__seq);
3234 #else
3235     target_ip->__seq = tswap16(host_ip->__seq);
3236 #endif
3237     unlock_user_struct(target_sd, target_addr, 1);
3238     return 0;
3239 }
3240 
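/* Convert a guest semid64_ds at target_addr into the host semid_ds,
 * including the embedded ipc_perm.
 */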
3241 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3242                                                abi_ulong target_addr)
3243 {
3244     struct target_semid64_ds *target_sd;
3245 
3246     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3247         return -TARGET_EFAULT;
3248     if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
3249         return -TARGET_EFAULT;
3250     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3251     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3252     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3253     unlock_user_struct(target_sd, target_addr, 0);
3254     return 0;
3255 }
3256 
3257 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3258                                                struct semid_ds *host_sd)
3259 {
3260     struct target_semid64_ds *target_sd;
3261 
3262     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3263         return -TARGET_EFAULT;
3264     if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
3265         return -TARGET_EFAULT;
3266     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3267     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3268     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3269     unlock_user_struct(target_sd, target_addr, 1);
3270     return 0;
3271 }
3272 
3273 struct target_seminfo {
3274     int semmap;
3275     int semmni;
3276     int semmns;
3277     int semmnu;
3278     int semmsl;
3279     int semopm;
3280     int semume;
3281     int semusz;
3282     int semvmx;
3283     int semaem;
3284 };
3285 
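/* Copy a host struct seminfo out to the guest seminfo at target_addr. */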
3286 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3287                                               struct seminfo *host_seminfo)
3288 {
3289     struct target_seminfo *target_seminfo;
3290     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3291         return -TARGET_EFAULT;
3292     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3293     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3294     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3295     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3296     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3297     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3298     __put_user(host_seminfo->semume, &target_seminfo->semume);
3299     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3300     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3301     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3302     unlock_user_struct(target_seminfo, target_addr, 1);
3303     return 0;
3304 }
3305 
3306 union semun {
3307     int val;
3308     struct semid_ds *buf;
3309     unsigned short *array;
3310     struct seminfo *__buf;
3311 };
3312 
3313 union target_semun {
3314     int val;
3315     abi_ulong buf;
3316     abi_ulong array;
3317     abi_ulong __buf;
3318 };
3319 
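/* Read the guest's semaphore value array into a newly allocated host
 * array.  The number of semaphores is obtained with IPC_STAT on the
 * semaphore set; in the do_semctl() flow the array is later released by
 * host_to_target_semarray().
 */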
3320 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3321                                                abi_ulong target_addr)
3322 {
3323     int nsems;
3324     unsigned short *array;
3325     union semun semun;
3326     struct semid_ds semid_ds;
3327     int i, ret;
3328 
3329     semun.buf = &semid_ds;
3330 
3331     ret = semctl(semid, 0, IPC_STAT, semun);
3332     if (ret == -1)
3333         return get_errno(ret);
3334 
3335     nsems = semid_ds.sem_nsems;
3336 
3337     *host_array = g_try_new(unsigned short, nsems);
3338     if (!*host_array) {
3339         return -TARGET_ENOMEM;
3340     }
3341     array = lock_user(VERIFY_READ, target_addr,
3342                       nsems*sizeof(unsigned short), 1);
3343     if (!array) {
3344         g_free(*host_array);
3345         return -TARGET_EFAULT;
3346     }
3347 
3348     for (i = 0; i < nsems; i++) {
3349         __get_user((*host_array)[i], &array[i]);
3350     }
3351     unlock_user(array, target_addr, 0);
3352 
3353     return 0;
3354 }
3355 
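/* Write the host semaphore value array back to the guest and free it. */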
3356 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3357                                                unsigned short **host_array)
3358 {
3359     int nsems;
3360     unsigned short *array;
3361     union semun semun;
3362     struct semid_ds semid_ds;
3363     int i, ret;
3364 
3365     semun.buf = &semid_ds;
3366 
3367     ret = semctl(semid, 0, IPC_STAT, semun);
3368     if (ret == -1)
3369         return get_errno(ret);
3370 
3371     nsems = semid_ds.sem_nsems;
3372 
3373     array = lock_user(VERIFY_WRITE, target_addr,
3374                       nsems*sizeof(unsigned short), 0);
3375     if (!array)
3376         return -TARGET_EFAULT;
3377 
3378     for (i = 0; i < nsems; i++) {
3379         __put_user((*host_array)[i], &array[i]);
3380     }
3381     g_free(*host_array);
3382     unlock_user(array, target_addr, 1);
3383 
3384     return 0;
3385 }
3386 
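/* Emulate semctl(): the low byte of cmd selects the operation and
 * target_arg holds the guest's union semun value.
 */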
3387 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3388                                  abi_ulong target_arg)
3389 {
3390     union target_semun target_su = { .buf = target_arg };
3391     union semun arg;
3392     struct semid_ds dsarg;
3393     unsigned short *array = NULL;
3394     struct seminfo seminfo;
3395     abi_long ret = -TARGET_EINVAL;
3396     abi_long err;
3397     cmd &= 0xff;
3398 
3399     switch (cmd) {
3400     case GETVAL:
3401     case SETVAL:
3402         /* In 64 bit cross-endian situations, we will erroneously pick up
3403          * the wrong half of the union for the "val" element.  To rectify
3404          * this, the entire 8-byte structure is byteswapped, followed by
3405          * a swap of the 4 byte val field. In other cases, the data is
3406          * already in proper host byte order. */
3407         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3408             target_su.buf = tswapal(target_su.buf);
3409             arg.val = tswap32(target_su.val);
3410         } else {
3411             arg.val = target_su.val;
3412         }
3413         ret = get_errno(semctl(semid, semnum, cmd, arg));
3414         break;
3415     case GETALL:
3416     case SETALL:
3417         err = target_to_host_semarray(semid, &array, target_su.array);
3418         if (err)
3419             return err;
3420         arg.array = array;
3421         ret = get_errno(semctl(semid, semnum, cmd, arg));
3422         err = host_to_target_semarray(semid, target_su.array, &array);
3423         if (err)
3424             return err;
3425         break;
3426     case IPC_STAT:
3427     case IPC_SET:
3428     case SEM_STAT:
3429         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3430         if (err)
3431             return err;
3432         arg.buf = &dsarg;
3433         ret = get_errno(semctl(semid, semnum, cmd, arg));
3434         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3435         if (err)
3436             return err;
3437         break;
3438     case IPC_INFO:
3439     case SEM_INFO:
3440         arg.__buf = &seminfo;
3441         ret = get_errno(semctl(semid, semnum, cmd, arg));
3442         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3443         if (err)
3444             return err;
3445         break;
3446     case IPC_RMID:
3447     case GETPID:
3448     case GETNCNT:
3449     case GETZCNT:
3450         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3451         break;
3452     }
3453 
3454     return ret;
3455 }
3456 
3457 struct target_sembuf {
3458     unsigned short sem_num;
3459     short sem_op;
3460     short sem_flg;
3461 };
3462 
3463 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3464                                              abi_ulong target_addr,
3465                                              unsigned nsops)
3466 {
3467     struct target_sembuf *target_sembuf;
3468     int i;
3469 
3470     target_sembuf = lock_user(VERIFY_READ, target_addr,
3471                               nsops*sizeof(struct target_sembuf), 1);
3472     if (!target_sembuf)
3473         return -TARGET_EFAULT;
3474 
3475     for (i = 0; i < nsops; i++) {
3476         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3477         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3478         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3479     }
3480 
3481     unlock_user(target_sembuf, target_addr, 0);
3482 
3483     return 0;
3484 }
3485 
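/* Perform a guest semop() by converting the sembuf array and calling the
 * host semtimedop() with no timeout.
 */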
3486 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3487 {
3488     struct sembuf sops[nsops];
3489 
3490     if (target_to_host_sembuf(sops, ptr, nsops))
3491         return -TARGET_EFAULT;
3492 
3493     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3494 }
3495 
3496 struct target_msqid_ds
3497 {
3498     struct target_ipc_perm msg_perm;
3499     abi_ulong msg_stime;
3500 #if TARGET_ABI_BITS == 32
3501     abi_ulong __unused1;
3502 #endif
3503     abi_ulong msg_rtime;
3504 #if TARGET_ABI_BITS == 32
3505     abi_ulong __unused2;
3506 #endif
3507     abi_ulong msg_ctime;
3508 #if TARGET_ABI_BITS == 32
3509     abi_ulong __unused3;
3510 #endif
3511     abi_ulong __msg_cbytes;
3512     abi_ulong msg_qnum;
3513     abi_ulong msg_qbytes;
3514     abi_ulong msg_lspid;
3515     abi_ulong msg_lrpid;
3516     abi_ulong __unused4;
3517     abi_ulong __unused5;
3518 };
3519 
3520 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3521                                                abi_ulong target_addr)
3522 {
3523     struct target_msqid_ds *target_md;
3524 
3525     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3526         return -TARGET_EFAULT;
3527     if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
3528         return -TARGET_EFAULT;
3529     host_md->msg_stime = tswapal(target_md->msg_stime);
3530     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3531     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3532     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3533     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3534     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3535     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3536     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3537     unlock_user_struct(target_md, target_addr, 0);
3538     return 0;
3539 }
3540 
3541 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3542                                                struct msqid_ds *host_md)
3543 {
3544     struct target_msqid_ds *target_md;
3545 
3546     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3547         return -TARGET_EFAULT;
3548     if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
3549         return -TARGET_EFAULT;
3550     target_md->msg_stime = tswapal(host_md->msg_stime);
3551     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3552     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3553     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3554     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3555     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3556     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3557     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3558     unlock_user_struct(target_md, target_addr, 1);
3559     return 0;
3560 }
3561 
3562 struct target_msginfo {
3563     int msgpool;
3564     int msgmap;
3565     int msgmax;
3566     int msgmnb;
3567     int msgmni;
3568     int msgssz;
3569     int msgtql;
3570     unsigned short int msgseg;
3571 };
3572 
3573 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3574                                               struct msginfo *host_msginfo)
3575 {
3576     struct target_msginfo *target_msginfo;
3577     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3578         return -TARGET_EFAULT;
3579     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3580     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3581     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3582     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3583     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3584     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3585     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3586     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3587     unlock_user_struct(target_msginfo, target_addr, 1);
3588     return 0;
3589 }
3590 
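/* Emulate msgctl(), converting the msqid_ds or msginfo buffer between
 * guest and host layouts as required by the command.
 */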
3591 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3592 {
3593     struct msqid_ds dsarg;
3594     struct msginfo msginfo;
3595     abi_long ret = -TARGET_EINVAL;
3596 
3597     cmd &= 0xff;
3598 
3599     switch (cmd) {
3600     case IPC_STAT:
3601     case IPC_SET:
3602     case MSG_STAT:
3603         if (target_to_host_msqid_ds(&dsarg, ptr))
3604             return -TARGET_EFAULT;
3605         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3606         if (host_to_target_msqid_ds(ptr, &dsarg))
3607             return -TARGET_EFAULT;
3608         break;
3609     case IPC_RMID:
3610         ret = get_errno(msgctl(msgid, cmd, NULL));
3611         break;
3612     case IPC_INFO:
3613     case MSG_INFO:
3614         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3615         if (host_to_target_msginfo(ptr, &msginfo))
3616             return -TARGET_EFAULT;
3617         break;
3618     }
3619 
3620     return ret;
3621 }
3622 
3623 struct target_msgbuf {
3624     abi_long mtype;
3625     char mtext[1];
3626 };
3627 
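/* Emulate msgsnd(): copy the guest msgbuf into a host buffer (swapping
 * mtype) and hand it to safe_msgsnd().
 */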
3628 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3629                                  ssize_t msgsz, int msgflg)
3630 {
3631     struct target_msgbuf *target_mb;
3632     struct msgbuf *host_mb;
3633     abi_long ret = 0;
3634 
3635     if (msgsz < 0) {
3636         return -TARGET_EINVAL;
3637     }
3638 
3639     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3640         return -TARGET_EFAULT;
3641     host_mb = g_try_malloc(msgsz + sizeof(long));
3642     if (!host_mb) {
3643         unlock_user_struct(target_mb, msgp, 0);
3644         return -TARGET_ENOMEM;
3645     }
3646     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3647     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3648     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3649     g_free(host_mb);
3650     unlock_user_struct(target_mb, msgp, 0);
3651 
3652     return ret;
3653 }
3654 
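/* Emulate msgrcv(): receive into a host buffer, then copy the message
 * text and the byteswapped mtype back out to the guest msgbuf.
 */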
3655 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3656                                  ssize_t msgsz, abi_long msgtyp,
3657                                  int msgflg)
3658 {
3659     struct target_msgbuf *target_mb;
3660     char *target_mtext;
3661     struct msgbuf *host_mb;
3662     abi_long ret = 0;
3663 
3664     if (msgsz < 0) {
3665         return -TARGET_EINVAL;
3666     }
3667 
3668     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3669         return -TARGET_EFAULT;
3670 
3671     host_mb = g_try_malloc(msgsz + sizeof(long));
3672     if (!host_mb) {
3673         ret = -TARGET_ENOMEM;
3674         goto end;
3675     }
3676     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3677 
3678     if (ret > 0) {
3679         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3680         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3681         if (!target_mtext) {
3682             ret = -TARGET_EFAULT;
3683             goto end;
3684         }
3685         memcpy(target_mb->mtext, host_mb->mtext, ret);
3686         unlock_user(target_mtext, target_mtext_addr, ret);
3687     }
3688 
3689     target_mb->mtype = tswapal(host_mb->mtype);
3690 
3691 end:
3692     if (target_mb)
3693         unlock_user_struct(target_mb, msgp, 1);
3694     g_free(host_mb);
3695     return ret;
3696 }
3697 
3698 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3699                                                abi_ulong target_addr)
3700 {
3701     struct target_shmid_ds *target_sd;
3702 
3703     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3704         return -TARGET_EFAULT;
3705     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3706         return -TARGET_EFAULT;
3707     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3708     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3709     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3710     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3711     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3712     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3713     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3714     unlock_user_struct(target_sd, target_addr, 0);
3715     return 0;
3716 }
3717 
3718 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3719                                                struct shmid_ds *host_sd)
3720 {
3721     struct target_shmid_ds *target_sd;
3722 
3723     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3724         return -TARGET_EFAULT;
3725     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3726         return -TARGET_EFAULT;
3727     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3728     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3729     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3730     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3731     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3732     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3733     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3734     unlock_user_struct(target_sd, target_addr, 1);
3735     return 0;
3736 }
3737 
3738 struct  target_shminfo {
3739     abi_ulong shmmax;
3740     abi_ulong shmmin;
3741     abi_ulong shmmni;
3742     abi_ulong shmseg;
3743     abi_ulong shmall;
3744 };
3745 
3746 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3747                                               struct shminfo *host_shminfo)
3748 {
3749     struct target_shminfo *target_shminfo;
3750     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3751         return -TARGET_EFAULT;
3752     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3753     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3754     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3755     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3756     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3757     unlock_user_struct(target_shminfo, target_addr, 1);
3758     return 0;
3759 }
3760 
3761 struct target_shm_info {
3762     int used_ids;
3763     abi_ulong shm_tot;
3764     abi_ulong shm_rss;
3765     abi_ulong shm_swp;
3766     abi_ulong swap_attempts;
3767     abi_ulong swap_successes;
3768 };
3769 
3770 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3771                                                struct shm_info *host_shm_info)
3772 {
3773     struct target_shm_info *target_shm_info;
3774     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3775         return -TARGET_EFAULT;
3776     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3777     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3778     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3779     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3780     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3781     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3782     unlock_user_struct(target_shm_info, target_addr, 1);
3783     return 0;
3784 }
3785 
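/* Emulate shmctl(), converting the shmid_ds, shminfo or shm_info buffer
 * between guest and host layouts as required by the command.
 */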
3786 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3787 {
3788     struct shmid_ds dsarg;
3789     struct shminfo shminfo;
3790     struct shm_info shm_info;
3791     abi_long ret = -TARGET_EINVAL;
3792 
3793     cmd &= 0xff;
3794 
3795     switch(cmd) {
3796     case IPC_STAT:
3797     case IPC_SET:
3798     case SHM_STAT:
3799         if (target_to_host_shmid_ds(&dsarg, buf))
3800             return -TARGET_EFAULT;
3801         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3802         if (host_to_target_shmid_ds(buf, &dsarg))
3803             return -TARGET_EFAULT;
3804         break;
3805     case IPC_INFO:
3806         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3807         if (host_to_target_shminfo(buf, &shminfo))
3808             return -TARGET_EFAULT;
3809         break;
3810     case SHM_INFO:
3811         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3812         if (host_to_target_shm_info(buf, &shm_info))
3813             return -TARGET_EFAULT;
3814         break;
3815     case IPC_RMID:
3816     case SHM_LOCK:
3817     case SHM_UNLOCK:
3818         ret = get_errno(shmctl(shmid, cmd, NULL));
3819         break;
3820     }
3821 
3822     return ret;
3823 }
3824 
3825 #ifndef TARGET_FORCE_SHMLBA
3826 /* For most architectures, SHMLBA is the same as the page size;
3827  * some architectures have larger values, in which case they should
3828  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3829  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3830  * and defining its own value for SHMLBA.
3831  *
3832  * The kernel also permits SHMLBA to be set by the architecture to a
3833  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3834  * this means that addresses are rounded to the large size if
3835  * SHM_RND is set but addresses not aligned to that size are not rejected
3836  * as long as they are at least page-aligned. Since the only architecture
3837  * which uses this is ia64 this code doesn't provide for that oddity.
3838  */
3839 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3840 {
3841     return TARGET_PAGE_SIZE;
3842 }
3843 #endif
3844 
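/* Attach a SysV shared memory segment into the guest address space,
 * honouring the target's SHMLBA alignment, and record the mapping in
 * shm_regions[] so that do_shmdt() can later clear the page flags.
 */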
3845 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3846                                  int shmid, abi_ulong shmaddr, int shmflg)
3847 {
3848     abi_long raddr;
3849     void *host_raddr;
3850     struct shmid_ds shm_info;
3851     int i, ret;
3852     abi_ulong shmlba;
3853 
3854     /* find out the length of the shared memory segment */
3855     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3856     if (is_error(ret)) {
3857         /* can't get length, bail out */
3858         return ret;
3859     }
3860 
3861     shmlba = target_shmlba(cpu_env);
3862 
3863     if (shmaddr & (shmlba - 1)) {
3864         if (shmflg & SHM_RND) {
3865             shmaddr &= ~(shmlba - 1);
3866         } else {
3867             return -TARGET_EINVAL;
3868         }
3869     }
3870     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3871         return -TARGET_EINVAL;
3872     }
3873 
3874     mmap_lock();
3875 
3876     if (shmaddr)
3877         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3878     else {
3879         abi_ulong mmap_start;
3880 
3881         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3882 
3883         if (mmap_start == -1) {
3884             errno = ENOMEM;
3885             host_raddr = (void *)-1;
3886         } else
3887             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3888     }
3889 
3890     if (host_raddr == (void *)-1) {
3891         mmap_unlock();
3892         return get_errno((long)host_raddr);
3893     }
3894     raddr = h2g((unsigned long)host_raddr);
3895 
3896     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3897                    PAGE_VALID | PAGE_READ |
3898                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3899 
3900     for (i = 0; i < N_SHM_REGIONS; i++) {
3901         if (!shm_regions[i].in_use) {
3902             shm_regions[i].in_use = true;
3903             shm_regions[i].start = raddr;
3904             shm_regions[i].size = shm_info.shm_segsz;
3905             break;
3906         }
3907     }
3908 
3909     mmap_unlock();
3910     return raddr;
3911 
3912 }
3913 
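/* Detach a segment previously attached by do_shmat() and clear the page
 * flags that were set for it.
 */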
3914 static inline abi_long do_shmdt(abi_ulong shmaddr)
3915 {
3916     int i;
3917     abi_long rv;
3918 
3919     mmap_lock();
3920 
3921     for (i = 0; i < N_SHM_REGIONS; ++i) {
3922         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3923             shm_regions[i].in_use = false;
3924             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3925             break;
3926         }
3927     }
3928     rv = get_errno(shmdt(g2h(shmaddr)));
3929 
3930     mmap_unlock();
3931 
3932     return rv;
3933 }
3934 
3935 #ifdef TARGET_NR_ipc
3936 /* ??? This only works with linear mappings.  */
3937 /* do_ipc() must return target values and target errnos. */
3938 static abi_long do_ipc(CPUArchState *cpu_env,
3939                        unsigned int call, abi_long first,
3940                        abi_long second, abi_long third,
3941                        abi_long ptr, abi_long fifth)
3942 {
3943     int version;
3944     abi_long ret = 0;
3945 
3946     version = call >> 16;
3947     call &= 0xffff;
3948 
3949     switch (call) {
3950     case IPCOP_semop:
3951         ret = do_semop(first, ptr, second);
3952         break;
3953 
3954     case IPCOP_semget:
3955         ret = get_errno(semget(first, second, third));
3956         break;
3957 
3958     case IPCOP_semctl: {
3959         /* The semun argument to semctl is passed by value, so dereference the
3960          * ptr argument. */
3961         abi_ulong atptr;
3962         get_user_ual(atptr, ptr);
3963         ret = do_semctl(first, second, third, atptr);
3964         break;
3965     }
3966 
3967     case IPCOP_msgget:
3968         ret = get_errno(msgget(first, second));
3969         break;
3970 
3971     case IPCOP_msgsnd:
3972         ret = do_msgsnd(first, ptr, second, third);
3973         break;
3974 
3975     case IPCOP_msgctl:
3976         ret = do_msgctl(first, second, ptr);
3977         break;
3978 
3979     case IPCOP_msgrcv:
3980         switch (version) {
3981         case 0:
3982             {
3983                 struct target_ipc_kludge {
3984                     abi_long msgp;
3985                     abi_long msgtyp;
3986                 } *tmp;
3987 
3988                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3989                     ret = -TARGET_EFAULT;
3990                     break;
3991                 }
3992 
3993                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3994 
3995                 unlock_user_struct(tmp, ptr, 0);
3996                 break;
3997             }
3998         default:
3999             ret = do_msgrcv(first, ptr, second, fifth, third);
4000         }
4001         break;
4002 
4003     case IPCOP_shmat:
4004         switch (version) {
4005         default:
4006         {
4007             abi_ulong raddr;
4008             raddr = do_shmat(cpu_env, first, ptr, second);
4009             if (is_error(raddr))
4010                 return get_errno(raddr);
4011             if (put_user_ual(raddr, third))
4012                 return -TARGET_EFAULT;
4013             break;
4014         }
4015         case 1:
4016             ret = -TARGET_EINVAL;
4017             break;
4018         }
4019         break;
4020     case IPCOP_shmdt:
4021         ret = do_shmdt(ptr);
4022         break;
4023 
4024     case IPCOP_shmget:
4025         /* IPC_* flag values are the same on all Linux platforms */
4026         ret = get_errno(shmget(first, second, third));
4027         break;
4028 
4029     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4030     case IPCOP_shmctl:
4031         ret = do_shmctl(first, second, ptr);
4032         break;
4033     default:
4034         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4035         ret = -TARGET_ENOSYS;
4036         break;
4037     }
4038     return ret;
4039 }
4040 #endif
4041 
4042 /* kernel structure types definitions */
4043 
4044 #define STRUCT(name, ...) STRUCT_ ## name,
4045 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4046 enum {
4047 #include "syscall_types.h"
4048 STRUCT_MAX
4049 };
4050 #undef STRUCT
4051 #undef STRUCT_SPECIAL
4052 
4053 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4054 #define STRUCT_SPECIAL(name)
4055 #include "syscall_types.h"
4056 #undef STRUCT
4057 #undef STRUCT_SPECIAL
4058 
4059 typedef struct IOCTLEntry IOCTLEntry;
4060 
4061 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4062                              int fd, int cmd, abi_long arg);
4063 
4064 struct IOCTLEntry {
4065     int target_cmd;
4066     unsigned int host_cmd;
4067     const char *name;
4068     int access;
4069     do_ioctl_fn *do_ioctl;
4070     const argtype arg_type[5];
4071 };
4072 
4073 #define IOC_R 0x0001
4074 #define IOC_W 0x0002
4075 #define IOC_RW (IOC_R | IOC_W)
4076 
4077 #define MAX_STRUCT_SIZE 4096
4078 
4079 #ifdef CONFIG_FIEMAP
4080 /* So fiemap access checks don't overflow on 32 bit systems.
4081  * This is very slightly smaller than the limit imposed by
4082  * the underlying kernel.
4083  */
4084 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4085                             / sizeof(struct fiemap_extent))
4086 
4087 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4088                                        int fd, int cmd, abi_long arg)
4089 {
4090     /* The parameter for this ioctl is a struct fiemap followed
4091      * by an array of struct fiemap_extent whose size is set
4092      * in fiemap->fm_extent_count. The array is filled in by the
4093      * ioctl.
4094      */
4095     int target_size_in, target_size_out;
4096     struct fiemap *fm;
4097     const argtype *arg_type = ie->arg_type;
4098     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4099     void *argptr, *p;
4100     abi_long ret;
4101     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4102     uint32_t outbufsz;
4103     int free_fm = 0;
4104 
4105     assert(arg_type[0] == TYPE_PTR);
4106     assert(ie->access == IOC_RW);
4107     arg_type++;
4108     target_size_in = thunk_type_size(arg_type, 0);
4109     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4110     if (!argptr) {
4111         return -TARGET_EFAULT;
4112     }
4113     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4114     unlock_user(argptr, arg, 0);
4115     fm = (struct fiemap *)buf_temp;
4116     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4117         return -TARGET_EINVAL;
4118     }
4119 
4120     outbufsz = sizeof (*fm) +
4121         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4122 
4123     if (outbufsz > MAX_STRUCT_SIZE) {
4124         /* We can't fit all the extents into the fixed size buffer.
4125          * Allocate one that is large enough and use it instead.
4126          */
4127         fm = g_try_malloc(outbufsz);
4128         if (!fm) {
4129             return -TARGET_ENOMEM;
4130         }
4131         memcpy(fm, buf_temp, sizeof(struct fiemap));
4132         free_fm = 1;
4133     }
4134     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4135     if (!is_error(ret)) {
4136         target_size_out = target_size_in;
4137         /* An extent_count of 0 means we were only counting the extents
4138          * so there are no structs to copy
4139          */
4140         if (fm->fm_extent_count != 0) {
4141             target_size_out += fm->fm_mapped_extents * extent_size;
4142         }
4143         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4144         if (!argptr) {
4145             ret = -TARGET_EFAULT;
4146         } else {
4147             /* Convert the struct fiemap */
4148             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4149             if (fm->fm_extent_count != 0) {
4150                 p = argptr + target_size_in;
4151                 /* ...and then all the struct fiemap_extents */
4152                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4153                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4154                                   THUNK_TARGET);
4155                     p += extent_size;
4156                 }
4157             }
4158             unlock_user(argptr, arg, target_size_out);
4159         }
4160     }
4161     if (free_fm) {
4162         g_free(fm);
4163     }
4164     return ret;
4165 }
4166 #endif
4167 
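/* Handle ioctls that take a struct ifconf (e.g. SIOCGIFCONF): ifc_buf
 * points at an array of struct ifreq, so both the ifconf itself and the
 * embedded ifreq array must be converted in each direction.
 */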
4168 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4169                                 int fd, int cmd, abi_long arg)
4170 {
4171     const argtype *arg_type = ie->arg_type;
4172     int target_size;
4173     void *argptr;
4174     int ret;
4175     struct ifconf *host_ifconf;
4176     uint32_t outbufsz;
4177     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4178     int target_ifreq_size;
4179     int nb_ifreq;
4180     int free_buf = 0;
4181     int i;
4182     int target_ifc_len;
4183     abi_long target_ifc_buf;
4184     int host_ifc_len;
4185     char *host_ifc_buf;
4186 
4187     assert(arg_type[0] == TYPE_PTR);
4188     assert(ie->access == IOC_RW);
4189 
4190     arg_type++;
4191     target_size = thunk_type_size(arg_type, 0);
4192 
4193     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4194     if (!argptr)
4195         return -TARGET_EFAULT;
4196     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4197     unlock_user(argptr, arg, 0);
4198 
4199     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4200     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4201     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4202 
4203     if (target_ifc_buf != 0) {
4204         target_ifc_len = host_ifconf->ifc_len;
4205         nb_ifreq = target_ifc_len / target_ifreq_size;
4206         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4207 
4208         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4209         if (outbufsz > MAX_STRUCT_SIZE) {
4210             /*
4211              * We can't fit all the ifreq entries into the fixed size buffer.
4212              * Allocate one that is large enough and use it instead.
4213              */
4214             host_ifconf = malloc(outbufsz);
4215             if (!host_ifconf) {
4216                 return -TARGET_ENOMEM;
4217             }
4218             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4219             free_buf = 1;
4220         }
4221         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4222 
4223         host_ifconf->ifc_len = host_ifc_len;
4224     } else {
4225         host_ifc_buf = NULL;
4226     }
4227     host_ifconf->ifc_buf = host_ifc_buf;
4228 
4229     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4230     if (!is_error(ret)) {
4231         /* convert host ifc_len to target ifc_len */
4232 
4233         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4234         target_ifc_len = nb_ifreq * target_ifreq_size;
4235         host_ifconf->ifc_len = target_ifc_len;
4236 
4237         /* restore target ifc_buf */
4238 
4239         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4240 
4241         /* copy struct ifconf to target user */
4242 
4243         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4244         if (!argptr)
4245             return -TARGET_EFAULT;
4246         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4247         unlock_user(argptr, arg, target_size);
4248 
4249         if (target_ifc_buf != 0) {
4250             /* copy ifreq[] to target user */
4251             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4252             for (i = 0; i < nb_ifreq ; i++) {
4253                 thunk_convert(argptr + i * target_ifreq_size,
4254                               host_ifc_buf + i * sizeof(struct ifreq),
4255                               ifreq_arg_type, THUNK_TARGET);
4256             }
4257             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4258         }
4259     }
4260 
4261     if (free_buf) {
4262         free(host_ifconf);
4263     }
4264 
4265     return ret;
4266 }
4267 
4268 #if defined(CONFIG_USBFS)
4269 #if HOST_LONG_BITS > 64
4270 #error USBDEVFS thunks do not support >64 bit hosts yet.
4271 #endif
4272 struct live_urb {
4273     uint64_t target_urb_adr;
4274     uint64_t target_buf_adr;
4275     char *target_buf_ptr;
4276     struct usbdevfs_urb host_urb;
4277 };
4278 
4279 static GHashTable *usbdevfs_urb_hashtable(void)
4280 {
4281     static GHashTable *urb_hashtable;
4282 
4283     if (!urb_hashtable) {
4284         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4285     }
4286     return urb_hashtable;
4287 }
4288 
4289 static void urb_hashtable_insert(struct live_urb *urb)
4290 {
4291     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4292     g_hash_table_insert(urb_hashtable, urb, urb);
4293 }
4294 
4295 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4296 {
4297     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4298     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4299 }
4300 
4301 static void urb_hashtable_remove(struct live_urb *urb)
4302 {
4303     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4304     g_hash_table_remove(urb_hashtable, urb);
4305 }
4306 
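/* Reap a completed URB: the kernel returns a pointer to one of our host
 * urbs, from which we recover the owning live_urb wrapper, copy the
 * completed urb back to the guest, and write the guest's urb address
 * through arg.
 */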
4307 static abi_long
4308 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4309                           int fd, int cmd, abi_long arg)
4310 {
4311     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4312     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4313     struct live_urb *lurb;
4314     void *argptr;
4315     uint64_t hurb;
4316     int target_size;
4317     uintptr_t target_urb_adr;
4318     abi_long ret;
4319 
4320     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4321 
4322     memset(buf_temp, 0, sizeof(uint64_t));
4323     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4324     if (is_error(ret)) {
4325         return ret;
4326     }
4327 
4328     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4329     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4330     if (!lurb->target_urb_adr) {
4331         return -TARGET_EFAULT;
4332     }
4333     urb_hashtable_remove(lurb);
4334     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4335         lurb->host_urb.buffer_length);
4336     lurb->target_buf_ptr = NULL;
4337 
4338     /* restore the guest buffer pointer */
4339     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4340 
4341     /* update the guest urb struct */
4342     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4343     if (!argptr) {
4344         g_free(lurb);
4345         return -TARGET_EFAULT;
4346     }
4347     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4348     unlock_user(argptr, lurb->target_urb_adr, target_size);
4349 
4350     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4351     /* write back the urb handle */
4352     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4353     if (!argptr) {
4354         g_free(lurb);
4355         return -TARGET_EFAULT;
4356     }
4357 
4358     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4359     target_urb_adr = lurb->target_urb_adr;
4360     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4361     unlock_user(argptr, arg, target_size);
4362 
4363     g_free(lurb);
4364     return ret;
4365 }
4366 
4367 static abi_long
4368 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4369                              uint8_t *buf_temp __attribute__((unused)),
4370                              int fd, int cmd, abi_long arg)
4371 {
4372     struct live_urb *lurb;
4373 
4374     /* map target address back to host URB with metadata. */
4375     lurb = urb_hashtable_lookup(arg);
4376     if (!lurb) {
4377         return -TARGET_EFAULT;
4378     }
4379     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4380 }
4381 
4382 static abi_long
4383 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4384                             int fd, int cmd, abi_long arg)
4385 {
4386     const argtype *arg_type = ie->arg_type;
4387     int target_size;
4388     abi_long ret;
4389     void *argptr;
4390     int rw_dir;
4391     struct live_urb *lurb;
4392 
4393     /*
4394      * Each submitted URB needs to map to a unique ID for the
4395      * kernel, and that unique ID needs to be a pointer to
4396      * host memory.  Hence, we need to malloc for each URB.
4397      * Isochronous transfers have a variable-length struct.
4398      */
4399     arg_type++;
4400     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4401 
4402     /* construct host copy of urb and metadata */
4403     lurb = g_try_malloc0(sizeof(struct live_urb));
4404     if (!lurb) {
4405         return -TARGET_ENOMEM;
4406     }
4407 
4408     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4409     if (!argptr) {
4410         g_free(lurb);
4411         return -TARGET_EFAULT;
4412     }
4413     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4414     unlock_user(argptr, arg, 0);
4415 
4416     lurb->target_urb_adr = arg;
4417     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4418 
4419     /* buffer space used depends on endpoint type so lock the entire buffer */
4420     /* control type urbs should check the buffer contents for true direction */
4421     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4422     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4423         lurb->host_urb.buffer_length, 1);
4424     if (lurb->target_buf_ptr == NULL) {
4425         g_free(lurb);
4426         return -TARGET_EFAULT;
4427     }
4428 
4429     /* update buffer pointer in host copy */
4430     lurb->host_urb.buffer = lurb->target_buf_ptr;
4431 
4432     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4433     if (is_error(ret)) {
4434         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4435         g_free(lurb);
4436     } else {
4437         urb_hashtable_insert(lurb);
4438     }
4439 
4440     return ret;
4441 }
4442 #endif /* CONFIG_USBFS */
4443 
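/* Handle device-mapper (DM_*) ioctls.  The dm_ioctl header is followed by
 * a variable-size, command-specific payload, which is converted by hand
 * for each command here.
 */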
4444 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4445                             int cmd, abi_long arg)
4446 {
4447     void *argptr;
4448     struct dm_ioctl *host_dm;
4449     abi_long guest_data;
4450     uint32_t guest_data_size;
4451     int target_size;
4452     const argtype *arg_type = ie->arg_type;
4453     abi_long ret;
4454     void *big_buf = NULL;
4455     char *host_data;
4456 
4457     arg_type++;
4458     target_size = thunk_type_size(arg_type, 0);
4459     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4460     if (!argptr) {
4461         ret = -TARGET_EFAULT;
4462         goto out;
4463     }
4464     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4465     unlock_user(argptr, arg, 0);
4466 
4467     /* buf_temp is too small, so fetch things into a bigger buffer */
4468     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4469     memcpy(big_buf, buf_temp, target_size);
4470     buf_temp = big_buf;
4471     host_dm = big_buf;
4472 
4473     guest_data = arg + host_dm->data_start;
4474     if ((guest_data - arg) < 0) {
4475         ret = -TARGET_EINVAL;
4476         goto out;
4477     }
4478     guest_data_size = host_dm->data_size - host_dm->data_start;
4479     host_data = (char*)host_dm + host_dm->data_start;
4480 
4481     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4482     if (!argptr) {
4483         ret = -TARGET_EFAULT;
4484         goto out;
4485     }
4486 
4487     switch (ie->host_cmd) {
4488     case DM_REMOVE_ALL:
4489     case DM_LIST_DEVICES:
4490     case DM_DEV_CREATE:
4491     case DM_DEV_REMOVE:
4492     case DM_DEV_SUSPEND:
4493     case DM_DEV_STATUS:
4494     case DM_DEV_WAIT:
4495     case DM_TABLE_STATUS:
4496     case DM_TABLE_CLEAR:
4497     case DM_TABLE_DEPS:
4498     case DM_LIST_VERSIONS:
4499         /* no input data */
4500         break;
4501     case DM_DEV_RENAME:
4502     case DM_DEV_SET_GEOMETRY:
4503         /* data contains only strings */
4504         memcpy(host_data, argptr, guest_data_size);
4505         break;
4506     case DM_TARGET_MSG:
4507         memcpy(host_data, argptr, guest_data_size);
4508         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4509         break;
4510     case DM_TABLE_LOAD:
4511     {
4512         void *gspec = argptr;
4513         void *cur_data = host_data;
4514         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4515         int spec_size = thunk_type_size(arg_type, 0);
4516         int i;
4517 
4518         for (i = 0; i < host_dm->target_count; i++) {
4519             struct dm_target_spec *spec = cur_data;
4520             uint32_t next;
4521             int slen;
4522 
4523             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4524             slen = strlen((char*)gspec + spec_size) + 1;
4525             next = spec->next;
4526             spec->next = sizeof(*spec) + slen;
4527             strcpy((char*)&spec[1], gspec + spec_size);
4528             gspec += next;
4529             cur_data += spec->next;
4530         }
4531         break;
4532     }
4533     default:
4534         ret = -TARGET_EINVAL;
4535         unlock_user(argptr, guest_data, 0);
4536         goto out;
4537     }
4538     unlock_user(argptr, guest_data, 0);
4539 
4540     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4541     if (!is_error(ret)) {
4542         guest_data = arg + host_dm->data_start;
4543         guest_data_size = host_dm->data_size - host_dm->data_start;
4544         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4545         switch (ie->host_cmd) {
4546         case DM_REMOVE_ALL:
4547         case DM_DEV_CREATE:
4548         case DM_DEV_REMOVE:
4549         case DM_DEV_RENAME:
4550         case DM_DEV_SUSPEND:
4551         case DM_DEV_STATUS:
4552         case DM_TABLE_LOAD:
4553         case DM_TABLE_CLEAR:
4554         case DM_TARGET_MSG:
4555         case DM_DEV_SET_GEOMETRY:
4556             /* no return data */
4557             break;
4558         case DM_LIST_DEVICES:
4559         {
4560             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4561             uint32_t remaining_data = guest_data_size;
4562             void *cur_data = argptr;
4563             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4564             int nl_size = 12; /* can't use thunk_size due to alignment */
4565 
4566             while (1) {
4567                 uint32_t next = nl->next;
4568                 if (next) {
4569                     nl->next = nl_size + (strlen(nl->name) + 1);
4570                 }
4571                 if (remaining_data < nl->next) {
4572                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4573                     break;
4574                 }
4575                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4576                 strcpy(cur_data + nl_size, nl->name);
4577                 cur_data += nl->next;
4578                 remaining_data -= nl->next;
4579                 if (!next) {
4580                     break;
4581                 }
4582                 nl = (void*)nl + next;
4583             }
4584             break;
4585         }
4586         case DM_DEV_WAIT:
4587         case DM_TABLE_STATUS:
4588         {
4589             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4590             void *cur_data = argptr;
4591             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4592             int spec_size = thunk_type_size(arg_type, 0);
4593             int i;
4594 
4595             for (i = 0; i < host_dm->target_count; i++) {
4596                 uint32_t next = spec->next;
4597                 int slen = strlen((char*)&spec[1]) + 1;
4598                 spec->next = (cur_data - argptr) + spec_size + slen;
4599                 if (guest_data_size < spec->next) {
4600                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4601                     break;
4602                 }
4603                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4604                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4605                 cur_data = argptr + spec->next;
4606                 spec = (void*)host_dm + host_dm->data_start + next;
4607             }
4608             break;
4609         }
4610         case DM_TABLE_DEPS:
4611         {
4612             void *hdata = (void*)host_dm + host_dm->data_start;
4613             int count = *(uint32_t*)hdata;
4614             uint64_t *hdev = hdata + 8;
4615             uint64_t *gdev = argptr + 8;
4616             int i;
4617 
4618             *(uint32_t*)argptr = tswap32(count);
4619             for (i = 0; i < count; i++) {
4620                 *gdev = tswap64(*hdev);
4621                 gdev++;
4622                 hdev++;
4623             }
4624             break;
4625         }
4626         case DM_LIST_VERSIONS:
4627         {
4628             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4629             uint32_t remaining_data = guest_data_size;
4630             void *cur_data = argptr;
4631             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4632             int vers_size = thunk_type_size(arg_type, 0);
4633 
4634             while (1) {
4635                 uint32_t next = vers->next;
4636                 if (next) {
4637                     vers->next = vers_size + (strlen(vers->name) + 1);
4638                 }
4639                 if (remaining_data < vers->next) {
4640                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4641                     break;
4642                 }
4643                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4644                 strcpy(cur_data + vers_size, vers->name);
4645                 cur_data += vers->next;
4646                 remaining_data -= vers->next;
4647                 if (!next) {
4648                     break;
4649                 }
4650                 vers = (void*)vers + next;
4651             }
4652             break;
4653         }
4654         default:
4655             unlock_user(argptr, guest_data, 0);
4656             ret = -TARGET_EINVAL;
4657             goto out;
4658         }
4659         unlock_user(argptr, guest_data, guest_data_size);
4660 
4661         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4662         if (!argptr) {
4663             ret = -TARGET_EFAULT;
4664             goto out;
4665         }
4666         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4667         unlock_user(argptr, arg, target_size);
4668     }
4669 out:
4670     g_free(big_buf);
4671     return ret;
4672 }
4673 
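/*
 * BLKPG carries a nested pointer: convert the outer blkpg_ioctl_arg, then
 * fetch and convert the guest blkpg_partition it points to, and finally
 * repoint the host structure at our local copy before issuing the ioctl.
 */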
4674 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4675                                int cmd, abi_long arg)
4676 {
4677     void *argptr;
4678     int target_size;
4679     const argtype *arg_type = ie->arg_type;
4680     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4681     abi_long ret;
4682 
4683     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4684     struct blkpg_partition host_part;
4685 
4686     /* Read and convert blkpg */
4687     arg_type++;
4688     target_size = thunk_type_size(arg_type, 0);
4689     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4690     if (!argptr) {
4691         ret = -TARGET_EFAULT;
4692         goto out;
4693     }
4694     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4695     unlock_user(argptr, arg, 0);
4696 
4697     switch (host_blkpg->op) {
4698     case BLKPG_ADD_PARTITION:
4699     case BLKPG_DEL_PARTITION:
4700         /* payload is struct blkpg_partition */
4701         break;
4702     default:
4703         /* Unknown opcode */
4704         ret = -TARGET_EINVAL;
4705         goto out;
4706     }
4707 
4708     /* Read and convert blkpg->data */
4709     arg = (abi_long)(uintptr_t)host_blkpg->data;
4710     target_size = thunk_type_size(part_arg_type, 0);
4711     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4712     if (!argptr) {
4713         ret = -TARGET_EFAULT;
4714         goto out;
4715     }
4716     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4717     unlock_user(argptr, arg, 0);
4718 
4719     /* Swizzle the data pointer to our local copy and call! */
4720     host_blkpg->data = &host_part;
4721     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4722 
4723 out:
4724     return ret;
4725 }
4726 
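/*
 * Handler for ioctls that take a struct rtentry: the embedded rt_dev
 * string pointer must be translated to a host pointer, so the structure
 * is converted field by field and the device name is locked separately.
 */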
4727 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4728                                 int fd, int cmd, abi_long arg)
4729 {
4730     const argtype *arg_type = ie->arg_type;
4731     const StructEntry *se;
4732     const argtype *field_types;
4733     const int *dst_offsets, *src_offsets;
4734     int target_size;
4735     void *argptr;
4736     abi_ulong *target_rt_dev_ptr = NULL;
4737     unsigned long *host_rt_dev_ptr = NULL;
4738     abi_long ret;
4739     int i;
4740 
4741     assert(ie->access == IOC_W);
4742     assert(*arg_type == TYPE_PTR);
4743     arg_type++;
4744     assert(*arg_type == TYPE_STRUCT);
4745     target_size = thunk_type_size(arg_type, 0);
4746     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4747     if (!argptr) {
4748         return -TARGET_EFAULT;
4749     }
4750     arg_type++;
4751     assert(*arg_type == (int)STRUCT_rtentry);
4752     se = struct_entries + *arg_type++;
4753     assert(se->convert[0] == NULL);
4754     /* convert struct here to be able to catch rt_dev string */
4755     field_types = se->field_types;
4756     dst_offsets = se->field_offsets[THUNK_HOST];
4757     src_offsets = se->field_offsets[THUNK_TARGET];
4758     for (i = 0; i < se->nb_fields; i++) {
4759         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4760             assert(*field_types == TYPE_PTRVOID);
4761             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4762             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4763             if (*target_rt_dev_ptr != 0) {
4764                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4765                                                   tswapal(*target_rt_dev_ptr));
4766                 if (!*host_rt_dev_ptr) {
4767                     unlock_user(argptr, arg, 0);
4768                     return -TARGET_EFAULT;
4769                 }
4770             } else {
4771                 *host_rt_dev_ptr = 0;
4772             }
4773             field_types++;
4774             continue;
4775         }
4776         field_types = thunk_convert(buf_temp + dst_offsets[i],
4777                                     argptr + src_offsets[i],
4778                                     field_types, THUNK_HOST);
4779     }
4780     unlock_user(argptr, arg, 0);
4781 
4782     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4783 
4784     assert(host_rt_dev_ptr != NULL);
4785     assert(target_rt_dev_ptr != NULL);
4786     if (*host_rt_dev_ptr != 0) {
4787         unlock_user((void *)*host_rt_dev_ptr,
4788                     *target_rt_dev_ptr, 0);
4789     }
4790     return ret;
4791 }
4792 
4793 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4794                                      int fd, int cmd, abi_long arg)
4795 {
4796     int sig = target_to_host_signal(arg);
4797     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4798 }
4799 
4800 #ifdef TIOCGPTPEER
4801 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4802                                      int fd, int cmd, abi_long arg)
4803 {
4804     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4805     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4806 }
4807 #endif
4808 
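/*
 * Table of supported ioctls, built from ioctls.h.  IOCTL() entries are
 * converted generically via the thunk argument descriptors, IOCTL_SPECIAL()
 * entries dispatch to a custom do_ioctl_*() helper, and IOCTL_IGNORE()
 * entries are recognized (so they are not logged as unsupported) but fail
 * with -TARGET_ENOSYS.
 */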
4809 static IOCTLEntry ioctl_entries[] = {
4810 #define IOCTL(cmd, access, ...) \
4811     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4812 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4813     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4814 #define IOCTL_IGNORE(cmd) \
4815     { TARGET_ ## cmd, 0, #cmd },
4816 #include "ioctls.h"
4817     { 0, 0, },
4818 };
4819 
4820 /* ??? Implement proper locking for ioctls.  */
4821 /* do_ioctl() must return target values and target errnos. */
4822 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4823 {
4824     const IOCTLEntry *ie;
4825     const argtype *arg_type;
4826     abi_long ret;
4827     uint8_t buf_temp[MAX_STRUCT_SIZE];
4828     int target_size;
4829     void *argptr;
4830 
4831     ie = ioctl_entries;
4832     for(;;) {
4833         if (ie->target_cmd == 0) {
4834             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4835             return -TARGET_ENOSYS;
4836         }
4837         if (ie->target_cmd == cmd)
4838             break;
4839         ie++;
4840     }
4841     arg_type = ie->arg_type;
4842     if (ie->do_ioctl) {
4843         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4844     } else if (!ie->host_cmd) {
4845         /* Some architectures define BSD ioctls in their headers
4846            that are not implemented in Linux.  */
4847         return -TARGET_ENOSYS;
4848     }
4849 
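    /*
     * Generic conversion path: IOC_R results are converted back to the
     * guest after the call, IOC_W arguments are converted to the host
     * before it, and IOC_RW does both, using buf_temp as a bounce buffer.
     */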
4850     switch(arg_type[0]) {
4851     case TYPE_NULL:
4852         /* no argument */
4853         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4854         break;
4855     case TYPE_PTRVOID:
4856     case TYPE_INT:
4857         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4858         break;
4859     case TYPE_PTR:
4860         arg_type++;
4861         target_size = thunk_type_size(arg_type, 0);
4862         switch(ie->access) {
4863         case IOC_R:
4864             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4865             if (!is_error(ret)) {
4866                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4867                 if (!argptr)
4868                     return -TARGET_EFAULT;
4869                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4870                 unlock_user(argptr, arg, target_size);
4871             }
4872             break;
4873         case IOC_W:
4874             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4875             if (!argptr)
4876                 return -TARGET_EFAULT;
4877             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4878             unlock_user(argptr, arg, 0);
4879             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4880             break;
4881         default:
4882         case IOC_RW:
4883             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4884             if (!argptr)
4885                 return -TARGET_EFAULT;
4886             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4887             unlock_user(argptr, arg, 0);
4888             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4889             if (!is_error(ret)) {
4890                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4891                 if (!argptr)
4892                     return -TARGET_EFAULT;
4893                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4894                 unlock_user(argptr, arg, target_size);
4895             }
4896             break;
4897         }
4898         break;
4899     default:
4900         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4901                  (long)cmd, arg_type[0]);
4902         ret = -TARGET_ENOSYS;
4903         break;
4904     }
4905     return ret;
4906 }
4907 
4908 static const bitmask_transtbl iflag_tbl[] = {
4909         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4910         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4911         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4912         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4913         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4914         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4915         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4916         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4917         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4918         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4919         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4920         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4921         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4922         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4923         { 0, 0, 0, 0 }
4924 };
4925 
4926 static const bitmask_transtbl oflag_tbl[] = {
4927 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4928 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4929 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4930 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4931 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4932 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4933 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4934 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4935 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4936 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4937 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4938 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4939 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4940 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4941 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4942 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4943 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4944 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4945 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4946 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4947 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4948 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4949 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4950 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4951 	{ 0, 0, 0, 0 }
4952 };
4953 
4954 static const bitmask_transtbl cflag_tbl[] = {
4955 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4956 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4957 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4958 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4959 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4960 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4961 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4962 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4963 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4964 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4965 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4966 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4967 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4968 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4969 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4970 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4971 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4972 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4973 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4974 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4975 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4976 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4977 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4978 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4979 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4980 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4981 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4982 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4983 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4984 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4985 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4986 	{ 0, 0, 0, 0 }
4987 };
4988 
4989 static const bitmask_transtbl lflag_tbl[] = {
4990 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4991 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4992 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4993 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4994 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4995 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4996 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4997 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4998 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4999 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5000 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5001 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5002 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5003 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5004 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5005 	{ 0, 0, 0, 0 }
5006 };
5007 
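/*
 * Convert a guest struct target_termios to the host layout: each flag
 * word is translated through the bitmask tables above and the control
 * characters are copied index by index, since the TARGET_V* and V*
 * subscripts need not coincide.
 */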
5008 static void target_to_host_termios (void *dst, const void *src)
5009 {
5010     struct host_termios *host = dst;
5011     const struct target_termios *target = src;
5012 
5013     host->c_iflag =
5014         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5015     host->c_oflag =
5016         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5017     host->c_cflag =
5018         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5019     host->c_lflag =
5020         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5021     host->c_line = target->c_line;
5022 
5023     memset(host->c_cc, 0, sizeof(host->c_cc));
5024     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5025     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5026     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5027     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5028     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5029     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5030     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5031     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5032     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5033     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5034     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5035     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5036     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5037     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5038     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5039     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5040     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5041 }
5042 
5043 static void host_to_target_termios (void *dst, const void *src)
5044 {
5045     struct target_termios *target = dst;
5046     const struct host_termios *host = src;
5047 
5048     target->c_iflag =
5049         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5050     target->c_oflag =
5051         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5052     target->c_cflag =
5053         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5054     target->c_lflag =
5055         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5056     target->c_line = host->c_line;
5057 
5058     memset(target->c_cc, 0, sizeof(target->c_cc));
5059     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5060     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5061     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5062     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5063     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5064     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5065     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5066     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5067     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5068     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5069     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5070     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5071     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5072     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5073     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5074     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5075     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5076 }
5077 
5078 static const StructEntry struct_termios_def = {
5079     .convert = { host_to_target_termios, target_to_host_termios },
5080     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5081     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5082 };
5083 
5084 static bitmask_transtbl mmap_flags_tbl[] = {
5085     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5086     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5087     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5088     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5089       MAP_ANONYMOUS, MAP_ANONYMOUS },
5090     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5091       MAP_GROWSDOWN, MAP_GROWSDOWN },
5092     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5093       MAP_DENYWRITE, MAP_DENYWRITE },
5094     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5095       MAP_EXECUTABLE, MAP_EXECUTABLE },
5096     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5097     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5098       MAP_NORESERVE, MAP_NORESERVE },
5099     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5100     /* MAP_STACK has been ignored by the kernel for quite some time.
5101        Recognize it for the target so that we do not pass it through
5102        to the host.  */
5103     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5104     { 0, 0, 0, 0 }
5105 };
5106 
5107 #if defined(TARGET_I386)
5108 
5109 /* NOTE: there is really only one LDT, shared by all threads */
5110 static uint8_t *ldt_table;
5111 
5112 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5113 {
5114     int size;
5115     void *p;
5116 
5117     if (!ldt_table)
5118         return 0;
5119     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5120     if (size > bytecount)
5121         size = bytecount;
5122     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5123     if (!p)
5124         return -TARGET_EFAULT;
5125     /* ??? Should this be byteswapped?  */
5126     memcpy(p, ldt_table, size);
5127     unlock_user(p, ptr, size);
5128     return size;
5129 }
5130 
5131 /* XXX: add locking support */
5132 static abi_long write_ldt(CPUX86State *env,
5133                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5134 {
5135     struct target_modify_ldt_ldt_s ldt_info;
5136     struct target_modify_ldt_ldt_s *target_ldt_info;
5137     int seg_32bit, contents, read_exec_only, limit_in_pages;
5138     int seg_not_present, useable, lm;
5139     uint32_t *lp, entry_1, entry_2;
5140 
5141     if (bytecount != sizeof(ldt_info))
5142         return -TARGET_EINVAL;
5143     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5144         return -TARGET_EFAULT;
5145     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5146     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5147     ldt_info.limit = tswap32(target_ldt_info->limit);
5148     ldt_info.flags = tswap32(target_ldt_info->flags);
5149     unlock_user_struct(target_ldt_info, ptr, 0);
5150 
5151     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5152         return -TARGET_EINVAL;
5153     seg_32bit = ldt_info.flags & 1;
5154     contents = (ldt_info.flags >> 1) & 3;
5155     read_exec_only = (ldt_info.flags >> 3) & 1;
5156     limit_in_pages = (ldt_info.flags >> 4) & 1;
5157     seg_not_present = (ldt_info.flags >> 5) & 1;
5158     useable = (ldt_info.flags >> 6) & 1;
5159 #ifdef TARGET_ABI32
5160     lm = 0;
5161 #else
5162     lm = (ldt_info.flags >> 7) & 1;
5163 #endif
5164     if (contents == 3) {
5165         if (oldmode)
5166             return -TARGET_EINVAL;
5167         if (seg_not_present == 0)
5168             return -TARGET_EINVAL;
5169     }
5170     /* allocate the LDT */
5171     if (!ldt_table) {
5172         env->ldt.base = target_mmap(0,
5173                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5174                                     PROT_READ|PROT_WRITE,
5175                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5176         if (env->ldt.base == -1)
5177             return -TARGET_ENOMEM;
5178         memset(g2h(env->ldt.base), 0,
5179                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5180         env->ldt.limit = 0xffff;
5181         ldt_table = g2h(env->ldt.base);
5182     }
5183 
5184     /* NOTE: same code as Linux kernel */
5185     /* Allow LDTs to be cleared by the user. */
5186     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5187         if (oldmode ||
5188             (contents == 0		&&
5189              read_exec_only == 1	&&
5190              seg_32bit == 0		&&
5191              limit_in_pages == 0	&&
5192              seg_not_present == 1	&&
5193              useable == 0 )) {
5194             entry_1 = 0;
5195             entry_2 = 0;
5196             goto install;
5197         }
5198     }
5199 
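    /* Pack the x86 descriptor words the same way the kernel does: entry_1
       carries base[15:0] and limit[15:0]; entry_2 carries the remaining
       base/limit bits and the access flags (0x7000 sets the S bit and
       DPL 3). */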
5200     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5201         (ldt_info.limit & 0x0ffff);
5202     entry_2 = (ldt_info.base_addr & 0xff000000) |
5203         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5204         (ldt_info.limit & 0xf0000) |
5205         ((read_exec_only ^ 1) << 9) |
5206         (contents << 10) |
5207         ((seg_not_present ^ 1) << 15) |
5208         (seg_32bit << 22) |
5209         (limit_in_pages << 23) |
5210         (lm << 21) |
5211         0x7000;
5212     if (!oldmode)
5213         entry_2 |= (useable << 20);
5214 
5215     /* Install the new entry ...  */
5216 install:
5217     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5218     lp[0] = tswap32(entry_1);
5219     lp[1] = tswap32(entry_2);
5220     return 0;
5221 }
5222 
5223 /* specific and weird i386 syscalls */
5224 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5225                               unsigned long bytecount)
5226 {
5227     abi_long ret;
5228 
5229     switch (func) {
5230     case 0:
5231         ret = read_ldt(ptr, bytecount);
5232         break;
5233     case 1:
5234         ret = write_ldt(env, ptr, bytecount, 1);
5235         break;
5236     case 0x11:
5237         ret = write_ldt(env, ptr, bytecount, 0);
5238         break;
5239     default:
5240         ret = -TARGET_ENOSYS;
5241         break;
5242     }
5243     return ret;
5244 }
5245 
5246 #if defined(TARGET_I386) && defined(TARGET_ABI32)
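/* set_thread_area: an entry_number of -1 asks us to allocate the first
   free GDT TLS slot; the chosen index is written back to the guest
   structure before the descriptor is installed. */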
5247 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5248 {
5249     uint64_t *gdt_table = g2h(env->gdt.base);
5250     struct target_modify_ldt_ldt_s ldt_info;
5251     struct target_modify_ldt_ldt_s *target_ldt_info;
5252     int seg_32bit, contents, read_exec_only, limit_in_pages;
5253     int seg_not_present, useable, lm;
5254     uint32_t *lp, entry_1, entry_2;
5255     int i;
5256 
5257     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5258     if (!target_ldt_info)
5259         return -TARGET_EFAULT;
5260     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5261     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5262     ldt_info.limit = tswap32(target_ldt_info->limit);
5263     ldt_info.flags = tswap32(target_ldt_info->flags);
5264     if (ldt_info.entry_number == -1) {
5265         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5266             if (gdt_table[i] == 0) {
5267                 ldt_info.entry_number = i;
5268                 target_ldt_info->entry_number = tswap32(i);
5269                 break;
5270             }
5271         }
5272     }
5273     unlock_user_struct(target_ldt_info, ptr, 1);
5274 
5275     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5276         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5277            return -TARGET_EINVAL;
5278     seg_32bit = ldt_info.flags & 1;
5279     contents = (ldt_info.flags >> 1) & 3;
5280     read_exec_only = (ldt_info.flags >> 3) & 1;
5281     limit_in_pages = (ldt_info.flags >> 4) & 1;
5282     seg_not_present = (ldt_info.flags >> 5) & 1;
5283     useable = (ldt_info.flags >> 6) & 1;
5284 #ifdef TARGET_ABI32
5285     lm = 0;
5286 #else
5287     lm = (ldt_info.flags >> 7) & 1;
5288 #endif
5289 
5290     if (contents == 3) {
5291         if (seg_not_present == 0)
5292             return -TARGET_EINVAL;
5293     }
5294 
5295     /* NOTE: same code as Linux kernel */
5296     /* Allow LDTs to be cleared by the user. */
5297     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5298         if ((contents == 0             &&
5299              read_exec_only == 1       &&
5300              seg_32bit == 0            &&
5301              limit_in_pages == 0       &&
5302              seg_not_present == 1      &&
5303              useable == 0 )) {
5304             entry_1 = 0;
5305             entry_2 = 0;
5306             goto install;
5307         }
5308     }
5309 
5310     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5311         (ldt_info.limit & 0x0ffff);
5312     entry_2 = (ldt_info.base_addr & 0xff000000) |
5313         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5314         (ldt_info.limit & 0xf0000) |
5315         ((read_exec_only ^ 1) << 9) |
5316         (contents << 10) |
5317         ((seg_not_present ^ 1) << 15) |
5318         (seg_32bit << 22) |
5319         (limit_in_pages << 23) |
5320         (useable << 20) |
5321         (lm << 21) |
5322         0x7000;
5323 
5324     /* Install the new entry ...  */
5325 install:
5326     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5327     lp[0] = tswap32(entry_1);
5328     lp[1] = tswap32(entry_2);
5329     return 0;
5330 }
5331 
5332 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5333 {
5334     struct target_modify_ldt_ldt_s *target_ldt_info;
5335     uint64_t *gdt_table = g2h(env->gdt.base);
5336     uint32_t base_addr, limit, flags;
5337     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5338     int seg_not_present, useable, lm;
5339     uint32_t *lp, entry_1, entry_2;
5340 
5341     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5342     if (!target_ldt_info)
5343         return -TARGET_EFAULT;
5344     idx = tswap32(target_ldt_info->entry_number);
5345     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5346         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5347         unlock_user_struct(target_ldt_info, ptr, 1);
5348         return -TARGET_EINVAL;
5349     }
5350     lp = (uint32_t *)(gdt_table + idx);
5351     entry_1 = tswap32(lp[0]);
5352     entry_2 = tswap32(lp[1]);
5353 
5354     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5355     contents = (entry_2 >> 10) & 3;
5356     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5357     seg_32bit = (entry_2 >> 22) & 1;
5358     limit_in_pages = (entry_2 >> 23) & 1;
5359     useable = (entry_2 >> 20) & 1;
5360 #ifdef TARGET_ABI32
5361     lm = 0;
5362 #else
5363     lm = (entry_2 >> 21) & 1;
5364 #endif
5365     flags = (seg_32bit << 0) | (contents << 1) |
5366         (read_exec_only << 3) | (limit_in_pages << 4) |
5367         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5368     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5369     base_addr = (entry_1 >> 16) |
5370         (entry_2 & 0xff000000) |
5371         ((entry_2 & 0xff) << 16);
5372     target_ldt_info->base_addr = tswapal(base_addr);
5373     target_ldt_info->limit = tswap32(limit);
5374     target_ldt_info->flags = tswap32(flags);
5375     unlock_user_struct(target_ldt_info, ptr, 1);
5376     return 0;
5377 }
5378 #endif /* TARGET_I386 && TARGET_ABI32 */
5379 
5380 #ifndef TARGET_ABI32
5381 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5382 {
5383     abi_long ret = 0;
5384     abi_ulong val;
5385     int idx;
5386 
5387     switch(code) {
5388     case TARGET_ARCH_SET_GS:
5389     case TARGET_ARCH_SET_FS:
5390         if (code == TARGET_ARCH_SET_GS)
5391             idx = R_GS;
5392         else
5393             idx = R_FS;
5394         cpu_x86_load_seg(env, idx, 0);
5395         env->segs[idx].base = addr;
5396         break;
5397     case TARGET_ARCH_GET_GS:
5398     case TARGET_ARCH_GET_FS:
5399         if (code == TARGET_ARCH_GET_GS)
5400             idx = R_GS;
5401         else
5402             idx = R_FS;
5403         val = env->segs[idx].base;
5404         if (put_user(val, addr, abi_ulong))
5405             ret = -TARGET_EFAULT;
5406         break;
5407     default:
5408         ret = -TARGET_EINVAL;
5409         break;
5410     }
5411     return ret;
5412 }
5413 #endif
5414 
5415 #endif /* defined(TARGET_I386) */
5416 
5417 #define NEW_STACK_SIZE 0x40000
5418 
5419 
5420 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5421 typedef struct {
5422     CPUArchState *env;
5423     pthread_mutex_t mutex;
5424     pthread_cond_t cond;
5425     pthread_t thread;
5426     uint32_t tid;
5427     abi_ulong child_tidptr;
5428     abi_ulong parent_tidptr;
5429     sigset_t sigmask;
5430 } new_thread_info;
5431 
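/*
 * Entry point of threads created with clone(CLONE_VM): register with RCU
 * and TCG, publish the child TID, restore the signal mask, wake the parent
 * via info->cond, then enter the guest cpu_loop() and never return.
 */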
5432 static void *clone_func(void *arg)
5433 {
5434     new_thread_info *info = arg;
5435     CPUArchState *env;
5436     CPUState *cpu;
5437     TaskState *ts;
5438 
5439     rcu_register_thread();
5440     tcg_register_thread();
5441     env = info->env;
5442     cpu = ENV_GET_CPU(env);
5443     thread_cpu = cpu;
5444     ts = (TaskState *)cpu->opaque;
5445     info->tid = gettid();
5446     task_settid(ts);
5447     if (info->child_tidptr)
5448         put_user_u32(info->tid, info->child_tidptr);
5449     if (info->parent_tidptr)
5450         put_user_u32(info->tid, info->parent_tidptr);
5451     /* Enable signals.  */
5452     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5453     /* Signal to the parent that we're ready.  */
5454     pthread_mutex_lock(&info->mutex);
5455     pthread_cond_broadcast(&info->cond);
5456     pthread_mutex_unlock(&info->mutex);
5457     /* Wait until the parent has finished initializing the tls state.  */
5458     pthread_mutex_lock(&clone_lock);
5459     pthread_mutex_unlock(&clone_lock);
5460     cpu_loop(env);
5461     /* never exits */
5462     return NULL;
5463 }
5464 
5465 /* do_fork() must return host values and target errnos (unlike most
5466    do_*() functions). */
5467 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5468                    abi_ulong parent_tidptr, target_ulong newtls,
5469                    abi_ulong child_tidptr)
5470 {
5471     CPUState *cpu = ENV_GET_CPU(env);
5472     int ret;
5473     TaskState *ts;
5474     CPUState *new_cpu;
5475     CPUArchState *new_env;
5476     sigset_t sigmask;
5477 
5478     flags &= ~CLONE_IGNORED_FLAGS;
5479 
5480     /* Emulate vfork() with fork() */
5481     if (flags & CLONE_VFORK)
5482         flags &= ~(CLONE_VFORK | CLONE_VM);
5483 
5484     if (flags & CLONE_VM) {
5485         TaskState *parent_ts = (TaskState *)cpu->opaque;
5486         new_thread_info info;
5487         pthread_attr_t attr;
5488 
5489         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5490             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5491             return -TARGET_EINVAL;
5492         }
5493 
5494         ts = g_new0(TaskState, 1);
5495         init_task_state(ts);
5496 
5497         /* Grab a mutex so that thread setup appears atomic.  */
5498         pthread_mutex_lock(&clone_lock);
5499 
5500         /* we create a new CPU instance. */
5501         new_env = cpu_copy(env);
5502         /* Init regs that differ from the parent.  */
5503         cpu_clone_regs(new_env, newsp);
5504         new_cpu = ENV_GET_CPU(new_env);
5505         new_cpu->opaque = ts;
5506         ts->bprm = parent_ts->bprm;
5507         ts->info = parent_ts->info;
5508         ts->signal_mask = parent_ts->signal_mask;
5509 
5510         if (flags & CLONE_CHILD_CLEARTID) {
5511             ts->child_tidptr = child_tidptr;
5512         }
5513 
5514         if (flags & CLONE_SETTLS) {
5515             cpu_set_tls (new_env, newtls);
5516         }
5517 
5518         memset(&info, 0, sizeof(info));
5519         pthread_mutex_init(&info.mutex, NULL);
5520         pthread_mutex_lock(&info.mutex);
5521         pthread_cond_init(&info.cond, NULL);
5522         info.env = new_env;
5523         if (flags & CLONE_CHILD_SETTID) {
5524             info.child_tidptr = child_tidptr;
5525         }
5526         if (flags & CLONE_PARENT_SETTID) {
5527             info.parent_tidptr = parent_tidptr;
5528         }
5529 
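        /* The new guest thread is backed by a detached host pthread with a
           fixed NEW_STACK_SIZE stack; it is never joined, so its resources
           are released automatically when it exits. */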
5530         ret = pthread_attr_init(&attr);
5531         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5532         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5533         /* It is not safe to deliver signals until the child has finished
5534            initializing, so temporarily block all signals.  */
5535         sigfillset(&sigmask);
5536         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5537 
5538         /* If this is our first additional thread, we need to ensure we
5539          * generate code for parallel execution and flush old translations.
5540          */
5541         if (!parallel_cpus) {
5542             parallel_cpus = true;
5543             tb_flush(cpu);
5544         }
5545 
5546         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5547         /* TODO: Free new CPU state if thread creation failed.  */
5548 
5549         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5550         pthread_attr_destroy(&attr);
5551         if (ret == 0) {
5552             /* Wait for the child to initialize.  */
5553             pthread_cond_wait(&info.cond, &info.mutex);
5554             ret = info.tid;
5555         } else {
5556             ret = -1;
5557         }
5558         pthread_mutex_unlock(&info.mutex);
5559         pthread_cond_destroy(&info.cond);
5560         pthread_mutex_destroy(&info.mutex);
5561         pthread_mutex_unlock(&clone_lock);
5562     } else {
5563         /* if CLONE_VM is not set, we treat it as a fork */
5564         if (flags & CLONE_INVALID_FORK_FLAGS) {
5565             return -TARGET_EINVAL;
5566         }
5567 
5568         /* We can't support custom termination signals */
5569         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5570             return -TARGET_EINVAL;
5571         }
5572 
5573         if (block_signals()) {
5574             return -TARGET_ERESTARTSYS;
5575         }
5576 
5577         fork_start();
5578         ret = fork();
5579         if (ret == 0) {
5580             /* Child Process.  */
5581             cpu_clone_regs(env, newsp);
5582             fork_end(1);
5583             /* There is a race condition here.  The parent process could
5584                theoretically read the TID in the child process before the child
5585                tid is set.  Avoiding it would require using either ptrace
5586                (not implemented) or having *_tidptr point at a shared memory
5587                mapping.  We can't repeat the spinlock hack used above because
5588                the child process gets its own copy of the lock.  */
5589             if (flags & CLONE_CHILD_SETTID)
5590                 put_user_u32(gettid(), child_tidptr);
5591             if (flags & CLONE_PARENT_SETTID)
5592                 put_user_u32(gettid(), parent_tidptr);
5593             ts = (TaskState *)cpu->opaque;
5594             if (flags & CLONE_SETTLS)
5595                 cpu_set_tls (env, newtls);
5596             if (flags & CLONE_CHILD_CLEARTID)
5597                 ts->child_tidptr = child_tidptr;
5598         } else {
5599             fork_end(0);
5600         }
5601     }
5602     return ret;
5603 }
5604 
5605 /* warning: does not handle Linux-specific flags... */
5606 static int target_to_host_fcntl_cmd(int cmd)
5607 {
5608     int ret;
5609 
5610     switch(cmd) {
5611     case TARGET_F_DUPFD:
5612     case TARGET_F_GETFD:
5613     case TARGET_F_SETFD:
5614     case TARGET_F_GETFL:
5615     case TARGET_F_SETFL:
5616         ret = cmd;
5617         break;
5618     case TARGET_F_GETLK:
5619         ret = F_GETLK64;
5620         break;
5621     case TARGET_F_SETLK:
5622         ret = F_SETLK64;
5623         break;
5624     case TARGET_F_SETLKW:
5625         ret = F_SETLKW64;
5626         break;
5627     case TARGET_F_GETOWN:
5628         ret = F_GETOWN;
5629         break;
5630     case TARGET_F_SETOWN:
5631         ret = F_SETOWN;
5632         break;
5633     case TARGET_F_GETSIG:
5634         ret = F_GETSIG;
5635         break;
5636     case TARGET_F_SETSIG:
5637         ret = F_SETSIG;
5638         break;
5639 #if TARGET_ABI_BITS == 32
5640     case TARGET_F_GETLK64:
5641         ret = F_GETLK64;
5642         break;
5643     case TARGET_F_SETLK64:
5644         ret = F_SETLK64;
5645         break;
5646     case TARGET_F_SETLKW64:
5647         ret = F_SETLKW64;
5648         break;
5649 #endif
5650     case TARGET_F_SETLEASE:
5651         ret = F_SETLEASE;
5652         break;
5653     case TARGET_F_GETLEASE:
5654         ret = F_GETLEASE;
5655         break;
5656 #ifdef F_DUPFD_CLOEXEC
5657     case TARGET_F_DUPFD_CLOEXEC:
5658         ret = F_DUPFD_CLOEXEC;
5659         break;
5660 #endif
5661     case TARGET_F_NOTIFY:
5662         ret = F_NOTIFY;
5663         break;
5664 #ifdef F_GETOWN_EX
5665     case TARGET_F_GETOWN_EX:
5666         ret = F_GETOWN_EX;
5667         break;
5668 #endif
5669 #ifdef F_SETOWN_EX
5670     case TARGET_F_SETOWN_EX:
5671         ret = F_SETOWN_EX;
5672         break;
5673 #endif
5674 #ifdef F_SETPIPE_SZ
5675     case TARGET_F_SETPIPE_SZ:
5676         ret = F_SETPIPE_SZ;
5677         break;
5678     case TARGET_F_GETPIPE_SZ:
5679         ret = F_GETPIPE_SZ;
5680         break;
5681 #endif
5682     default:
5683         ret = -TARGET_EINVAL;
5684         break;
5685     }
5686 
5687 #if defined(__powerpc64__)
5688     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
5689      * 14, which the kernel does not support.  The glibc fcntl wrapper
5690      * adjusts them to 5, 6 and 7 before making the syscall().  Since we
5691      * make the syscall directly, adjust to what the kernel supports.
5692      */
5693     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5694         ret -= F_GETLK64 - 5;
5695     }
5696 #endif
5697 
5698     return ret;
5699 }
5700 
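/*
 * Translate flock l_type values in both directions by instantiating the
 * same case list twice with different TRANSTBL_CONVERT() definitions.
 */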
5701 #define FLOCK_TRANSTBL \
5702     switch (type) { \
5703     TRANSTBL_CONVERT(F_RDLCK); \
5704     TRANSTBL_CONVERT(F_WRLCK); \
5705     TRANSTBL_CONVERT(F_UNLCK); \
5706     TRANSTBL_CONVERT(F_EXLCK); \
5707     TRANSTBL_CONVERT(F_SHLCK); \
5708     }
5709 
5710 static int target_to_host_flock(int type)
5711 {
5712 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5713     FLOCK_TRANSTBL
5714 #undef  TRANSTBL_CONVERT
5715     return -TARGET_EINVAL;
5716 }
5717 
5718 static int host_to_target_flock(int type)
5719 {
5720 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5721     FLOCK_TRANSTBL
5722 #undef  TRANSTBL_CONVERT
5723     /* if we don't know how to convert the value coming
5724      * from the host, we copy it to the target field as-is
5725      */
5726     return type;
5727 }
5728 
5729 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5730                                             abi_ulong target_flock_addr)
5731 {
5732     struct target_flock *target_fl;
5733     int l_type;
5734 
5735     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5736         return -TARGET_EFAULT;
5737     }
5738 
5739     __get_user(l_type, &target_fl->l_type);
5740     l_type = target_to_host_flock(l_type);
5741     if (l_type < 0) {
5742         return l_type;
5743     }
5744     fl->l_type = l_type;
5745     __get_user(fl->l_whence, &target_fl->l_whence);
5746     __get_user(fl->l_start, &target_fl->l_start);
5747     __get_user(fl->l_len, &target_fl->l_len);
5748     __get_user(fl->l_pid, &target_fl->l_pid);
5749     unlock_user_struct(target_fl, target_flock_addr, 0);
5750     return 0;
5751 }
5752 
5753 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5754                                           const struct flock64 *fl)
5755 {
5756     struct target_flock *target_fl;
5757     short l_type;
5758 
5759     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5760         return -TARGET_EFAULT;
5761     }
5762 
5763     l_type = host_to_target_flock(fl->l_type);
5764     __put_user(l_type, &target_fl->l_type);
5765     __put_user(fl->l_whence, &target_fl->l_whence);
5766     __put_user(fl->l_start, &target_fl->l_start);
5767     __put_user(fl->l_len, &target_fl->l_len);
5768     __put_user(fl->l_pid, &target_fl->l_pid);
5769     unlock_user_struct(target_fl, target_flock_addr, 1);
5770     return 0;
5771 }
5772 
5773 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5774 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5775 
5776 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5777 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5778                                                    abi_ulong target_flock_addr)
5779 {
5780     struct target_oabi_flock64 *target_fl;
5781     int l_type;
5782 
5783     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5784         return -TARGET_EFAULT;
5785     }
5786 
5787     __get_user(l_type, &target_fl->l_type);
5788     l_type = target_to_host_flock(l_type);
5789     if (l_type < 0) {
5790         return l_type;
5791     }
5792     fl->l_type = l_type;
5793     __get_user(fl->l_whence, &target_fl->l_whence);
5794     __get_user(fl->l_start, &target_fl->l_start);
5795     __get_user(fl->l_len, &target_fl->l_len);
5796     __get_user(fl->l_pid, &target_fl->l_pid);
5797     unlock_user_struct(target_fl, target_flock_addr, 0);
5798     return 0;
5799 }
5800 
5801 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5802                                                  const struct flock64 *fl)
5803 {
5804     struct target_oabi_flock64 *target_fl;
5805     short l_type;
5806 
5807     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5808         return -TARGET_EFAULT;
5809     }
5810 
5811     l_type = host_to_target_flock(fl->l_type);
5812     __put_user(l_type, &target_fl->l_type);
5813     __put_user(fl->l_whence, &target_fl->l_whence);
5814     __put_user(fl->l_start, &target_fl->l_start);
5815     __put_user(fl->l_len, &target_fl->l_len);
5816     __put_user(fl->l_pid, &target_fl->l_pid);
5817     unlock_user_struct(target_fl, target_flock_addr, 1);
5818     return 0;
5819 }
5820 #endif
5821 
5822 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5823                                               abi_ulong target_flock_addr)
5824 {
5825     struct target_flock64 *target_fl;
5826     int l_type;
5827 
5828     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5829         return -TARGET_EFAULT;
5830     }
5831 
5832     __get_user(l_type, &target_fl->l_type);
5833     l_type = target_to_host_flock(l_type);
5834     if (l_type < 0) {
5835         return l_type;
5836     }
5837     fl->l_type = l_type;
5838     __get_user(fl->l_whence, &target_fl->l_whence);
5839     __get_user(fl->l_start, &target_fl->l_start);
5840     __get_user(fl->l_len, &target_fl->l_len);
5841     __get_user(fl->l_pid, &target_fl->l_pid);
5842     unlock_user_struct(target_fl, target_flock_addr, 0);
5843     return 0;
5844 }
5845 
5846 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5847                                             const struct flock64 *fl)
5848 {
5849     struct target_flock64 *target_fl;
5850     short l_type;
5851 
5852     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5853         return -TARGET_EFAULT;
5854     }
5855 
5856     l_type = host_to_target_flock(fl->l_type);
5857     __put_user(l_type, &target_fl->l_type);
5858     __put_user(fl->l_whence, &target_fl->l_whence);
5859     __put_user(fl->l_start, &target_fl->l_start);
5860     __put_user(fl->l_len, &target_fl->l_len);
5861     __put_user(fl->l_pid, &target_fl->l_pid);
5862     unlock_user_struct(target_fl, target_flock_addr, 1);
5863     return 0;
5864 }
5865 
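/*
 * All guest flock variants are converted through a host struct flock64,
 * with the classic commands mapped onto their 64-bit host counterparts by
 * target_to_host_fcntl_cmd().
 */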
5866 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5867 {
5868     struct flock64 fl64;
5869 #ifdef F_GETOWN_EX
5870     struct f_owner_ex fox;
5871     struct target_f_owner_ex *target_fox;
5872 #endif
5873     abi_long ret;
5874     int host_cmd = target_to_host_fcntl_cmd(cmd);
5875 
5876     if (host_cmd == -TARGET_EINVAL)
5877 	    return host_cmd;
5878 
5879     switch(cmd) {
5880     case TARGET_F_GETLK:
5881         ret = copy_from_user_flock(&fl64, arg);
5882         if (ret) {
5883             return ret;
5884         }
5885         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5886         if (ret == 0) {
5887             ret = copy_to_user_flock(arg, &fl64);
5888         }
5889         break;
5890 
5891     case TARGET_F_SETLK:
5892     case TARGET_F_SETLKW:
5893         ret = copy_from_user_flock(&fl64, arg);
5894         if (ret) {
5895             return ret;
5896         }
5897         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5898         break;
5899 
5900     case TARGET_F_GETLK64:
5901         ret = copy_from_user_flock64(&fl64, arg);
5902         if (ret) {
5903             return ret;
5904         }
5905         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5906         if (ret == 0) {
5907             ret = copy_to_user_flock64(arg, &fl64);
5908         }
5909         break;
5910     case TARGET_F_SETLK64:
5911     case TARGET_F_SETLKW64:
5912         ret = copy_from_user_flock64(&fl64, arg);
5913         if (ret) {
5914             return ret;
5915         }
5916         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5917         break;
5918 
5919     case TARGET_F_GETFL:
5920         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5921         if (ret >= 0) {
5922             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5923         }
5924         break;
5925 
5926     case TARGET_F_SETFL:
5927         ret = get_errno(safe_fcntl(fd, host_cmd,
5928                                    target_to_host_bitmask(arg,
5929                                                           fcntl_flags_tbl)));
5930         break;
5931 
5932 #ifdef F_GETOWN_EX
5933     case TARGET_F_GETOWN_EX:
5934         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5935         if (ret >= 0) {
5936             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5937                 return -TARGET_EFAULT;
5938             target_fox->type = tswap32(fox.type);
5939             target_fox->pid = tswap32(fox.pid);
5940             unlock_user_struct(target_fox, arg, 1);
5941         }
5942         break;
5943 #endif
5944 
5945 #ifdef F_SETOWN_EX
5946     case TARGET_F_SETOWN_EX:
5947         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5948             return -TARGET_EFAULT;
5949         fox.type = tswap32(target_fox->type);
5950         fox.pid = tswap32(target_fox->pid);
5951         unlock_user_struct(target_fox, arg, 0);
5952         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5953         break;
5954 #endif
5955 
5956     case TARGET_F_SETOWN:
5957     case TARGET_F_GETOWN:
5958     case TARGET_F_SETSIG:
5959     case TARGET_F_GETSIG:
5960     case TARGET_F_SETLEASE:
5961     case TARGET_F_GETLEASE:
5962     case TARGET_F_SETPIPE_SZ:
5963     case TARGET_F_GETPIPE_SZ:
5964         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5965         break;
5966 
5967     default:
5968         ret = get_errno(safe_fcntl(fd, cmd, arg));
5969         break;
5970     }
5971     return ret;
5972 }
5973 
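/*
 * Helpers for targets whose legacy syscalls use 16-bit uid_t/gid_t: IDs
 * above 65535 are clamped to the overflow ID 65534, and the special value
 * -1 is preserved when widening.
 */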
5974 #ifdef USE_UID16
5975 
5976 static inline int high2lowuid(int uid)
5977 {
5978     if (uid > 65535)
5979         return 65534;
5980     else
5981         return uid;
5982 }
5983 
5984 static inline int high2lowgid(int gid)
5985 {
5986     if (gid > 65535)
5987         return 65534;
5988     else
5989         return gid;
5990 }
5991 
5992 static inline int low2highuid(int uid)
5993 {
5994     if ((int16_t)uid == -1)
5995         return -1;
5996     else
5997         return uid;
5998 }
5999 
6000 static inline int low2highgid(int gid)
6001 {
6002     if ((int16_t)gid == -1)
6003         return -1;
6004     else
6005         return gid;
6006 }
6007 static inline int tswapid(int id)
6008 {
6009     return tswap16(id);
6010 }
6011 
6012 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6013 
6014 #else /* !USE_UID16 */
6015 static inline int high2lowuid(int uid)
6016 {
6017     return uid;
6018 }
6019 static inline int high2lowgid(int gid)
6020 {
6021     return gid;
6022 }
6023 static inline int low2highuid(int uid)
6024 {
6025     return uid;
6026 }
6027 static inline int low2highgid(int gid)
6028 {
6029     return gid;
6030 }
6031 static inline int tswapid(int id)
6032 {
6033     return tswap32(id);
6034 }
6035 
6036 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6037 
6038 #endif /* USE_UID16 */
6039 
6040 /* We must do direct syscalls for setting UID/GID, because we want to
6041  * implement the Linux system call semantics of "change only for this thread",
6042  * not the libc/POSIX semantics of "change for all threads in process".
6043  * (See http://ewontfix.com/17/ for more details.)
6044  * We use the 32-bit version of the syscalls if present; if it is not
6045  * then either the host architecture supports 32-bit UIDs natively with
6046  * the standard syscall, or the 16-bit UID is the best we can do.
6047  */
6048 #ifdef __NR_setuid32
6049 #define __NR_sys_setuid __NR_setuid32
6050 #else
6051 #define __NR_sys_setuid __NR_setuid
6052 #endif
6053 #ifdef __NR_setgid32
6054 #define __NR_sys_setgid __NR_setgid32
6055 #else
6056 #define __NR_sys_setgid __NR_setgid
6057 #endif
6058 #ifdef __NR_setresuid32
6059 #define __NR_sys_setresuid __NR_setresuid32
6060 #else
6061 #define __NR_sys_setresuid __NR_setresuid
6062 #endif
6063 #ifdef __NR_setresgid32
6064 #define __NR_sys_setresgid __NR_setresgid32
6065 #else
6066 #define __NR_sys_setresgid __NR_setresgid
6067 #endif
6068 
6069 _syscall1(int, sys_setuid, uid_t, uid)
6070 _syscall1(int, sys_setgid, gid_t, gid)
6071 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6072 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6073 
6074 void syscall_init(void)
6075 {
6076     IOCTLEntry *ie;
6077     const argtype *arg_type;
6078     int size;
6079     int i;
6080 
6081     thunk_init(STRUCT_MAX);
6082 
6083 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6084 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6085 #include "syscall_types.h"
6086 #undef STRUCT
6087 #undef STRUCT_SPECIAL
6088 
6089     /* Build the target_to_host_errno_table[] from
6090      * host_to_target_errno_table[]. */
6091     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6092         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6093     }
6094 
6095     /* We patch the ioctl size if necessary.  We rely on the fact that
6096        no ioctl has all bits set in its size field. */
6097     ie = ioctl_entries;
6098     while (ie->target_cmd != 0) {
6099         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6100             TARGET_IOC_SIZEMASK) {
6101             arg_type = ie->arg_type;
6102             if (arg_type[0] != TYPE_PTR) {
6103                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6104                         ie->target_cmd);
6105                 exit(1);
6106             }
6107             arg_type++;
6108             size = thunk_type_size(arg_type, 0);
6109             ie->target_cmd = (ie->target_cmd &
6110                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6111                 (size << TARGET_IOC_SIZESHIFT);
6112         }
6113 
6114         /* automatic consistency check if same arch */
6115 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6116     (defined(__x86_64__) && defined(TARGET_X86_64))
6117         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6118             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6119                     ie->name, ie->target_cmd, ie->host_cmd);
6120         }
6121 #endif
6122         ie++;
6123     }
6124 }
6125 
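/*
 * 32-bit guests pass 64-bit file offsets as a pair of registers; which
 * word holds the high half depends on the target endianness.
 */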
6126 #if TARGET_ABI_BITS == 32
6127 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6128 {
6129 #ifdef TARGET_WORDS_BIGENDIAN
6130     return ((uint64_t)word0 << 32) | word1;
6131 #else
6132     return ((uint64_t)word1 << 32) | word0;
6133 #endif
6134 }
6135 #else /* TARGET_ABI_BITS == 32 */
6136 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6137 {
6138     return word0;
6139 }
6140 #endif /* TARGET_ABI_BITS != 32 */
6141 
6142 #ifdef TARGET_NR_truncate64
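/* On ABIs that align 64-bit syscall arguments to an even register pair,
   arg2 is only padding and the offset halves arrive in arg3/arg4. */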
6143 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6144                                          abi_long arg2,
6145                                          abi_long arg3,
6146                                          abi_long arg4)
6147 {
6148     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6149         arg2 = arg3;
6150         arg3 = arg4;
6151     }
6152     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6153 }
6154 #endif
6155 
6156 #ifdef TARGET_NR_ftruncate64
6157 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6158                                           abi_long arg2,
6159                                           abi_long arg3,
6160                                           abi_long arg4)
6161 {
6162     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6163         arg2 = arg3;
6164         arg3 = arg4;
6165     }
6166     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6167 }
6168 #endif
6169 
6170 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6171                                                abi_ulong target_addr)
6172 {
6173     struct target_timespec *target_ts;
6174 
6175     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6176         return -TARGET_EFAULT;
6177     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6178     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6179     unlock_user_struct(target_ts, target_addr, 0);
6180     return 0;
6181 }
6182 
6183 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6184                                                struct timespec *host_ts)
6185 {
6186     struct target_timespec *target_ts;
6187 
6188     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6189         return -TARGET_EFAULT;
6190     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6191     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6192     unlock_user_struct(target_ts, target_addr, 1);
6193     return 0;
6194 }
6195 
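/* Convert a struct itimerspec between guest and host layouts, swapping the
 * it_interval and it_value fields as needed.
 */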
6196 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6197                                                  abi_ulong target_addr)
6198 {
6199     struct target_itimerspec *target_itspec;
6200 
6201     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6202         return -TARGET_EFAULT;
6203     }
6204 
6205     host_itspec->it_interval.tv_sec =
6206                             tswapal(target_itspec->it_interval.tv_sec);
6207     host_itspec->it_interval.tv_nsec =
6208                             tswapal(target_itspec->it_interval.tv_nsec);
6209     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6210     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6211 
6212     unlock_user_struct(target_itspec, target_addr, 1);
6213     return 0;
6214 }
6215 
6216 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6217                                                struct itimerspec *host_its)
6218 {
6219     struct target_itimerspec *target_itspec;
6220 
6221     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6222         return -TARGET_EFAULT;
6223     }
6224 
6225     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6226     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6227 
6228     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6229     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6230 
6231     unlock_user_struct(target_itspec, target_addr, 1);
6232     return 0;
6233 }
6234 
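/* Convert a struct timex between guest and host layouts field by field. */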
6235 static inline abi_long target_to_host_timex(struct timex *host_tx,
6236                                             abi_long target_addr)
6237 {
6238     struct target_timex *target_tx;
6239 
6240     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6241         return -TARGET_EFAULT;
6242     }
6243 
6244     __get_user(host_tx->modes, &target_tx->modes);
6245     __get_user(host_tx->offset, &target_tx->offset);
6246     __get_user(host_tx->freq, &target_tx->freq);
6247     __get_user(host_tx->maxerror, &target_tx->maxerror);
6248     __get_user(host_tx->esterror, &target_tx->esterror);
6249     __get_user(host_tx->status, &target_tx->status);
6250     __get_user(host_tx->constant, &target_tx->constant);
6251     __get_user(host_tx->precision, &target_tx->precision);
6252     __get_user(host_tx->tolerance, &target_tx->tolerance);
6253     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6254     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6255     __get_user(host_tx->tick, &target_tx->tick);
6256     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6257     __get_user(host_tx->jitter, &target_tx->jitter);
6258     __get_user(host_tx->shift, &target_tx->shift);
6259     __get_user(host_tx->stabil, &target_tx->stabil);
6260     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6261     __get_user(host_tx->calcnt, &target_tx->calcnt);
6262     __get_user(host_tx->errcnt, &target_tx->errcnt);
6263     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6264     __get_user(host_tx->tai, &target_tx->tai);
6265 
6266     unlock_user_struct(target_tx, target_addr, 0);
6267     return 0;
6268 }
6269 
6270 static inline abi_long host_to_target_timex(abi_long target_addr,
6271                                             struct timex *host_tx)
6272 {
6273     struct target_timex *target_tx;
6274 
6275     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6276         return -TARGET_EFAULT;
6277     }
6278 
6279     __put_user(host_tx->modes, &target_tx->modes);
6280     __put_user(host_tx->offset, &target_tx->offset);
6281     __put_user(host_tx->freq, &target_tx->freq);
6282     __put_user(host_tx->maxerror, &target_tx->maxerror);
6283     __put_user(host_tx->esterror, &target_tx->esterror);
6284     __put_user(host_tx->status, &target_tx->status);
6285     __put_user(host_tx->constant, &target_tx->constant);
6286     __put_user(host_tx->precision, &target_tx->precision);
6287     __put_user(host_tx->tolerance, &target_tx->tolerance);
6288     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6289     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6290     __put_user(host_tx->tick, &target_tx->tick);
6291     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6292     __put_user(host_tx->jitter, &target_tx->jitter);
6293     __put_user(host_tx->shift, &target_tx->shift);
6294     __put_user(host_tx->stabil, &target_tx->stabil);
6295     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6296     __put_user(host_tx->calcnt, &target_tx->calcnt);
6297     __put_user(host_tx->errcnt, &target_tx->errcnt);
6298     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6299     __put_user(host_tx->tai, &target_tx->tai);
6300 
6301     unlock_user_struct(target_tx, target_addr, 1);
6302     return 0;
6303 }
6304 
6305 
6306 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6307                                                abi_ulong target_addr)
6308 {
6309     struct target_sigevent *target_sevp;
6310 
6311     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6312         return -TARGET_EFAULT;
6313     }
6314 
6315     /* This union is awkward on 64 bit systems because it has a 32 bit
6316      * integer and a pointer in it; we follow the conversion approach
6317      * used for handling sigval types in signal.c so the guest should get
6318      * the correct value back even if we did a 64 bit byteswap and it's
6319      * using the 32 bit integer.
6320      */
6321     host_sevp->sigev_value.sival_ptr =
6322         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6323     host_sevp->sigev_signo =
6324         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6325     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6326     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6327 
6328     unlock_user_struct(target_sevp, target_addr, 1);
6329     return 0;
6330 }
6331 
6332 #if defined(TARGET_NR_mlockall)
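/* Translate the target's mlockall() flag bits into the host's MCL_CURRENT
 * and MCL_FUTURE values, which need not match numerically.
 */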
6333 static inline int target_to_host_mlockall_arg(int arg)
6334 {
6335     int result = 0;
6336 
6337     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6338         result |= MCL_CURRENT;
6339     }
6340     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6341         result |= MCL_FUTURE;
6342     }
6343     return result;
6344 }
6345 #endif
6346 
6347 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6348      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6349      defined(TARGET_NR_newfstatat))
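/* Convert a host struct stat into the target's 64-bit stat layout,
 * handling the ARM EABI variant and targets without a separate
 * struct stat64.
 */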
6350 static inline abi_long host_to_target_stat64(void *cpu_env,
6351                                              abi_ulong target_addr,
6352                                              struct stat *host_st)
6353 {
6354 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6355     if (((CPUARMState *)cpu_env)->eabi) {
6356         struct target_eabi_stat64 *target_st;
6357 
6358         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6359             return -TARGET_EFAULT;
6360         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6361         __put_user(host_st->st_dev, &target_st->st_dev);
6362         __put_user(host_st->st_ino, &target_st->st_ino);
6363 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6364         __put_user(host_st->st_ino, &target_st->__st_ino);
6365 #endif
6366         __put_user(host_st->st_mode, &target_st->st_mode);
6367         __put_user(host_st->st_nlink, &target_st->st_nlink);
6368         __put_user(host_st->st_uid, &target_st->st_uid);
6369         __put_user(host_st->st_gid, &target_st->st_gid);
6370         __put_user(host_st->st_rdev, &target_st->st_rdev);
6371         __put_user(host_st->st_size, &target_st->st_size);
6372         __put_user(host_st->st_blksize, &target_st->st_blksize);
6373         __put_user(host_st->st_blocks, &target_st->st_blocks);
6374         __put_user(host_st->st_atime, &target_st->target_st_atime);
6375         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6376         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6377         unlock_user_struct(target_st, target_addr, 1);
6378     } else
6379 #endif
6380     {
6381 #if defined(TARGET_HAS_STRUCT_STAT64)
6382         struct target_stat64 *target_st;
6383 #else
6384         struct target_stat *target_st;
6385 #endif
6386 
6387         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6388             return -TARGET_EFAULT;
6389         memset(target_st, 0, sizeof(*target_st));
6390         __put_user(host_st->st_dev, &target_st->st_dev);
6391         __put_user(host_st->st_ino, &target_st->st_ino);
6392 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6393         __put_user(host_st->st_ino, &target_st->__st_ino);
6394 #endif
6395         __put_user(host_st->st_mode, &target_st->st_mode);
6396         __put_user(host_st->st_nlink, &target_st->st_nlink);
6397         __put_user(host_st->st_uid, &target_st->st_uid);
6398         __put_user(host_st->st_gid, &target_st->st_gid);
6399         __put_user(host_st->st_rdev, &target_st->st_rdev);
6400         /* XXX: better use of kernel struct */
6401         __put_user(host_st->st_size, &target_st->st_size);
6402         __put_user(host_st->st_blksize, &target_st->st_blksize);
6403         __put_user(host_st->st_blocks, &target_st->st_blocks);
6404         __put_user(host_st->st_atime, &target_st->target_st_atime);
6405         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6406         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6407         unlock_user_struct(target_st, target_addr, 1);
6408     }
6409 
6410     return 0;
6411 }
6412 #endif
6413 
6414 /* ??? Using host futex calls even when target atomic operations
6415    are not really atomic probably breaks things.  However, implementing
6416    futexes locally would make futexes shared between multiple processes
6417    tricky.  In any case they're probably useless, because guest atomic
6418    operations won't work either.  */
6419 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6420                     target_ulong uaddr2, int val3)
6421 {
6422     struct timespec ts, *pts;
6423     int base_op;
6424 
6425     /* ??? We assume FUTEX_* constants are the same on both host
6426        and target.  */
6427 #ifdef FUTEX_CMD_MASK
6428     base_op = op & FUTEX_CMD_MASK;
6429 #else
6430     base_op = op;
6431 #endif
6432     switch (base_op) {
6433     case FUTEX_WAIT:
6434     case FUTEX_WAIT_BITSET:
6435         if (timeout) {
6436             pts = &ts;
6437             target_to_host_timespec(pts, timeout);
6438         } else {
6439             pts = NULL;
6440         }
6441         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6442                          pts, NULL, val3));
6443     case FUTEX_WAKE:
6444         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6445     case FUTEX_FD:
6446         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6447     case FUTEX_REQUEUE:
6448     case FUTEX_CMP_REQUEUE:
6449     case FUTEX_WAKE_OP:
6450         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6451            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6452            But the prototype takes a `struct timespec *'; insert casts
6453            to satisfy the compiler.  We do not need to tswap TIMEOUT
6454            since it's not compared to guest memory.  */
6455         pts = (struct timespec *)(uintptr_t) timeout;
6456         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6457                                     g2h(uaddr2),
6458                                     (base_op == FUTEX_CMP_REQUEUE
6459                                      ? tswap32(val3)
6460                                      : val3)));
6461     default:
6462         return -TARGET_ENOSYS;
6463     }
6464 }
6465 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
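/* name_to_handle_at(): build the file handle in a host buffer, then copy
 * it back to the guest with handle_bytes and handle_type byte-swapped, and
 * store the mount ID at the guest's mount_id address.
 */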
6466 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6467                                      abi_long handle, abi_long mount_id,
6468                                      abi_long flags)
6469 {
6470     struct file_handle *target_fh;
6471     struct file_handle *fh;
6472     int mid = 0;
6473     abi_long ret;
6474     char *name;
6475     unsigned int size, total_size;
6476 
6477     if (get_user_s32(size, handle)) {
6478         return -TARGET_EFAULT;
6479     }
6480 
6481     name = lock_user_string(pathname);
6482     if (!name) {
6483         return -TARGET_EFAULT;
6484     }
6485 
6486     total_size = sizeof(struct file_handle) + size;
6487     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6488     if (!target_fh) {
6489         unlock_user(name, pathname, 0);
6490         return -TARGET_EFAULT;
6491     }
6492 
6493     fh = g_malloc0(total_size);
6494     fh->handle_bytes = size;
6495 
6496     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6497     unlock_user(name, pathname, 0);
6498 
6499     /* man name_to_handle_at(2):
6500      * Other than the use of the handle_bytes field, the caller should treat
6501      * the file_handle structure as an opaque data type
6502      */
6503 
6504     memcpy(target_fh, fh, total_size);
6505     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6506     target_fh->handle_type = tswap32(fh->handle_type);
6507     g_free(fh);
6508     unlock_user(target_fh, handle, total_size);
6509 
6510     if (put_user_s32(mid, mount_id)) {
6511         return -TARGET_EFAULT;
6512     }
6513 
6514     return ret;
6515 
6516 }
6517 #endif
6518 
6519 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6520 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6521                                      abi_long flags)
6522 {
6523     struct file_handle *target_fh;
6524     struct file_handle *fh;
6525     unsigned int size, total_size;
6526     abi_long ret;
6527 
6528     if (get_user_s32(size, handle)) {
6529         return -TARGET_EFAULT;
6530     }
6531 
6532     total_size = sizeof(struct file_handle) + size;
6533     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6534     if (!target_fh) {
6535         return -TARGET_EFAULT;
6536     }
6537 
6538     fh = g_memdup(target_fh, total_size);
6539     fh->handle_bytes = size;
6540     fh->handle_type = tswap32(target_fh->handle_type);
6541 
6542     ret = get_errno(open_by_handle_at(mount_fd, fh,
6543                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6544 
6545     g_free(fh);
6546 
6547     unlock_user(target_fh, handle, total_size);
6548 
6549     return ret;
6550 }
6551 #endif
6552 
6553 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6554 
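/* Common implementation for signalfd() and signalfd4(): convert the guest
 * signal mask and flags, then register an fd translator
 * (target_signalfd_trans) for the returned descriptor so reads from it can
 * be converted for the guest.
 */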
6555 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6556 {
6557     int host_flags;
6558     target_sigset_t *target_mask;
6559     sigset_t host_mask;
6560     abi_long ret;
6561 
6562     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6563         return -TARGET_EINVAL;
6564     }
6565     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6566         return -TARGET_EFAULT;
6567     }
6568 
6569     target_to_host_sigset(&host_mask, target_mask);
6570 
6571     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6572 
6573     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6574     if (ret >= 0) {
6575         fd_trans_register(ret, &target_signalfd_trans);
6576     }
6577 
6578     unlock_user_struct(target_mask, mask, 0);
6579 
6580     return ret;
6581 }
6582 #endif
6583 
6584 /* Map host to target signal numbers for the wait family of syscalls.
6585    Assume all other status bits are the same.  */
6586 int host_to_target_waitstatus(int status)
6587 {
6588     if (WIFSIGNALED(status)) {
6589         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6590     }
6591     if (WIFSTOPPED(status)) {
6592         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6593                | (status & 0xff);
6594     }
6595     return status;
6596 }
6597 
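/* Emulate /proc/self/cmdline: write the guest's argv strings, each with
 * its terminating NUL, to the given file descriptor.
 */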
6598 static int open_self_cmdline(void *cpu_env, int fd)
6599 {
6600     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6601     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6602     int i;
6603 
6604     for (i = 0; i < bprm->argc; i++) {
6605         size_t len = strlen(bprm->argv[i]) + 1;
6606 
6607         if (write(fd, bprm->argv[i], len) != len) {
6608             return -1;
6609         }
6610     }
6611 
6612     return 0;
6613 }
6614 
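/* Emulate /proc/self/maps: parse the host's maps file and emit only those
 * regions that map to valid guest addresses, rewritten in terms of guest
 * virtual addresses.
 */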
6615 static int open_self_maps(void *cpu_env, int fd)
6616 {
6617     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6618     TaskState *ts = cpu->opaque;
6619     FILE *fp;
6620     char *line = NULL;
6621     size_t len = 0;
6622     ssize_t read;
6623 
6624     fp = fopen("/proc/self/maps", "r");
6625     if (fp == NULL) {
6626         return -1;
6627     }
6628 
6629     while ((read = getline(&line, &len, fp)) != -1) {
6630         int fields, dev_maj, dev_min, inode;
6631         uint64_t min, max, offset;
6632         char flag_r, flag_w, flag_x, flag_p;
6633         char path[513] = ""; /* 512 chars + NUL for the "%512s" below */
6634         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6635                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6636                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6637 
6638         if ((fields < 10) || (fields > 11)) {
6639             continue;
6640         }
6641         if (h2g_valid(min)) {
6642             int flags = page_get_flags(h2g(min));
6643             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6644             if (page_check_range(h2g(min), max - min, flags) == -1) {
6645                 continue;
6646             }
6647             if (h2g(min) == ts->info->stack_limit) {
6648                 pstrcpy(path, sizeof(path), "      [stack]");
6649             }
6650             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6651                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6652                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6653                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6654                     path[0] ? "         " : "", path);
6655         }
6656     }
6657 
6658     free(line);
6659     fclose(fp);
6660 
6661     return 0;
6662 }
6663 
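/* Emulate /proc/self/stat: only the pid, the command name and the stack
 * start field are filled in; every other field is reported as 0.
 */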
6664 static int open_self_stat(void *cpu_env, int fd)
6665 {
6666     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6667     TaskState *ts = cpu->opaque;
6668     abi_ulong start_stack = ts->info->start_stack;
6669     int i;
6670 
6671     for (i = 0; i < 44; i++) {
6672       char buf[128];
6673       int len;
6674       uint64_t val = 0;
6675 
6676       if (i == 0) {
6677         /* pid */
6678         val = getpid();
6679         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6680       } else if (i == 1) {
6681         /* app name */
6682         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6683       } else if (i == 27) {
6684         /* stack bottom */
6685         val = start_stack;
6686         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6687       } else {
6688         /* for the rest, there is MasterCard (i.e. report 0) */
6689         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6690       }
6691 
6692       len = strlen(buf);
6693       if (write(fd, buf, len) != len) {
6694           return -1;
6695       }
6696     }
6697 
6698     return 0;
6699 }
6700 
6701 static int open_self_auxv(void *cpu_env, int fd)
6702 {
6703     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6704     TaskState *ts = cpu->opaque;
6705     abi_ulong auxv = ts->info->saved_auxv;
6706     abi_ulong len = ts->info->auxv_len;
6707     char *ptr;
6708 
6709     /*
6710      * Auxiliary vector is stored in target process stack.
6711      * read in whole auxv vector and copy it to file
6712      */
6713     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6714     if (ptr != NULL) {
6715         while (len > 0) {
6716             ssize_t r;
6717             r = write(fd, ptr, len);
6718             if (r <= 0) {
6719                 break;
6720             }
6721             len -= r;
6722             ptr += r;
6723         }
6724         lseek(fd, 0, SEEK_SET);
6725         unlock_user(ptr, auxv, len);
6726     }
6727 
6728     return 0;
6729 }
6730 
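/* Return nonzero if filename refers to the given entry in our own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 */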
6731 static int is_proc_myself(const char *filename, const char *entry)
6732 {
6733     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6734         filename += strlen("/proc/");
6735         if (!strncmp(filename, "self/", strlen("self/"))) {
6736             filename += strlen("self/");
6737         } else if (*filename >= '1' && *filename <= '9') {
6738             char myself[80];
6739             snprintf(myself, sizeof(myself), "%d/", getpid());
6740             if (!strncmp(filename, myself, strlen(myself))) {
6741                 filename += strlen(myself);
6742             } else {
6743                 return 0;
6744             }
6745         } else {
6746             return 0;
6747         }
6748         if (!strcmp(filename, entry)) {
6749             return 1;
6750         }
6751     }
6752     return 0;
6753 }
6754 
6755 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6756 static int is_proc(const char *filename, const char *entry)
6757 {
6758     return strcmp(filename, entry) == 0;
6759 }
6760 
6761 static int open_net_route(void *cpu_env, int fd)
6762 {
6763     FILE *fp;
6764     char *line = NULL;
6765     size_t len = 0;
6766     ssize_t read;
6767 
6768     fp = fopen("/proc/net/route", "r");
6769     if (fp == NULL) {
6770         return -1;
6771     }
6772 
6773     /* read header */
6774 
6775     read = getline(&line, &len, fp);
6776     dprintf(fd, "%s", line);
6777 
6778     /* read routes */
6779 
6780     while ((read = getline(&line, &len, fp)) != -1) {
6781         char iface[16];
6782         uint32_t dest, gw, mask;
6783         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6784         int fields;
6785 
6786         fields = sscanf(line,
6787                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6788                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6789                         &mask, &mtu, &window, &irtt);
6790         if (fields != 11) {
6791             continue;
6792         }
6793         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6794                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6795                 metric, tswap32(mask), mtu, window, irtt);
6796     }
6797 
6798     free(line);
6799     fclose(fp);
6800 
6801     return 0;
6802 }
6803 #endif
6804 
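/* openat() wrapper: opens of selected /proc files are intercepted and
 * their emulated contents are written to an unlinked temporary file whose
 * descriptor is returned instead of opening the host file.
 */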
6805 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6806 {
6807     struct fake_open {
6808         const char *filename;
6809         int (*fill)(void *cpu_env, int fd);
6810         int (*cmp)(const char *s1, const char *s2);
6811     };
6812     const struct fake_open *fake_open;
6813     static const struct fake_open fakes[] = {
6814         { "maps", open_self_maps, is_proc_myself },
6815         { "stat", open_self_stat, is_proc_myself },
6816         { "auxv", open_self_auxv, is_proc_myself },
6817         { "cmdline", open_self_cmdline, is_proc_myself },
6818 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6819         { "/proc/net/route", open_net_route, is_proc },
6820 #endif
6821         { NULL, NULL, NULL }
6822     };
6823 
6824     if (is_proc_myself(pathname, "exe")) {
6825         int execfd = qemu_getauxval(AT_EXECFD);
6826         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6827     }
6828 
6829     for (fake_open = fakes; fake_open->filename; fake_open++) {
6830         if (fake_open->cmp(pathname, fake_open->filename)) {
6831             break;
6832         }
6833     }
6834 
6835     if (fake_open->filename) {
6836         const char *tmpdir;
6837         char filename[PATH_MAX];
6838         int fd, r;
6839 
6840         /* create a temporary file to hold the emulated contents */
6841         tmpdir = getenv("TMPDIR");
6842         if (!tmpdir)
6843             tmpdir = "/tmp";
6844         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6845         fd = mkstemp(filename);
6846         if (fd < 0) {
6847             return fd;
6848         }
6849         unlink(filename);
6850 
6851         if ((r = fake_open->fill(cpu_env, fd))) {
6852             int e = errno;
6853             close(fd);
6854             errno = e;
6855             return r;
6856         }
6857         lseek(fd, 0, SEEK_SET);
6858 
6859         return fd;
6860     }
6861 
6862     return safe_openat(dirfd, path(pathname), flags, mode);
6863 }
6864 
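/* Guest-visible timer IDs carry TIMER_MAGIC in their upper 16 bits and the
 * g_posix_timers[] index in the lower 16 bits, e.g. index 3 is seen by the
 * guest as 0x0caf0003.
 */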
6865 #define TIMER_MAGIC 0x0caf0000
6866 #define TIMER_MAGIC_MASK 0xffff0000
6867 
6868 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6869 static target_timer_t get_timer_id(abi_long arg)
6870 {
6871     target_timer_t timerid = arg;
6872 
6873     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6874         return -TARGET_EINVAL;
6875     }
6876 
6877     timerid &= 0xffff;
6878 
6879     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6880         return -TARGET_EINVAL;
6881     }
6882 
6883     return timerid;
6884 }
6885 
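/* Convert a CPU affinity mask between the guest's abi_ulong words and the
 * host's unsigned long words bit by bit, so that differing word sizes and
 * endianness are handled correctly.
 */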
6886 static int target_to_host_cpu_mask(unsigned long *host_mask,
6887                                    size_t host_size,
6888                                    abi_ulong target_addr,
6889                                    size_t target_size)
6890 {
6891     unsigned target_bits = sizeof(abi_ulong) * 8;
6892     unsigned host_bits = sizeof(*host_mask) * 8;
6893     abi_ulong *target_mask;
6894     unsigned i, j;
6895 
6896     assert(host_size >= target_size);
6897 
6898     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6899     if (!target_mask) {
6900         return -TARGET_EFAULT;
6901     }
6902     memset(host_mask, 0, host_size);
6903 
6904     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6905         unsigned bit = i * target_bits;
6906         abi_ulong val;
6907 
6908         __get_user(val, &target_mask[i]);
6909         for (j = 0; j < target_bits; j++, bit++) {
6910             if (val & (1UL << j)) {
6911                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6912             }
6913         }
6914     }
6915 
6916     unlock_user(target_mask, target_addr, 0);
6917     return 0;
6918 }
6919 
6920 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6921                                    size_t host_size,
6922                                    abi_ulong target_addr,
6923                                    size_t target_size)
6924 {
6925     unsigned target_bits = sizeof(abi_ulong) * 8;
6926     unsigned host_bits = sizeof(*host_mask) * 8;
6927     abi_ulong *target_mask;
6928     unsigned i, j;
6929 
6930     assert(host_size >= target_size);
6931 
6932     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6933     if (!target_mask) {
6934         return -TARGET_EFAULT;
6935     }
6936 
6937     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6938         unsigned bit = i * target_bits;
6939         abi_ulong val = 0;
6940 
6941         for (j = 0; j < target_bits; j++, bit++) {
6942             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6943                 val |= 1UL << j;
6944             }
6945         }
6946         __put_user(val, &target_mask[i]);
6947     }
6948 
6949     unlock_user(target_mask, target_addr, target_size);
6950     return 0;
6951 }
6952 
6953 /* This is an internal helper for do_syscall that gives it a single
6954  * return point, so that actions such as logging of syscall results
6955  * can be performed in one place.
6956  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6957  */
6958 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6959                             abi_long arg2, abi_long arg3, abi_long arg4,
6960                             abi_long arg5, abi_long arg6, abi_long arg7,
6961                             abi_long arg8)
6962 {
6963     CPUState *cpu = ENV_GET_CPU(cpu_env);
6964     abi_long ret;
6965 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6966     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6967     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6968     struct stat st;
6969 #endif
6970 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6971     || defined(TARGET_NR_fstatfs)
6972     struct statfs stfs;
6973 #endif
6974     void *p;
6975 
6976     switch(num) {
6977     case TARGET_NR_exit:
6978         /* In old applications this may be used to implement _exit(2).
6979            However, in threaded applications it is used for thread termination,
6980            and _exit_group is used for application termination.
6981            Do thread termination if we have more than one thread.  */
6982 
6983         if (block_signals()) {
6984             return -TARGET_ERESTARTSYS;
6985         }
6986 
6987         cpu_list_lock();
6988 
6989         if (CPU_NEXT(first_cpu)) {
6990             TaskState *ts;
6991 
6992             /* Remove the CPU from the list.  */
6993             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6994 
6995             cpu_list_unlock();
6996 
6997             ts = cpu->opaque;
6998             if (ts->child_tidptr) {
6999                 put_user_u32(0, ts->child_tidptr);
7000                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7001                           NULL, NULL, 0);
7002             }
7003             thread_cpu = NULL;
7004             object_unref(OBJECT(cpu));
7005             g_free(ts);
7006             rcu_unregister_thread();
7007             pthread_exit(NULL);
7008         }
7009 
7010         cpu_list_unlock();
7011         preexit_cleanup(cpu_env, arg1);
7012         _exit(arg1);
7013         return 0; /* avoid warning */
7014     case TARGET_NR_read:
7015         if (arg2 == 0 && arg3 == 0) {
7016             return get_errno(safe_read(arg1, 0, 0));
7017         } else {
7018             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7019                 return -TARGET_EFAULT;
7020             ret = get_errno(safe_read(arg1, p, arg3));
7021             if (ret >= 0 &&
7022                 fd_trans_host_to_target_data(arg1)) {
7023                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7024             }
7025             unlock_user(p, arg2, ret);
7026         }
7027         return ret;
7028     case TARGET_NR_write:
7029         if (arg2 == 0 && arg3 == 0) {
7030             return get_errno(safe_write(arg1, 0, 0));
7031         }
7032         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7033             return -TARGET_EFAULT;
7034         if (fd_trans_target_to_host_data(arg1)) {
7035             void *copy = g_malloc(arg3);
7036             memcpy(copy, p, arg3);
7037             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7038             if (ret >= 0) {
7039                 ret = get_errno(safe_write(arg1, copy, ret));
7040             }
7041             g_free(copy);
7042         } else {
7043             ret = get_errno(safe_write(arg1, p, arg3));
7044         }
7045         unlock_user(p, arg2, 0);
7046         return ret;
7047 
7048 #ifdef TARGET_NR_open
7049     case TARGET_NR_open:
7050         if (!(p = lock_user_string(arg1)))
7051             return -TARGET_EFAULT;
7052         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7053                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7054                                   arg3));
7055         fd_trans_unregister(ret);
7056         unlock_user(p, arg1, 0);
7057         return ret;
7058 #endif
7059     case TARGET_NR_openat:
7060         if (!(p = lock_user_string(arg2)))
7061             return -TARGET_EFAULT;
7062         ret = get_errno(do_openat(cpu_env, arg1, p,
7063                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7064                                   arg4));
7065         fd_trans_unregister(ret);
7066         unlock_user(p, arg2, 0);
7067         return ret;
7068 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7069     case TARGET_NR_name_to_handle_at:
7070         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7071         return ret;
7072 #endif
7073 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7074     case TARGET_NR_open_by_handle_at:
7075         ret = do_open_by_handle_at(arg1, arg2, arg3);
7076         fd_trans_unregister(ret);
7077         return ret;
7078 #endif
7079     case TARGET_NR_close:
7080         fd_trans_unregister(arg1);
7081         return get_errno(close(arg1));
7082 
7083     case TARGET_NR_brk:
7084         return do_brk(arg1);
7085 #ifdef TARGET_NR_fork
7086     case TARGET_NR_fork:
7087         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7088 #endif
7089 #ifdef TARGET_NR_waitpid
7090     case TARGET_NR_waitpid:
7091         {
7092             int status;
7093             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7094             if (!is_error(ret) && arg2 && ret
7095                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7096                 return -TARGET_EFAULT;
7097         }
7098         return ret;
7099 #endif
7100 #ifdef TARGET_NR_waitid
7101     case TARGET_NR_waitid:
7102         {
7103             siginfo_t info;
7104             info.si_pid = 0;
7105             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7106             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7107                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7108                     return -TARGET_EFAULT;
7109                 host_to_target_siginfo(p, &info);
7110                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7111             }
7112         }
7113         return ret;
7114 #endif
7115 #ifdef TARGET_NR_creat /* not on alpha */
7116     case TARGET_NR_creat:
7117         if (!(p = lock_user_string(arg1)))
7118             return -TARGET_EFAULT;
7119         ret = get_errno(creat(p, arg2));
7120         fd_trans_unregister(ret);
7121         unlock_user(p, arg1, 0);
7122         return ret;
7123 #endif
7124 #ifdef TARGET_NR_link
7125     case TARGET_NR_link:
7126         {
7127             void * p2;
7128             p = lock_user_string(arg1);
7129             p2 = lock_user_string(arg2);
7130             if (!p || !p2)
7131                 ret = -TARGET_EFAULT;
7132             else
7133                 ret = get_errno(link(p, p2));
7134             unlock_user(p2, arg2, 0);
7135             unlock_user(p, arg1, 0);
7136         }
7137         return ret;
7138 #endif
7139 #if defined(TARGET_NR_linkat)
7140     case TARGET_NR_linkat:
7141         {
7142             void * p2 = NULL;
7143             if (!arg2 || !arg4)
7144                 return -TARGET_EFAULT;
7145             p  = lock_user_string(arg2);
7146             p2 = lock_user_string(arg4);
7147             if (!p || !p2)
7148                 ret = -TARGET_EFAULT;
7149             else
7150                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7151             unlock_user(p, arg2, 0);
7152             unlock_user(p2, arg4, 0);
7153         }
7154         return ret;
7155 #endif
7156 #ifdef TARGET_NR_unlink
7157     case TARGET_NR_unlink:
7158         if (!(p = lock_user_string(arg1)))
7159             return -TARGET_EFAULT;
7160         ret = get_errno(unlink(p));
7161         unlock_user(p, arg1, 0);
7162         return ret;
7163 #endif
7164 #if defined(TARGET_NR_unlinkat)
7165     case TARGET_NR_unlinkat:
7166         if (!(p = lock_user_string(arg2)))
7167             return -TARGET_EFAULT;
7168         ret = get_errno(unlinkat(arg1, p, arg3));
7169         unlock_user(p, arg2, 0);
7170         return ret;
7171 #endif
7172     case TARGET_NR_execve:
7173         {
7174             char **argp, **envp;
7175             int argc, envc;
7176             abi_ulong gp;
7177             abi_ulong guest_argp;
7178             abi_ulong guest_envp;
7179             abi_ulong addr;
7180             char **q;
7181             int total_size = 0;
7182 
7183             argc = 0;
7184             guest_argp = arg2;
7185             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7186                 if (get_user_ual(addr, gp))
7187                     return -TARGET_EFAULT;
7188                 if (!addr)
7189                     break;
7190                 argc++;
7191             }
7192             envc = 0;
7193             guest_envp = arg3;
7194             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7195                 if (get_user_ual(addr, gp))
7196                     return -TARGET_EFAULT;
7197                 if (!addr)
7198                     break;
7199                 envc++;
7200             }
7201 
7202             argp = g_new0(char *, argc + 1);
7203             envp = g_new0(char *, envc + 1);
7204 
7205             for (gp = guest_argp, q = argp; gp;
7206                   gp += sizeof(abi_ulong), q++) {
7207                 if (get_user_ual(addr, gp))
7208                     goto execve_efault;
7209                 if (!addr)
7210                     break;
7211                 if (!(*q = lock_user_string(addr)))
7212                     goto execve_efault;
7213                 total_size += strlen(*q) + 1;
7214             }
7215             *q = NULL;
7216 
7217             for (gp = guest_envp, q = envp; gp;
7218                   gp += sizeof(abi_ulong), q++) {
7219                 if (get_user_ual(addr, gp))
7220                     goto execve_efault;
7221                 if (!addr)
7222                     break;
7223                 if (!(*q = lock_user_string(addr)))
7224                     goto execve_efault;
7225                 total_size += strlen(*q) + 1;
7226             }
7227             *q = NULL;
7228 
7229             if (!(p = lock_user_string(arg1)))
7230                 goto execve_efault;
7231             /* Although execve() is not an interruptible syscall it is
7232              * a special case where we must use the safe_syscall wrapper:
7233              * if we allow a signal to happen before we make the host
7234              * syscall then we will 'lose' it, because at the point of
7235              * execve the process leaves QEMU's control. So we use the
7236              * safe syscall wrapper to ensure that we either take the
7237              * signal as a guest signal, or else it does not happen
7238              * before the execve completes and makes it the other
7239              * program's problem.
7240              */
7241             ret = get_errno(safe_execve(p, argp, envp));
7242             unlock_user(p, arg1, 0);
7243 
7244             goto execve_end;
7245 
7246         execve_efault:
7247             ret = -TARGET_EFAULT;
7248 
7249         execve_end:
7250             for (gp = guest_argp, q = argp; *q;
7251                   gp += sizeof(abi_ulong), q++) {
7252                 if (get_user_ual(addr, gp)
7253                     || !addr)
7254                     break;
7255                 unlock_user(*q, addr, 0);
7256             }
7257             for (gp = guest_envp, q = envp; *q;
7258                   gp += sizeof(abi_ulong), q++) {
7259                 if (get_user_ual(addr, gp)
7260                     || !addr)
7261                     break;
7262                 unlock_user(*q, addr, 0);
7263             }
7264 
7265             g_free(argp);
7266             g_free(envp);
7267         }
7268         return ret;
7269     case TARGET_NR_chdir:
7270         if (!(p = lock_user_string(arg1)))
7271             return -TARGET_EFAULT;
7272         ret = get_errno(chdir(p));
7273         unlock_user(p, arg1, 0);
7274         return ret;
7275 #ifdef TARGET_NR_time
7276     case TARGET_NR_time:
7277         {
7278             time_t host_time;
7279             ret = get_errno(time(&host_time));
7280             if (!is_error(ret)
7281                 && arg1
7282                 && put_user_sal(host_time, arg1))
7283                 return -TARGET_EFAULT;
7284         }
7285         return ret;
7286 #endif
7287 #ifdef TARGET_NR_mknod
7288     case TARGET_NR_mknod:
7289         if (!(p = lock_user_string(arg1)))
7290             return -TARGET_EFAULT;
7291         ret = get_errno(mknod(p, arg2, arg3));
7292         unlock_user(p, arg1, 0);
7293         return ret;
7294 #endif
7295 #if defined(TARGET_NR_mknodat)
7296     case TARGET_NR_mknodat:
7297         if (!(p = lock_user_string(arg2)))
7298             return -TARGET_EFAULT;
7299         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7300         unlock_user(p, arg2, 0);
7301         return ret;
7302 #endif
7303 #ifdef TARGET_NR_chmod
7304     case TARGET_NR_chmod:
7305         if (!(p = lock_user_string(arg1)))
7306             return -TARGET_EFAULT;
7307         ret = get_errno(chmod(p, arg2));
7308         unlock_user(p, arg1, 0);
7309         return ret;
7310 #endif
7311 #ifdef TARGET_NR_lseek
7312     case TARGET_NR_lseek:
7313         return get_errno(lseek(arg1, arg2, arg3));
7314 #endif
7315 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7316     /* Alpha specific */
7317     case TARGET_NR_getxpid:
7318         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7319         return get_errno(getpid());
7320 #endif
7321 #ifdef TARGET_NR_getpid
7322     case TARGET_NR_getpid:
7323         return get_errno(getpid());
7324 #endif
7325     case TARGET_NR_mount:
7326         {
7327             /* need to look at the data field */
7328             void *p2, *p3;
7329 
7330             if (arg1) {
7331                 p = lock_user_string(arg1);
7332                 if (!p) {
7333                     return -TARGET_EFAULT;
7334                 }
7335             } else {
7336                 p = NULL;
7337             }
7338 
7339             p2 = lock_user_string(arg2);
7340             if (!p2) {
7341                 if (arg1) {
7342                     unlock_user(p, arg1, 0);
7343                 }
7344                 return -TARGET_EFAULT;
7345             }
7346 
7347             if (arg3) {
7348                 p3 = lock_user_string(arg3);
7349                 if (!p3) {
7350                     if (arg1) {
7351                         unlock_user(p, arg1, 0);
7352                     }
7353                     unlock_user(p2, arg2, 0);
7354                     return -TARGET_EFAULT;
7355                 }
7356             } else {
7357                 p3 = NULL;
7358             }
7359 
7360             /* FIXME - arg5 should be locked, but it isn't clear how to
7361              * do that since it's not guaranteed to be a NULL-terminated
7362              * string.
7363              */
7364             if (!arg5) {
7365                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7366             } else {
7367                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7368             }
7369             ret = get_errno(ret);
7370 
7371             if (arg1) {
7372                 unlock_user(p, arg1, 0);
7373             }
7374             unlock_user(p2, arg2, 0);
7375             if (arg3) {
7376                 unlock_user(p3, arg3, 0);
7377             }
7378         }
7379         return ret;
7380 #ifdef TARGET_NR_umount
7381     case TARGET_NR_umount:
7382         if (!(p = lock_user_string(arg1)))
7383             return -TARGET_EFAULT;
7384         ret = get_errno(umount(p));
7385         unlock_user(p, arg1, 0);
7386         return ret;
7387 #endif
7388 #ifdef TARGET_NR_stime /* not on alpha */
7389     case TARGET_NR_stime:
7390         {
7391             time_t host_time;
7392             if (get_user_sal(host_time, arg1))
7393                 return -TARGET_EFAULT;
7394             return get_errno(stime(&host_time));
7395         }
7396 #endif
7397 #ifdef TARGET_NR_alarm /* not on alpha */
7398     case TARGET_NR_alarm:
7399         return alarm(arg1);
7400 #endif
7401 #ifdef TARGET_NR_pause /* not on alpha */
7402     case TARGET_NR_pause:
7403         if (!block_signals()) {
7404             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7405         }
7406         return -TARGET_EINTR;
7407 #endif
7408 #ifdef TARGET_NR_utime
7409     case TARGET_NR_utime:
7410         {
7411             struct utimbuf tbuf, *host_tbuf;
7412             struct target_utimbuf *target_tbuf;
7413             if (arg2) {
7414                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7415                     return -TARGET_EFAULT;
7416                 tbuf.actime = tswapal(target_tbuf->actime);
7417                 tbuf.modtime = tswapal(target_tbuf->modtime);
7418                 unlock_user_struct(target_tbuf, arg2, 0);
7419                 host_tbuf = &tbuf;
7420             } else {
7421                 host_tbuf = NULL;
7422             }
7423             if (!(p = lock_user_string(arg1)))
7424                 return -TARGET_EFAULT;
7425             ret = get_errno(utime(p, host_tbuf));
7426             unlock_user(p, arg1, 0);
7427         }
7428         return ret;
7429 #endif
7430 #ifdef TARGET_NR_utimes
7431     case TARGET_NR_utimes:
7432         {
7433             struct timeval *tvp, tv[2];
7434             if (arg2) {
7435                 if (copy_from_user_timeval(&tv[0], arg2)
7436                     || copy_from_user_timeval(&tv[1],
7437                                               arg2 + sizeof(struct target_timeval)))
7438                     return -TARGET_EFAULT;
7439                 tvp = tv;
7440             } else {
7441                 tvp = NULL;
7442             }
7443             if (!(p = lock_user_string(arg1)))
7444                 return -TARGET_EFAULT;
7445             ret = get_errno(utimes(p, tvp));
7446             unlock_user(p, arg1, 0);
7447         }
7448         return ret;
7449 #endif
7450 #if defined(TARGET_NR_futimesat)
7451     case TARGET_NR_futimesat:
7452         {
7453             struct timeval *tvp, tv[2];
7454             if (arg3) {
7455                 if (copy_from_user_timeval(&tv[0], arg3)
7456                     || copy_from_user_timeval(&tv[1],
7457                                               arg3 + sizeof(struct target_timeval)))
7458                     return -TARGET_EFAULT;
7459                 tvp = tv;
7460             } else {
7461                 tvp = NULL;
7462             }
7463             if (!(p = lock_user_string(arg2))) {
7464                 return -TARGET_EFAULT;
7465             }
7466             ret = get_errno(futimesat(arg1, path(p), tvp));
7467             unlock_user(p, arg2, 0);
7468         }
7469         return ret;
7470 #endif
7471 #ifdef TARGET_NR_access
7472     case TARGET_NR_access:
7473         if (!(p = lock_user_string(arg1))) {
7474             return -TARGET_EFAULT;
7475         }
7476         ret = get_errno(access(path(p), arg2));
7477         unlock_user(p, arg1, 0);
7478         return ret;
7479 #endif
7480 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7481     case TARGET_NR_faccessat:
7482         if (!(p = lock_user_string(arg2))) {
7483             return -TARGET_EFAULT;
7484         }
7485         ret = get_errno(faccessat(arg1, p, arg3, 0));
7486         unlock_user(p, arg2, 0);
7487         return ret;
7488 #endif
7489 #ifdef TARGET_NR_nice /* not on alpha */
7490     case TARGET_NR_nice:
7491         return get_errno(nice(arg1));
7492 #endif
7493     case TARGET_NR_sync:
7494         sync();
7495         return 0;
7496 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7497     case TARGET_NR_syncfs:
7498         return get_errno(syncfs(arg1));
7499 #endif
7500     case TARGET_NR_kill:
7501         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7502 #ifdef TARGET_NR_rename
7503     case TARGET_NR_rename:
7504         {
7505             void *p2;
7506             p = lock_user_string(arg1);
7507             p2 = lock_user_string(arg2);
7508             if (!p || !p2)
7509                 ret = -TARGET_EFAULT;
7510             else
7511                 ret = get_errno(rename(p, p2));
7512             unlock_user(p2, arg2, 0);
7513             unlock_user(p, arg1, 0);
7514         }
7515         return ret;
7516 #endif
7517 #if defined(TARGET_NR_renameat)
7518     case TARGET_NR_renameat:
7519         {
7520             void *p2;
7521             p  = lock_user_string(arg2);
7522             p2 = lock_user_string(arg4);
7523             if (!p || !p2)
7524                 ret = -TARGET_EFAULT;
7525             else
7526                 ret = get_errno(renameat(arg1, p, arg3, p2));
7527             unlock_user(p2, arg4, 0);
7528             unlock_user(p, arg2, 0);
7529         }
7530         return ret;
7531 #endif
7532 #if defined(TARGET_NR_renameat2)
7533     case TARGET_NR_renameat2:
7534         {
7535             void *p2;
7536             p  = lock_user_string(arg2);
7537             p2 = lock_user_string(arg4);
7538             if (!p || !p2) {
7539                 ret = -TARGET_EFAULT;
7540             } else {
7541                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7542             }
7543             unlock_user(p2, arg4, 0);
7544             unlock_user(p, arg2, 0);
7545         }
7546         return ret;
7547 #endif
7548 #ifdef TARGET_NR_mkdir
7549     case TARGET_NR_mkdir:
7550         if (!(p = lock_user_string(arg1)))
7551             return -TARGET_EFAULT;
7552         ret = get_errno(mkdir(p, arg2));
7553         unlock_user(p, arg1, 0);
7554         return ret;
7555 #endif
7556 #if defined(TARGET_NR_mkdirat)
7557     case TARGET_NR_mkdirat:
7558         if (!(p = lock_user_string(arg2)))
7559             return -TARGET_EFAULT;
7560         ret = get_errno(mkdirat(arg1, p, arg3));
7561         unlock_user(p, arg2, 0);
7562         return ret;
7563 #endif
7564 #ifdef TARGET_NR_rmdir
7565     case TARGET_NR_rmdir:
7566         if (!(p = lock_user_string(arg1)))
7567             return -TARGET_EFAULT;
7568         ret = get_errno(rmdir(p));
7569         unlock_user(p, arg1, 0);
7570         return ret;
7571 #endif
7572     case TARGET_NR_dup:
7573         ret = get_errno(dup(arg1));
7574         if (ret >= 0) {
7575             fd_trans_dup(arg1, ret);
7576         }
7577         return ret;
7578 #ifdef TARGET_NR_pipe
7579     case TARGET_NR_pipe:
7580         return do_pipe(cpu_env, arg1, 0, 0);
7581 #endif
7582 #ifdef TARGET_NR_pipe2
7583     case TARGET_NR_pipe2:
7584         return do_pipe(cpu_env, arg1,
7585                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7586 #endif
7587     case TARGET_NR_times:
7588         {
7589             struct target_tms *tmsp;
7590             struct tms tms;
7591             ret = get_errno(times(&tms));
7592             if (arg1) {
7593                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7594                 if (!tmsp)
7595                     return -TARGET_EFAULT;
7596                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7597                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7598                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7599                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7600             }
7601             if (!is_error(ret))
7602                 ret = host_to_target_clock_t(ret);
7603         }
7604         return ret;
7605     case TARGET_NR_acct:
7606         if (arg1 == 0) {
7607             ret = get_errno(acct(NULL));
7608         } else {
7609             if (!(p = lock_user_string(arg1))) {
7610                 return -TARGET_EFAULT;
7611             }
7612             ret = get_errno(acct(path(p)));
7613             unlock_user(p, arg1, 0);
7614         }
7615         return ret;
7616 #ifdef TARGET_NR_umount2
7617     case TARGET_NR_umount2:
7618         if (!(p = lock_user_string(arg1)))
7619             return -TARGET_EFAULT;
7620         ret = get_errno(umount2(p, arg2));
7621         unlock_user(p, arg1, 0);
7622         return ret;
7623 #endif
7624     case TARGET_NR_ioctl:
7625         return do_ioctl(arg1, arg2, arg3);
7626 #ifdef TARGET_NR_fcntl
7627     case TARGET_NR_fcntl:
7628         return do_fcntl(arg1, arg2, arg3);
7629 #endif
7630     case TARGET_NR_setpgid:
7631         return get_errno(setpgid(arg1, arg2));
7632     case TARGET_NR_umask:
7633         return get_errno(umask(arg1));
7634     case TARGET_NR_chroot:
7635         if (!(p = lock_user_string(arg1)))
7636             return -TARGET_EFAULT;
7637         ret = get_errno(chroot(p));
7638         unlock_user(p, arg1, 0);
7639         return ret;
7640 #ifdef TARGET_NR_dup2
7641     case TARGET_NR_dup2:
7642         ret = get_errno(dup2(arg1, arg2));
7643         if (ret >= 0) {
7644             fd_trans_dup(arg1, arg2);
7645         }
7646         return ret;
7647 #endif
7648 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7649     case TARGET_NR_dup3:
7650     {
7651         int host_flags;
7652 
7653         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7654             return -TARGET_EINVAL;
7655         }
7656         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7657         ret = get_errno(dup3(arg1, arg2, host_flags));
7658         if (ret >= 0) {
7659             fd_trans_dup(arg1, arg2);
7660         }
7661         return ret;
7662     }
7663 #endif
7664 #ifdef TARGET_NR_getppid /* not on alpha */
7665     case TARGET_NR_getppid:
7666         return get_errno(getppid());
7667 #endif
7668 #ifdef TARGET_NR_getpgrp
7669     case TARGET_NR_getpgrp:
7670         return get_errno(getpgrp());
7671 #endif
7672     case TARGET_NR_setsid:
7673         return get_errno(setsid());
7674 #ifdef TARGET_NR_sigaction
7675     case TARGET_NR_sigaction:
7676         {
7677 #if defined(TARGET_ALPHA)
7678             struct target_sigaction act, oact, *pact = 0;
7679             struct target_old_sigaction *old_act;
7680             if (arg2) {
7681                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7682                     return -TARGET_EFAULT;
7683                 act._sa_handler = old_act->_sa_handler;
7684                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7685                 act.sa_flags = old_act->sa_flags;
7686                 act.sa_restorer = 0;
7687                 unlock_user_struct(old_act, arg2, 0);
7688                 pact = &act;
7689             }
7690             ret = get_errno(do_sigaction(arg1, pact, &oact));
7691             if (!is_error(ret) && arg3) {
7692                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7693                     return -TARGET_EFAULT;
7694                 old_act->_sa_handler = oact._sa_handler;
7695                 old_act->sa_mask = oact.sa_mask.sig[0];
7696                 old_act->sa_flags = oact.sa_flags;
7697                 unlock_user_struct(old_act, arg3, 1);
7698             }
7699 #elif defined(TARGET_MIPS)
7700             struct target_sigaction act, oact, *pact, *old_act;
7701 
7702             if (arg2) {
7703                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7704                     return -TARGET_EFAULT;
7705                 act._sa_handler = old_act->_sa_handler;
7706                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7707                 act.sa_flags = old_act->sa_flags;
7708                 unlock_user_struct(old_act, arg2, 0);
7709                 pact = &act;
7710             } else {
7711                 pact = NULL;
7712             }
7713 
7714             ret = get_errno(do_sigaction(arg1, pact, &oact));
7715 
7716             if (!is_error(ret) && arg3) {
7717                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7718                     return -TARGET_EFAULT;
7719                 old_act->_sa_handler = oact._sa_handler;
7720                 old_act->sa_flags = oact.sa_flags;
7721                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7722                 old_act->sa_mask.sig[1] = 0;
7723                 old_act->sa_mask.sig[2] = 0;
7724                 old_act->sa_mask.sig[3] = 0;
7725                 unlock_user_struct(old_act, arg3, 1);
7726             }
7727 #else
7728             struct target_old_sigaction *old_act;
7729             struct target_sigaction act, oact, *pact;
7730             if (arg2) {
7731                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7732                     return -TARGET_EFAULT;
7733                 act._sa_handler = old_act->_sa_handler;
7734                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7735                 act.sa_flags = old_act->sa_flags;
7736                 act.sa_restorer = old_act->sa_restorer;
7737 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7738                 act.ka_restorer = 0;
7739 #endif
7740                 unlock_user_struct(old_act, arg2, 0);
7741                 pact = &act;
7742             } else {
7743                 pact = NULL;
7744             }
7745             ret = get_errno(do_sigaction(arg1, pact, &oact));
7746             if (!is_error(ret) && arg3) {
7747                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7748                     return -TARGET_EFAULT;
7749                 old_act->_sa_handler = oact._sa_handler;
7750                 old_act->sa_mask = oact.sa_mask.sig[0];
7751                 old_act->sa_flags = oact.sa_flags;
7752                 old_act->sa_restorer = oact.sa_restorer;
7753                 unlock_user_struct(old_act, arg3, 1);
7754             }
7755 #endif
7756         }
7757         return ret;
7758 #endif
7759     case TARGET_NR_rt_sigaction:
7760         {
7761 #if defined(TARGET_ALPHA)
7762             /* For Alpha and SPARC this is a 5 argument syscall, with
7763              * a 'restorer' parameter which must be copied into the
7764              * sa_restorer field of the sigaction struct.
7765              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7766              * and arg5 is the sigsetsize.
7767              * Alpha also has a separate rt_sigaction struct that it uses
7768              * here; SPARC uses the usual sigaction struct.
7769              */
7770             struct target_rt_sigaction *rt_act;
7771             struct target_sigaction act, oact, *pact = 0;
7772 
7773             if (arg4 != sizeof(target_sigset_t)) {
7774                 return -TARGET_EINVAL;
7775             }
7776             if (arg2) {
7777                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7778                     return -TARGET_EFAULT;
7779                 act._sa_handler = rt_act->_sa_handler;
7780                 act.sa_mask = rt_act->sa_mask;
7781                 act.sa_flags = rt_act->sa_flags;
7782                 act.sa_restorer = arg5;
7783                 unlock_user_struct(rt_act, arg2, 0);
7784                 pact = &act;
7785             }
7786             ret = get_errno(do_sigaction(arg1, pact, &oact));
7787             if (!is_error(ret) && arg3) {
7788                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7789                     return -TARGET_EFAULT;
7790                 rt_act->_sa_handler = oact._sa_handler;
7791                 rt_act->sa_mask = oact.sa_mask;
7792                 rt_act->sa_flags = oact.sa_flags;
7793                 unlock_user_struct(rt_act, arg3, 1);
7794             }
7795 #else
7796 #ifdef TARGET_SPARC
7797             target_ulong restorer = arg4;
7798             target_ulong sigsetsize = arg5;
7799 #else
7800             target_ulong sigsetsize = arg4;
7801 #endif
7802             struct target_sigaction *act;
7803             struct target_sigaction *oact;
7804 
7805             if (sigsetsize != sizeof(target_sigset_t)) {
7806                 return -TARGET_EINVAL;
7807             }
7808             if (arg2) {
7809                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7810                     return -TARGET_EFAULT;
7811                 }
7812 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7813                 act->ka_restorer = restorer;
7814 #endif
7815             } else {
7816                 act = NULL;
7817             }
7818             if (arg3) {
7819                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7820                     ret = -TARGET_EFAULT;
7821                     goto rt_sigaction_fail;
7822                 }
7823             } else
7824                 oact = NULL;
7825             ret = get_errno(do_sigaction(arg1, act, oact));
7826     rt_sigaction_fail:
7827             if (act)
7828                 unlock_user_struct(act, arg2, 0);
7829             if (oact)
7830                 unlock_user_struct(oact, arg3, 1);
7831 #endif
7832         }
7833         return ret;
7834 #ifdef TARGET_NR_sgetmask /* not on alpha */
7835     case TARGET_NR_sgetmask:
7836         {
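             /* sgetmask() returns the current blocked-signal mask as an
              * old-style single-word value.
              */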
7837             sigset_t cur_set;
7838             abi_ulong target_set;
7839             ret = do_sigprocmask(0, NULL, &cur_set);
7840             if (!ret) {
7841                 host_to_target_old_sigset(&target_set, &cur_set);
7842                 ret = target_set;
7843             }
7844         }
7845         return ret;
7846 #endif
7847 #ifdef TARGET_NR_ssetmask /* not on alpha */
7848     case TARGET_NR_ssetmask:
7849         {
7850             sigset_t set, oset;
7851             abi_ulong target_set = arg1;
7852             target_to_host_old_sigset(&set, &target_set);
7853             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7854             if (!ret) {
7855                 host_to_target_old_sigset(&target_set, &oset);
7856                 ret = target_set;
7857             }
7858         }
7859         return ret;
7860 #endif
7861 #ifdef TARGET_NR_sigprocmask
7862     case TARGET_NR_sigprocmask:
7863         {
7864 #if defined(TARGET_ALPHA)
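             /* Alpha passes the new mask by value in arg2 and returns the old
              * mask as the syscall result rather than through a pointer.
              */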
7865             sigset_t set, oldset;
7866             abi_ulong mask;
7867             int how;
7868 
7869             switch (arg1) {
7870             case TARGET_SIG_BLOCK:
7871                 how = SIG_BLOCK;
7872                 break;
7873             case TARGET_SIG_UNBLOCK:
7874                 how = SIG_UNBLOCK;
7875                 break;
7876             case TARGET_SIG_SETMASK:
7877                 how = SIG_SETMASK;
7878                 break;
7879             default:
7880                 return -TARGET_EINVAL;
7881             }
7882             mask = arg2;
7883             target_to_host_old_sigset(&set, &mask);
7884 
7885             ret = do_sigprocmask(how, &set, &oldset);
7886             if (!is_error(ret)) {
7887                 host_to_target_old_sigset(&mask, &oldset);
7888                 ret = mask;
7889                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7890             }
7891 #else
7892             sigset_t set, oldset, *set_ptr;
7893             int how;
7894 
7895             if (arg2) {
7896                 switch (arg1) {
7897                 case TARGET_SIG_BLOCK:
7898                     how = SIG_BLOCK;
7899                     break;
7900                 case TARGET_SIG_UNBLOCK:
7901                     how = SIG_UNBLOCK;
7902                     break;
7903                 case TARGET_SIG_SETMASK:
7904                     how = SIG_SETMASK;
7905                     break;
7906                 default:
7907                     return -TARGET_EINVAL;
7908                 }
7909                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7910                     return -TARGET_EFAULT;
7911                 target_to_host_old_sigset(&set, p);
7912                 unlock_user(p, arg2, 0);
7913                 set_ptr = &set;
7914             } else {
7915                 how = 0;
7916                 set_ptr = NULL;
7917             }
7918             ret = do_sigprocmask(how, set_ptr, &oldset);
7919             if (!is_error(ret) && arg3) {
7920                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7921                     return -TARGET_EFAULT;
7922                 host_to_target_old_sigset(p, &oldset);
7923                 unlock_user(p, arg3, sizeof(target_sigset_t));
7924             }
7925 #endif
7926         }
7927         return ret;
7928 #endif
7929     case TARGET_NR_rt_sigprocmask:
7930         {
7931             int how = arg1;
7932             sigset_t set, oldset, *set_ptr;
7933 
7934             if (arg4 != sizeof(target_sigset_t)) {
7935                 return -TARGET_EINVAL;
7936             }
7937 
7938             if (arg2) {
7939                 switch(how) {
7940                 case TARGET_SIG_BLOCK:
7941                     how = SIG_BLOCK;
7942                     break;
7943                 case TARGET_SIG_UNBLOCK:
7944                     how = SIG_UNBLOCK;
7945                     break;
7946                 case TARGET_SIG_SETMASK:
7947                     how = SIG_SETMASK;
7948                     break;
7949                 default:
7950                     return -TARGET_EINVAL;
7951                 }
7952                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7953                     return -TARGET_EFAULT;
7954                 target_to_host_sigset(&set, p);
7955                 unlock_user(p, arg2, 0);
7956                 set_ptr = &set;
7957             } else {
7958                 how = 0;
7959                 set_ptr = NULL;
7960             }
7961             ret = do_sigprocmask(how, set_ptr, &oldset);
7962             if (!is_error(ret) && arg3) {
7963                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7964                     return -TARGET_EFAULT;
7965                 host_to_target_sigset(p, &oldset);
7966                 unlock_user(p, arg3, sizeof(target_sigset_t));
7967             }
7968         }
7969         return ret;
7970 #ifdef TARGET_NR_sigpending
7971     case TARGET_NR_sigpending:
7972         {
7973             sigset_t set;
7974             ret = get_errno(sigpending(&set));
7975             if (!is_error(ret)) {
7976                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7977                     return -TARGET_EFAULT;
7978                 host_to_target_old_sigset(p, &set);
7979                 unlock_user(p, arg1, sizeof(target_sigset_t));
7980             }
7981         }
7982         return ret;
7983 #endif
7984     case TARGET_NR_rt_sigpending:
7985         {
7986             sigset_t set;
7987 
7988             /* Yes, this check is >, not != like most. We follow the kernel's
7989              * logic here: it does it this way because NR_sigpending is
7990              * implemented through the same code path, and in that case the
7991              * old_sigset_t is smaller in size.
7992              */
7993             if (arg2 > sizeof(target_sigset_t)) {
7994                 return -TARGET_EINVAL;
7995             }
7996 
7997             ret = get_errno(sigpending(&set));
7998             if (!is_error(ret)) {
7999                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8000                     return -TARGET_EFAULT;
8001                 host_to_target_sigset(p, &set);
8002                 unlock_user(p, arg1, sizeof(target_sigset_t));
8003             }
8004         }
8005         return ret;
8006 #ifdef TARGET_NR_sigsuspend
8007     case TARGET_NR_sigsuspend:
8008         {
8009             TaskState *ts = cpu->opaque;
8010 #if defined(TARGET_ALPHA)
8011             abi_ulong mask = arg1;
8012             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8013 #else
8014             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8015                 return -TARGET_EFAULT;
8016             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8017             unlock_user(p, arg1, 0);
8018 #endif
8019             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8020                                                SIGSET_T_SIZE));
8021             if (ret != -TARGET_ERESTARTSYS) {
8022                 ts->in_sigsuspend = 1;
8023             }
8024         }
8025         return ret;
8026 #endif
8027     case TARGET_NR_rt_sigsuspend:
8028         {
8029             TaskState *ts = cpu->opaque;
8030 
8031             if (arg2 != sizeof(target_sigset_t)) {
8032                 return -TARGET_EINVAL;
8033             }
8034             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8035                 return -TARGET_EFAULT;
8036             target_to_host_sigset(&ts->sigsuspend_mask, p);
8037             unlock_user(p, arg1, 0);
8038             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8039                                                SIGSET_T_SIZE));
8040             if (ret != -TARGET_ERESTARTSYS) {
8041                 ts->in_sigsuspend = 1;
8042             }
8043         }
8044         return ret;
8045     case TARGET_NR_rt_sigtimedwait:
8046         {
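             /* Wait for one of the signals in the set at arg1, optionally
              * bounded by the timespec at arg3.  On success the siginfo is
              * copied out to arg2 (if supplied) and the host signal number is
              * translated back to the target numbering.
              */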
8047             sigset_t set;
8048             struct timespec uts, *puts;
8049             siginfo_t uinfo;
8050 
8051             if (arg4 != sizeof(target_sigset_t)) {
8052                 return -TARGET_EINVAL;
8053             }
8054 
8055             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8056                 return -TARGET_EFAULT;
8057             target_to_host_sigset(&set, p);
8058             unlock_user(p, arg1, 0);
8059             if (arg3) {
8060                 puts = &uts;
8061                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8062             } else {
8063                 puts = NULL;
8064             }
8065             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8066                                                  SIGSET_T_SIZE));
8067             if (!is_error(ret)) {
8068                 if (arg2) {
8069                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8070                                   0);
8071                     if (!p) {
8072                         return -TARGET_EFAULT;
8073                     }
8074                     host_to_target_siginfo(p, &uinfo);
8075                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8076                 }
8077                 ret = host_to_target_signal(ret);
8078             }
8079         }
8080         return ret;
8081     case TARGET_NR_rt_sigqueueinfo:
8082         {
8083             siginfo_t uinfo;
8084 
8085             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8086             if (!p) {
8087                 return -TARGET_EFAULT;
8088             }
8089             target_to_host_siginfo(&uinfo, p);
8090             unlock_user(p, arg3, 0);
8091             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8092         }
8093         return ret;
8094     case TARGET_NR_rt_tgsigqueueinfo:
8095         {
8096             siginfo_t uinfo;
8097 
8098             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8099             if (!p) {
8100                 return -TARGET_EFAULT;
8101             }
8102             target_to_host_siginfo(&uinfo, p);
8103             unlock_user(p, arg4, 0);
8104             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8105         }
8106         return ret;
8107 #ifdef TARGET_NR_sigreturn
8108     case TARGET_NR_sigreturn:
8109         if (block_signals()) {
8110             return -TARGET_ERESTARTSYS;
8111         }
8112         return do_sigreturn(cpu_env);
8113 #endif
8114     case TARGET_NR_rt_sigreturn:
8115         if (block_signals()) {
8116             return -TARGET_ERESTARTSYS;
8117         }
8118         return do_rt_sigreturn(cpu_env);
8119     case TARGET_NR_sethostname:
8120         if (!(p = lock_user_string(arg1)))
8121             return -TARGET_EFAULT;
8122         ret = get_errno(sethostname(p, arg2));
8123         unlock_user(p, arg1, 0);
8124         return ret;
8125 #ifdef TARGET_NR_setrlimit
8126     case TARGET_NR_setrlimit:
8127         {
8128             int resource = target_to_host_resource(arg1);
8129             struct target_rlimit *target_rlim;
8130             struct rlimit rlim;
8131             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8132                 return -TARGET_EFAULT;
8133             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8134             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8135             unlock_user_struct(target_rlim, arg2, 0);
8136             /*
8137              * If we just passed through resource limit settings for memory then
8138              * they would also apply to QEMU's own allocations, and QEMU will
8139              * crash or hang or die if its allocations fail. Ideally we would
8140              * track the guest allocations in QEMU and apply the limits ourselves.
8141              * For now, just tell the guest the call succeeded but don't actually
8142              * limit anything.
8143              */
8144             if (resource != RLIMIT_AS &&
8145                 resource != RLIMIT_DATA &&
8146                 resource != RLIMIT_STACK) {
8147                 return get_errno(setrlimit(resource, &rlim));
8148             } else {
8149                 return 0;
8150             }
8151         }
8152 #endif
8153 #ifdef TARGET_NR_getrlimit
8154     case TARGET_NR_getrlimit:
8155         {
8156             int resource = target_to_host_resource(arg1);
8157             struct target_rlimit *target_rlim;
8158             struct rlimit rlim;
8159 
8160             ret = get_errno(getrlimit(resource, &rlim));
8161             if (!is_error(ret)) {
8162                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8163                     return -TARGET_EFAULT;
8164                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8165                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8166                 unlock_user_struct(target_rlim, arg2, 1);
8167             }
8168         }
8169         return ret;
8170 #endif
8171     case TARGET_NR_getrusage:
8172         {
8173             struct rusage rusage;
8174             ret = get_errno(getrusage(arg1, &rusage));
8175             if (!is_error(ret)) {
8176                 ret = host_to_target_rusage(arg2, &rusage);
8177             }
8178         }
8179         return ret;
8180     case TARGET_NR_gettimeofday:
8181         {
8182             struct timeval tv;
8183             ret = get_errno(gettimeofday(&tv, NULL));
8184             if (!is_error(ret)) {
8185                 if (copy_to_user_timeval(arg1, &tv))
8186                     return -TARGET_EFAULT;
8187             }
8188         }
8189         return ret;
8190     case TARGET_NR_settimeofday:
8191         {
8192             struct timeval tv, *ptv = NULL;
8193             struct timezone tz, *ptz = NULL;
8194 
8195             if (arg1) {
8196                 if (copy_from_user_timeval(&tv, arg1)) {
8197                     return -TARGET_EFAULT;
8198                 }
8199                 ptv = &tv;
8200             }
8201 
8202             if (arg2) {
8203                 if (copy_from_user_timezone(&tz, arg2)) {
8204                     return -TARGET_EFAULT;
8205                 }
8206                 ptz = &tz;
8207             }
8208 
8209             return get_errno(settimeofday(ptv, ptz));
8210         }
8211 #if defined(TARGET_NR_select)
8212     case TARGET_NR_select:
8213 #if defined(TARGET_WANT_NI_OLD_SELECT)
8214         /* Some architectures used to implement old_select here,
8215          * but now return ENOSYS for it.
8216          */
8217         ret = -TARGET_ENOSYS;
8218 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8219         ret = do_old_select(arg1);
8220 #else
8221         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8222 #endif
8223         return ret;
8224 #endif
8225 #ifdef TARGET_NR_pselect6
8226     case TARGET_NR_pselect6:
8227         {
8228             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8229             fd_set rfds, wfds, efds;
8230             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8231             struct timespec ts, *ts_ptr;
8232 
8233             /*
8234              * The 6th arg is actually two args smashed together,
8235              * so we cannot use the C library.
8236              */
8237             sigset_t set;
8238             struct {
8239                 sigset_t *set;
8240                 size_t size;
8241             } sig, *sig_ptr;
8242 
8243             abi_ulong arg_sigset, arg_sigsize, *arg7;
8244             target_sigset_t *target_sigset;
8245 
8246             n = arg1;
8247             rfd_addr = arg2;
8248             wfd_addr = arg3;
8249             efd_addr = arg4;
8250             ts_addr = arg5;
8251 
8252             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8253             if (ret) {
8254                 return ret;
8255             }
8256             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8257             if (ret) {
8258                 return ret;
8259             }
8260             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8261             if (ret) {
8262                 return ret;
8263             }
8264 
8265             /*
8266              * This takes a timespec, and not a timeval, so we cannot
8267              * use the do_select() helper ...
8268              */
8269             if (ts_addr) {
8270                 if (target_to_host_timespec(&ts, ts_addr)) {
8271                     return -TARGET_EFAULT;
8272                 }
8273                 ts_ptr = &ts;
8274             } else {
8275                 ts_ptr = NULL;
8276             }
8277 
8278             /* Extract the two packed args for the sigset */
8279             if (arg6) {
8280                 sig_ptr = &sig;
8281                 sig.size = SIGSET_T_SIZE;
8282 
8283                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8284                 if (!arg7) {
8285                     return -TARGET_EFAULT;
8286                 }
8287                 arg_sigset = tswapal(arg7[0]);
8288                 arg_sigsize = tswapal(arg7[1]);
8289                 unlock_user(arg7, arg6, 0);
8290 
8291                 if (arg_sigset) {
8292                     sig.set = &set;
8293                     if (arg_sigsize != sizeof(*target_sigset)) {
8294                         /* Like the kernel, we enforce correct size sigsets */
8295                         return -TARGET_EINVAL;
8296                     }
8297                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8298                                               sizeof(*target_sigset), 1);
8299                     if (!target_sigset) {
8300                         return -TARGET_EFAULT;
8301                     }
8302                     target_to_host_sigset(&set, target_sigset);
8303                     unlock_user(target_sigset, arg_sigset, 0);
8304                 } else {
8305                     sig.set = NULL;
8306                 }
8307             } else {
8308                 sig_ptr = NULL;
8309             }
8310 
8311             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8312                                           ts_ptr, sig_ptr));
8313 
8314             if (!is_error(ret)) {
8315                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8316                     return -TARGET_EFAULT;
8317                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8318                     return -TARGET_EFAULT;
8319                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8320                     return -TARGET_EFAULT;
8321 
8322                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8323                     return -TARGET_EFAULT;
8324             }
8325         }
8326         return ret;
8327 #endif
8328 #ifdef TARGET_NR_symlink
8329     case TARGET_NR_symlink:
8330         {
8331             void *p2;
8332             p = lock_user_string(arg1);
8333             p2 = lock_user_string(arg2);
8334             if (!p || !p2)
8335                 ret = -TARGET_EFAULT;
8336             else
8337                 ret = get_errno(symlink(p, p2));
8338             unlock_user(p2, arg2, 0);
8339             unlock_user(p, arg1, 0);
8340         }
8341         return ret;
8342 #endif
8343 #if defined(TARGET_NR_symlinkat)
8344     case TARGET_NR_symlinkat:
8345         {
8346             void *p2;
8347             p  = lock_user_string(arg1);
8348             p2 = lock_user_string(arg3);
8349             if (!p || !p2)
8350                 ret = -TARGET_EFAULT;
8351             else
8352                 ret = get_errno(symlinkat(p, arg2, p2));
8353             unlock_user(p2, arg3, 0);
8354             unlock_user(p, arg1, 0);
8355         }
8356         return ret;
8357 #endif
8358 #ifdef TARGET_NR_readlink
8359     case TARGET_NR_readlink:
8360         {
8361             void *p2;
8362             p = lock_user_string(arg1);
8363             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8364             if (!p || !p2) {
8365                 ret = -TARGET_EFAULT;
8366             } else if (!arg3) {
8367                 /* Short circuit this for the magic exe check. */
8368                 ret = -TARGET_EINVAL;
8369             } else if (is_proc_myself((const char *)p, "exe")) {
8370                 char real[PATH_MAX], *temp;
8371                 temp = realpath(exec_path, real);
8372                 /* Return value is # of bytes that we wrote to the buffer. */
8373                 if (temp == NULL) {
8374                     ret = get_errno(-1);
8375                 } else {
8376                     /* Don't worry about sign mismatch as earlier mapping
8377                      * logic would have thrown a bad address error. */
8378                     ret = MIN(strlen(real), arg3);
8379                     /* We cannot NUL terminate the string. */
8380                     memcpy(p2, real, ret);
8381                 }
8382             } else {
8383                 ret = get_errno(readlink(path(p), p2, arg3));
8384             }
8385             unlock_user(p2, arg2, ret);
8386             unlock_user(p, arg1, 0);
8387         }
8388         return ret;
8389 #endif
8390 #if defined(TARGET_NR_readlinkat)
8391     case TARGET_NR_readlinkat:
8392         {
8393             void *p2;
8394             p  = lock_user_string(arg2);
8395             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8396             if (!p || !p2) {
8397                 ret = -TARGET_EFAULT;
8398             } else if (is_proc_myself((const char *)p, "exe")) {
8399                 char real[PATH_MAX], *temp;
8400                 temp = realpath(exec_path, real);
8401                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8402                 snprintf((char *)p2, arg4, "%s", real);
8403             } else {
8404                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8405             }
8406             unlock_user(p2, arg3, ret);
8407             unlock_user(p, arg2, 0);
8408         }
8409         return ret;
8410 #endif
8411 #ifdef TARGET_NR_swapon
8412     case TARGET_NR_swapon:
8413         if (!(p = lock_user_string(arg1)))
8414             return -TARGET_EFAULT;
8415         ret = get_errno(swapon(p, arg2));
8416         unlock_user(p, arg1, 0);
8417         return ret;
8418 #endif
8419     case TARGET_NR_reboot:
8420         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8421            /* arg4 (the command string) is only used with
                 * LINUX_REBOOT_CMD_RESTART2; it must be ignored in all other cases. */
8422            p = lock_user_string(arg4);
8423            if (!p) {
8424                return -TARGET_EFAULT;
8425            }
8426            ret = get_errno(reboot(arg1, arg2, arg3, p));
8427            unlock_user(p, arg4, 0);
8428         } else {
8429            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8430         }
8431         return ret;
8432 #ifdef TARGET_NR_mmap
8433     case TARGET_NR_mmap:
8434 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8435     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8436     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8437     || defined(TARGET_S390X)
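         /* These targets pass old-style mmap() a pointer to a block of six
          * arguments in guest memory, so unpack them before mapping.
          */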
8438         {
8439             abi_ulong *v;
8440             abi_ulong v1, v2, v3, v4, v5, v6;
8441             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8442                 return -TARGET_EFAULT;
8443             v1 = tswapal(v[0]);
8444             v2 = tswapal(v[1]);
8445             v3 = tswapal(v[2]);
8446             v4 = tswapal(v[3]);
8447             v5 = tswapal(v[4]);
8448             v6 = tswapal(v[5]);
8449             unlock_user(v, arg1, 0);
8450             ret = get_errno(target_mmap(v1, v2, v3,
8451                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8452                                         v5, v6));
8453         }
8454 #else
8455         ret = get_errno(target_mmap(arg1, arg2, arg3,
8456                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8457                                     arg5,
8458                                     arg6));
8459 #endif
8460         return ret;
8461 #endif
8462 #ifdef TARGET_NR_mmap2
8463     case TARGET_NR_mmap2:
8464 #ifndef MMAP_SHIFT
8465 #define MMAP_SHIFT 12
8466 #endif
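         /* mmap2 takes the file offset in units of pages (4 KiB unless
          * MMAP_SHIFT is defined otherwise), hence the shift before handing
          * it to target_mmap().
          */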
8467         ret = target_mmap(arg1, arg2, arg3,
8468                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8469                           arg5, arg6 << MMAP_SHIFT);
8470         return get_errno(ret);
8471 #endif
8472     case TARGET_NR_munmap:
8473         return get_errno(target_munmap(arg1, arg2));
8474     case TARGET_NR_mprotect:
8475         {
8476             TaskState *ts = cpu->opaque;
8477             /* Special hack to detect libc making the stack executable.  */
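             /* If so, strip PROT_GROWSDOWN and extend the range down to the
              * stack limit ourselves before calling target_mprotect().
              */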
8478             if ((arg3 & PROT_GROWSDOWN)
8479                 && arg1 >= ts->info->stack_limit
8480                 && arg1 <= ts->info->start_stack) {
8481                 arg3 &= ~PROT_GROWSDOWN;
8482                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8483                 arg1 = ts->info->stack_limit;
8484             }
8485         }
8486         return get_errno(target_mprotect(arg1, arg2, arg3));
8487 #ifdef TARGET_NR_mremap
8488     case TARGET_NR_mremap:
8489         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8490 #endif
8491         /* ??? msync/mlock/munlock are broken for softmmu.  */
8492 #ifdef TARGET_NR_msync
8493     case TARGET_NR_msync:
8494         return get_errno(msync(g2h(arg1), arg2, arg3));
8495 #endif
8496 #ifdef TARGET_NR_mlock
8497     case TARGET_NR_mlock:
8498         return get_errno(mlock(g2h(arg1), arg2));
8499 #endif
8500 #ifdef TARGET_NR_munlock
8501     case TARGET_NR_munlock:
8502         return get_errno(munlock(g2h(arg1), arg2));
8503 #endif
8504 #ifdef TARGET_NR_mlockall
8505     case TARGET_NR_mlockall:
8506         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8507 #endif
8508 #ifdef TARGET_NR_munlockall
8509     case TARGET_NR_munlockall:
8510         return get_errno(munlockall());
8511 #endif
8512 #ifdef TARGET_NR_truncate
8513     case TARGET_NR_truncate:
8514         if (!(p = lock_user_string(arg1)))
8515             return -TARGET_EFAULT;
8516         ret = get_errno(truncate(p, arg2));
8517         unlock_user(p, arg1, 0);
8518         return ret;
8519 #endif
8520 #ifdef TARGET_NR_ftruncate
8521     case TARGET_NR_ftruncate:
8522         return get_errno(ftruncate(arg1, arg2));
8523 #endif
8524     case TARGET_NR_fchmod:
8525         return get_errno(fchmod(arg1, arg2));
8526 #if defined(TARGET_NR_fchmodat)
8527     case TARGET_NR_fchmodat:
8528         if (!(p = lock_user_string(arg2)))
8529             return -TARGET_EFAULT;
8530         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8531         unlock_user(p, arg2, 0);
8532         return ret;
8533 #endif
8534     case TARGET_NR_getpriority:
8535         /* Note that negative values are valid for getpriority, so we must
8536            differentiate based on errno settings.  */
8537         errno = 0;
8538         ret = getpriority(arg1, arg2);
8539         if (ret == -1 && errno != 0) {
8540             return -host_to_target_errno(errno);
8541         }
8542 #ifdef TARGET_ALPHA
8543         /* Return value is the unbiased priority.  Signal no error.  */
8544         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8545 #else
8546         /* Return value is a biased priority to avoid negative numbers.  */
8547         ret = 20 - ret;
8548 #endif
8549         return ret;
8550     case TARGET_NR_setpriority:
8551         return get_errno(setpriority(arg1, arg2, arg3));
8552 #ifdef TARGET_NR_statfs
8553     case TARGET_NR_statfs:
8554         if (!(p = lock_user_string(arg1))) {
8555             return -TARGET_EFAULT;
8556         }
8557         ret = get_errno(statfs(path(p), &stfs));
8558         unlock_user(p, arg1, 0);
8559     convert_statfs:
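         /* Conversion path shared with the fstatfs case below: copy each host
          * statfs field into the target layout.
          */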
8560         if (!is_error(ret)) {
8561             struct target_statfs *target_stfs;
8562 
8563             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8564                 return -TARGET_EFAULT;
8565             __put_user(stfs.f_type, &target_stfs->f_type);
8566             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8567             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8568             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8569             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8570             __put_user(stfs.f_files, &target_stfs->f_files);
8571             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8572             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8573             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8574             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8575             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8576 #ifdef _STATFS_F_FLAGS
8577             __put_user(stfs.f_flags, &target_stfs->f_flags);
8578 #else
8579             __put_user(0, &target_stfs->f_flags);
8580 #endif
8581             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8582             unlock_user_struct(target_stfs, arg2, 1);
8583         }
8584         return ret;
8585 #endif
8586 #ifdef TARGET_NR_fstatfs
8587     case TARGET_NR_fstatfs:
8588         ret = get_errno(fstatfs(arg1, &stfs));
8589         goto convert_statfs;
8590 #endif
8591 #ifdef TARGET_NR_statfs64
8592     case TARGET_NR_statfs64:
8593         if (!(p = lock_user_string(arg1))) {
8594             return -TARGET_EFAULT;
8595         }
8596         ret = get_errno(statfs(path(p), &stfs));
8597         unlock_user(p, arg1, 0);
8598     convert_statfs64:
8599         if (!is_error(ret)) {
8600             struct target_statfs64 *target_stfs;
8601 
8602             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8603                 return -TARGET_EFAULT;
8604             __put_user(stfs.f_type, &target_stfs->f_type);
8605             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8606             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8607             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8608             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8609             __put_user(stfs.f_files, &target_stfs->f_files);
8610             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8611             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8612             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8613             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8614             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8615             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8616             unlock_user_struct(target_stfs, arg3, 1);
8617         }
8618         return ret;
8619     case TARGET_NR_fstatfs64:
8620         ret = get_errno(fstatfs(arg1, &stfs));
8621         goto convert_statfs64;
8622 #endif
8623 #ifdef TARGET_NR_socketcall
8624     case TARGET_NR_socketcall:
8625         return do_socketcall(arg1, arg2);
8626 #endif
8627 #ifdef TARGET_NR_accept
8628     case TARGET_NR_accept:
8629         return do_accept4(arg1, arg2, arg3, 0);
8630 #endif
8631 #ifdef TARGET_NR_accept4
8632     case TARGET_NR_accept4:
8633         return do_accept4(arg1, arg2, arg3, arg4);
8634 #endif
8635 #ifdef TARGET_NR_bind
8636     case TARGET_NR_bind:
8637         return do_bind(arg1, arg2, arg3);
8638 #endif
8639 #ifdef TARGET_NR_connect
8640     case TARGET_NR_connect:
8641         return do_connect(arg1, arg2, arg3);
8642 #endif
8643 #ifdef TARGET_NR_getpeername
8644     case TARGET_NR_getpeername:
8645         return do_getpeername(arg1, arg2, arg3);
8646 #endif
8647 #ifdef TARGET_NR_getsockname
8648     case TARGET_NR_getsockname:
8649         return do_getsockname(arg1, arg2, arg3);
8650 #endif
8651 #ifdef TARGET_NR_getsockopt
8652     case TARGET_NR_getsockopt:
8653         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8654 #endif
8655 #ifdef TARGET_NR_listen
8656     case TARGET_NR_listen:
8657         return get_errno(listen(arg1, arg2));
8658 #endif
8659 #ifdef TARGET_NR_recv
8660     case TARGET_NR_recv:
8661         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8662 #endif
8663 #ifdef TARGET_NR_recvfrom
8664     case TARGET_NR_recvfrom:
8665         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8666 #endif
8667 #ifdef TARGET_NR_recvmsg
8668     case TARGET_NR_recvmsg:
8669         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8670 #endif
8671 #ifdef TARGET_NR_send
8672     case TARGET_NR_send:
8673         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8674 #endif
8675 #ifdef TARGET_NR_sendmsg
8676     case TARGET_NR_sendmsg:
8677         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8678 #endif
8679 #ifdef TARGET_NR_sendmmsg
8680     case TARGET_NR_sendmmsg:
8681         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8682     case TARGET_NR_recvmmsg:
8683         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8684 #endif
8685 #ifdef TARGET_NR_sendto
8686     case TARGET_NR_sendto:
8687         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8688 #endif
8689 #ifdef TARGET_NR_shutdown
8690     case TARGET_NR_shutdown:
8691         return get_errno(shutdown(arg1, arg2));
8692 #endif
8693 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8694     case TARGET_NR_getrandom:
8695         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8696         if (!p) {
8697             return -TARGET_EFAULT;
8698         }
8699         ret = get_errno(getrandom(p, arg2, arg3));
8700         unlock_user(p, arg1, ret);
8701         return ret;
8702 #endif
8703 #ifdef TARGET_NR_socket
8704     case TARGET_NR_socket:
8705         return do_socket(arg1, arg2, arg3);
8706 #endif
8707 #ifdef TARGET_NR_socketpair
8708     case TARGET_NR_socketpair:
8709         return do_socketpair(arg1, arg2, arg3, arg4);
8710 #endif
8711 #ifdef TARGET_NR_setsockopt
8712     case TARGET_NR_setsockopt:
8713         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8714 #endif
8715 #if defined(TARGET_NR_syslog)
8716     case TARGET_NR_syslog:
8717         {
8718             int len = arg3;
8719 
8720             switch (arg1) {
8721             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8722             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8723             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8724             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8725             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8726             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8727             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8728             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8729                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8730             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8731             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8732             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8733                 {
8734                     if (len < 0) {
8735                         return -TARGET_EINVAL;
8736                     }
8737                     if (len == 0) {
8738                         return 0;
8739                     }
8740                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8741                     if (!p) {
8742                         return -TARGET_EFAULT;
8743                     }
8744                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8745                     unlock_user(p, arg2, arg3);
8746                 }
8747                 return ret;
8748             default:
8749                 return -TARGET_EINVAL;
8750             }
8751         }
8752         break;
8753 #endif
8754     case TARGET_NR_setitimer:
8755         {
8756             struct itimerval value, ovalue, *pvalue;
8757 
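             /* The guest itimerval is a pair of target_timeval structs
              * (it_interval followed by it_value); convert each half
              * separately.
              */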
8758             if (arg2) {
8759                 pvalue = &value;
8760                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8761                     || copy_from_user_timeval(&pvalue->it_value,
8762                                               arg2 + sizeof(struct target_timeval)))
8763                     return -TARGET_EFAULT;
8764             } else {
8765                 pvalue = NULL;
8766             }
8767             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8768             if (!is_error(ret) && arg3) {
8769                 if (copy_to_user_timeval(arg3,
8770                                          &ovalue.it_interval)
8771                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8772                                             &ovalue.it_value))
8773                     return -TARGET_EFAULT;
8774             }
8775         }
8776         return ret;
8777     case TARGET_NR_getitimer:
8778         {
8779             struct itimerval value;
8780 
8781             ret = get_errno(getitimer(arg1, &value));
8782             if (!is_error(ret) && arg2) {
8783                 if (copy_to_user_timeval(arg2,
8784                                          &value.it_interval)
8785                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8786                                             &value.it_value))
8787                     return -TARGET_EFAULT;
8788             }
8789         }
8790         return ret;
8791 #ifdef TARGET_NR_stat
8792     case TARGET_NR_stat:
8793         if (!(p = lock_user_string(arg1))) {
8794             return -TARGET_EFAULT;
8795         }
8796         ret = get_errno(stat(path(p), &st));
8797         unlock_user(p, arg1, 0);
8798         goto do_stat;
8799 #endif
8800 #ifdef TARGET_NR_lstat
8801     case TARGET_NR_lstat:
8802         if (!(p = lock_user_string(arg1))) {
8803             return -TARGET_EFAULT;
8804         }
8805         ret = get_errno(lstat(path(p), &st));
8806         unlock_user(p, arg1, 0);
8807         goto do_stat;
8808 #endif
8809 #ifdef TARGET_NR_fstat
8810     case TARGET_NR_fstat:
8811         {
8812             ret = get_errno(fstat(arg1, &st));
8813 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8814         do_stat:
8815 #endif
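             /* Common conversion of the host struct stat into the target
              * layout, shared by stat, lstat and fstat via the do_stat label.
              */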
8816             if (!is_error(ret)) {
8817                 struct target_stat *target_st;
8818 
8819                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8820                     return -TARGET_EFAULT;
8821                 memset(target_st, 0, sizeof(*target_st));
8822                 __put_user(st.st_dev, &target_st->st_dev);
8823                 __put_user(st.st_ino, &target_st->st_ino);
8824                 __put_user(st.st_mode, &target_st->st_mode);
8825                 __put_user(st.st_uid, &target_st->st_uid);
8826                 __put_user(st.st_gid, &target_st->st_gid);
8827                 __put_user(st.st_nlink, &target_st->st_nlink);
8828                 __put_user(st.st_rdev, &target_st->st_rdev);
8829                 __put_user(st.st_size, &target_st->st_size);
8830                 __put_user(st.st_blksize, &target_st->st_blksize);
8831                 __put_user(st.st_blocks, &target_st->st_blocks);
8832                 __put_user(st.st_atime, &target_st->target_st_atime);
8833                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8834                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8835                 unlock_user_struct(target_st, arg2, 1);
8836             }
8837         }
8838         return ret;
8839 #endif
8840     case TARGET_NR_vhangup:
8841         return get_errno(vhangup());
8842 #ifdef TARGET_NR_syscall
8843     case TARGET_NR_syscall:
8844         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8845                           arg6, arg7, arg8, 0);
8846 #endif
8847     case TARGET_NR_wait4:
8848         {
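             /* On success, write the exit status back only if a child was
              * actually reaped; the rusage buffer is converted whenever the
              * guest supplied one.
              */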
8849             int status;
8850             abi_long status_ptr = arg2;
8851             struct rusage rusage, *rusage_ptr;
8852             abi_ulong target_rusage = arg4;
8853             abi_long rusage_err;
8854             if (target_rusage)
8855                 rusage_ptr = &rusage;
8856             else
8857                 rusage_ptr = NULL;
8858             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8859             if (!is_error(ret)) {
8860                 if (status_ptr && ret) {
8861                     status = host_to_target_waitstatus(status);
8862                     if (put_user_s32(status, status_ptr))
8863                         return -TARGET_EFAULT;
8864                 }
8865                 if (target_rusage) {
8866                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8867                     if (rusage_err) {
8868                         ret = rusage_err;
8869                     }
8870                 }
8871             }
8872         }
8873         return ret;
8874 #ifdef TARGET_NR_swapoff
8875     case TARGET_NR_swapoff:
8876         if (!(p = lock_user_string(arg1)))
8877             return -TARGET_EFAULT;
8878         ret = get_errno(swapoff(p));
8879         unlock_user(p, arg1, 0);
8880         return ret;
8881 #endif
8882     case TARGET_NR_sysinfo:
8883         {
8884             struct target_sysinfo *target_value;
8885             struct sysinfo value;
8886             ret = get_errno(sysinfo(&value));
8887             if (!is_error(ret) && arg1)
8888             {
8889                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8890                     return -TARGET_EFAULT;
8891                 __put_user(value.uptime, &target_value->uptime);
8892                 __put_user(value.loads[0], &target_value->loads[0]);
8893                 __put_user(value.loads[1], &target_value->loads[1]);
8894                 __put_user(value.loads[2], &target_value->loads[2]);
8895                 __put_user(value.totalram, &target_value->totalram);
8896                 __put_user(value.freeram, &target_value->freeram);
8897                 __put_user(value.sharedram, &target_value->sharedram);
8898                 __put_user(value.bufferram, &target_value->bufferram);
8899                 __put_user(value.totalswap, &target_value->totalswap);
8900                 __put_user(value.freeswap, &target_value->freeswap);
8901                 __put_user(value.procs, &target_value->procs);
8902                 __put_user(value.totalhigh, &target_value->totalhigh);
8903                 __put_user(value.freehigh, &target_value->freehigh);
8904                 __put_user(value.mem_unit, &target_value->mem_unit);
8905                 unlock_user_struct(target_value, arg1, 1);
8906             }
8907         }
8908         return ret;
8909 #ifdef TARGET_NR_ipc
8910     case TARGET_NR_ipc:
8911         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8912 #endif
8913 #ifdef TARGET_NR_semget
8914     case TARGET_NR_semget:
8915         return get_errno(semget(arg1, arg2, arg3));
8916 #endif
8917 #ifdef TARGET_NR_semop
8918     case TARGET_NR_semop:
8919         return do_semop(arg1, arg2, arg3);
8920 #endif
8921 #ifdef TARGET_NR_semctl
8922     case TARGET_NR_semctl:
8923         return do_semctl(arg1, arg2, arg3, arg4);
8924 #endif
8925 #ifdef TARGET_NR_msgctl
8926     case TARGET_NR_msgctl:
8927         return do_msgctl(arg1, arg2, arg3);
8928 #endif
8929 #ifdef TARGET_NR_msgget
8930     case TARGET_NR_msgget:
8931         return get_errno(msgget(arg1, arg2));
8932 #endif
8933 #ifdef TARGET_NR_msgrcv
8934     case TARGET_NR_msgrcv:
8935         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8936 #endif
8937 #ifdef TARGET_NR_msgsnd
8938     case TARGET_NR_msgsnd:
8939         return do_msgsnd(arg1, arg2, arg3, arg4);
8940 #endif
8941 #ifdef TARGET_NR_shmget
8942     case TARGET_NR_shmget:
8943         return get_errno(shmget(arg1, arg2, arg3));
8944 #endif
8945 #ifdef TARGET_NR_shmctl
8946     case TARGET_NR_shmctl:
8947         return do_shmctl(arg1, arg2, arg3);
8948 #endif
8949 #ifdef TARGET_NR_shmat
8950     case TARGET_NR_shmat:
8951         return do_shmat(cpu_env, arg1, arg2, arg3);
8952 #endif
8953 #ifdef TARGET_NR_shmdt
8954     case TARGET_NR_shmdt:
8955         return do_shmdt(arg1);
8956 #endif
8957     case TARGET_NR_fsync:
8958         return get_errno(fsync(arg1));
8959     case TARGET_NR_clone:
8960         /* Linux manages to have three different orderings for its
8961          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8962          * match the kernel's CONFIG_CLONE_* settings.
8963          * Microblaze is further special in that it uses a sixth
8964          * implicit argument to clone for the TLS pointer.
8965          */
8966 #if defined(TARGET_MICROBLAZE)
8967         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8968 #elif defined(TARGET_CLONE_BACKWARDS)
8969         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8970 #elif defined(TARGET_CLONE_BACKWARDS2)
8971         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8972 #else
8973         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8974 #endif
8975         return ret;
8976 #ifdef __NR_exit_group
8977         /* new thread calls */
8978     case TARGET_NR_exit_group:
8979         preexit_cleanup(cpu_env, arg1);
8980         return get_errno(exit_group(arg1));
8981 #endif
8982     case TARGET_NR_setdomainname:
8983         if (!(p = lock_user_string(arg1)))
8984             return -TARGET_EFAULT;
8985         ret = get_errno(setdomainname(p, arg2));
8986         unlock_user(p, arg1, 0);
8987         return ret;
8988     case TARGET_NR_uname:
8989         /* No need to transcode because we use the Linux syscall. */
8990         {
8991             struct new_utsname * buf;
8992 
8993             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8994                 return -TARGET_EFAULT;
8995             ret = get_errno(sys_uname(buf));
8996             if (!is_error(ret)) {
8997                 /* Overwrite the native machine name with whatever is being
8998                    emulated. */
8999                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9000                           sizeof(buf->machine));
9001                 /* Allow the user to override the reported release.  */
9002                 if (qemu_uname_release && *qemu_uname_release) {
9003                     g_strlcpy(buf->release, qemu_uname_release,
9004                               sizeof(buf->release));
9005                 }
9006             }
9007             unlock_user_struct(buf, arg1, 1);
9008         }
9009         return ret;
9010 #ifdef TARGET_I386
9011     case TARGET_NR_modify_ldt:
9012         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9013 #if !defined(TARGET_X86_64)
9014     case TARGET_NR_vm86:
9015         return do_vm86(cpu_env, arg1, arg2);
9016 #endif
9017 #endif
9018     case TARGET_NR_adjtimex:
9019         {
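             /* Convert the guest timex structure to the host layout, call the
              * host adjtimex(), then copy the updated structure back.
              */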
9020             struct timex host_buf;
9021 
9022             if (target_to_host_timex(&host_buf, arg1) != 0) {
9023                 return -TARGET_EFAULT;
9024             }
9025             ret = get_errno(adjtimex(&host_buf));
9026             if (!is_error(ret)) {
9027                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9028                     return -TARGET_EFAULT;
9029                 }
9030             }
9031         }
9032         return ret;
9033 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9034     case TARGET_NR_clock_adjtime:
9035         {
9036             struct timex htx, *phtx = &htx;
9037 
9038             if (target_to_host_timex(phtx, arg2) != 0) {
9039                 return -TARGET_EFAULT;
9040             }
9041             ret = get_errno(clock_adjtime(arg1, phtx));
9042             if (!is_error(ret) && phtx) {
9043                 if (host_to_target_timex(arg2, phtx) != 0) {
9044                     return -TARGET_EFAULT;
9045                 }
9046             }
9047         }
9048         return ret;
9049 #endif
9050     case TARGET_NR_getpgid:
9051         return get_errno(getpgid(arg1));
9052     case TARGET_NR_fchdir:
9053         return get_errno(fchdir(arg1));
9054     case TARGET_NR_personality:
9055         return get_errno(personality(arg1));
9056 #ifdef TARGET_NR__llseek /* Not on alpha */
9057     case TARGET_NR__llseek:
9058         {
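             /* arg2 and arg3 are the high and low 32-bit halves of the
              * offset; the resulting 64-bit position is written back through
              * the pointer in arg4.
              */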
9059             int64_t res;
9060 #if !defined(__NR_llseek)
9061             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9062             if (res == -1) {
9063                 ret = get_errno(res);
9064             } else {
9065                 ret = 0;
9066             }
9067 #else
9068             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9069 #endif
9070             if ((ret == 0) && put_user_s64(res, arg4)) {
9071                 return -TARGET_EFAULT;
9072             }
9073         }
9074         return ret;
9075 #endif
9076 #ifdef TARGET_NR_getdents
9077     case TARGET_NR_getdents:
9078 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9079 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
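         /* The host and target dirent layouts differ here (64-bit host
          * serving a 32-bit target), so read into a bounce buffer and repack
          * the records one at a time.
          */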
9080         {
9081             struct target_dirent *target_dirp;
9082             struct linux_dirent *dirp;
9083             abi_long count = arg3;
9084 
9085             dirp = g_try_malloc(count);
9086             if (!dirp) {
9087                 return -TARGET_ENOMEM;
9088             }
9089 
9090             ret = get_errno(sys_getdents(arg1, dirp, count));
9091             if (!is_error(ret)) {
9092                 struct linux_dirent *de;
9093                 struct target_dirent *tde;
9094                 int len = ret;
9095                 int reclen, treclen;
9096                 int count1, tnamelen;
9097 
9098                 count1 = 0;
9099                 de = dirp;
9100                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                         g_free(dirp);
9101                     return -TARGET_EFAULT;
                     }
9102                 tde = target_dirp;
9103                 while (len > 0) {
9104                     reclen = de->d_reclen;
9105                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9106                     assert(tnamelen >= 0);
9107                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9108                     assert(count1 + treclen <= count);
9109                     tde->d_reclen = tswap16(treclen);
9110                     tde->d_ino = tswapal(de->d_ino);
9111                     tde->d_off = tswapal(de->d_off);
9112                     memcpy(tde->d_name, de->d_name, tnamelen);
9113                     de = (struct linux_dirent *)((char *)de + reclen);
9114                     len -= reclen;
9115                     tde = (struct target_dirent *)((char *)tde + treclen);
9116                     count1 += treclen;
9117                 }
9118                 ret = count1;
9119                 unlock_user(target_dirp, arg2, ret);
9120             }
9121             g_free(dirp);
9122         }
9123 #else
9124         {
9125             struct linux_dirent *dirp;
9126             abi_long count = arg3;
9127 
9128             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9129                 return -TARGET_EFAULT;
9130             ret = get_errno(sys_getdents(arg1, dirp, count));
9131             if (!is_error(ret)) {
9132                 struct linux_dirent *de;
9133                 int len = ret;
9134                 int reclen;
9135                 de = dirp;
9136                 while (len > 0) {
9137                     reclen = de->d_reclen;
9138                     if (reclen > len)
9139                         break;
9140                     de->d_reclen = tswap16(reclen);
9141                     tswapls(&de->d_ino);
9142                     tswapls(&de->d_off);
9143                     de = (struct linux_dirent *)((char *)de + reclen);
9144                     len -= reclen;
9145                 }
9146             }
9147             unlock_user(dirp, arg2, ret);
9148         }
9149 #endif
9150 #else
9151         /* Implement getdents in terms of getdents64 */
9152         {
9153             struct linux_dirent64 *dirp;
9154             abi_long count = arg3;
9155 
9156             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9157             if (!dirp) {
9158                 return -TARGET_EFAULT;
9159             }
9160             ret = get_errno(sys_getdents64(arg1, dirp, count));
9161             if (!is_error(ret)) {
9162                 /* Convert the dirent64 structs to target dirent.  We do this
9163                  * in-place, since we can guarantee that a target_dirent is no
9164                  * larger than a dirent64; however this means we have to be
9165                  * careful to read everything before writing in the new format.
9166                  */
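                     /* Illustrative layout sketch (field widths approximate;
                      * see syscall_defs.h for the real target_dirent):
                      *   host:   linux_dirent64 { u64 d_ino; s64 d_off;
                      *                            u16 d_reclen; u8 d_type;
                      *                            char d_name[]; }
                      *   target: target_dirent  { abi_long d_ino; abi_long d_off;
                      *                            u16 d_reclen; char d_name[];
                      *                            d_type in the final byte }
                      */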
9167                 struct linux_dirent64 *de;
9168                 struct target_dirent *tde;
9169                 int len = ret;
9170                 int tlen = 0;
9171 
9172                 de = dirp;
9173                 tde = (struct target_dirent *)dirp;
9174                 while (len > 0) {
9175                     int namelen, treclen;
9176                     int reclen = de->d_reclen;
9177                     uint64_t ino = de->d_ino;
9178                     int64_t off = de->d_off;
9179                     uint8_t type = de->d_type;
9180 
9181                     namelen = strlen(de->d_name);
9182                     treclen = offsetof(struct target_dirent, d_name)
9183                         + namelen + 2;
9184                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
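                         /* The "+ 2" above reserves room for the name's
                          * trailing NUL plus the d_type byte; d_type is stored
                          * in the last byte of the aligned target record below.
                          */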
9185 
9186                     memmove(tde->d_name, de->d_name, namelen + 1);
9187                     tde->d_ino = tswapal(ino);
9188                     tde->d_off = tswapal(off);
9189                     tde->d_reclen = tswap16(treclen);
9190                     /* The target_dirent type is in what was formerly a padding
9191                      * byte at the end of the structure:
9192                      */
9193                     *(((char *)tde) + treclen - 1) = type;
9194 
9195                     de = (struct linux_dirent64 *)((char *)de + reclen);
9196                     tde = (struct target_dirent *)((char *)tde + treclen);
9197                     len -= reclen;
9198                     tlen += treclen;
9199                 }
9200                 ret = tlen;
9201             }
9202             unlock_user(dirp, arg2, ret);
9203         }
9204 #endif
9205         return ret;
9206 #endif /* TARGET_NR_getdents */
9207 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9208     case TARGET_NR_getdents64:
9209         {
9210             struct linux_dirent64 *dirp;
9211             abi_long count = arg3;
9212             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9213                 return -TARGET_EFAULT;
9214             ret = get_errno(sys_getdents64(arg1, dirp, count));
9215             if (!is_error(ret)) {
9216                 struct linux_dirent64 *de;
9217                 int len = ret;
9218                 int reclen;
9219                 de = dirp;
9220                 while (len > 0) {
9221                     reclen = de->d_reclen;
9222                     if (reclen > len)
9223                         break;
9224                     de->d_reclen = tswap16(reclen);
9225                     tswap64s((uint64_t *)&de->d_ino);
9226                     tswap64s((uint64_t *)&de->d_off);
9227                     de = (struct linux_dirent64 *)((char *)de + reclen);
9228                     len -= reclen;
9229                 }
9230             }
9231             unlock_user(dirp, arg2, ret);
9232         }
9233         return ret;
9234 #endif /* TARGET_NR_getdents64 */
9235 #if defined(TARGET_NR__newselect)
9236     case TARGET_NR__newselect:
9237         return do_select(arg1, arg2, arg3, arg4, arg5);
9238 #endif
9239 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9240 # ifdef TARGET_NR_poll
9241     case TARGET_NR_poll:
9242 # endif
9243 # ifdef TARGET_NR_ppoll
9244     case TARGET_NR_ppoll:
9245 # endif
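             /* Both poll() and ppoll() are funnelled through the host ppoll()
              * below: poll()'s millisecond timeout is converted to a struct
              * timespec, and no signal mask is passed.
              */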
9246         {
9247             struct target_pollfd *target_pfd;
9248             unsigned int nfds = arg2;
9249             struct pollfd *pfd;
9250             unsigned int i;
9251 
9252             pfd = NULL;
9253             target_pfd = NULL;
9254             if (nfds) {
9255                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9256                     return -TARGET_EINVAL;
9257                 }
9258 
9259                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9260                                        sizeof(struct target_pollfd) * nfds, 1);
9261                 if (!target_pfd) {
9262                     return -TARGET_EFAULT;
9263                 }
9264 
9265                 pfd = alloca(sizeof(struct pollfd) * nfds);
9266                 for (i = 0; i < nfds; i++) {
9267                     pfd[i].fd = tswap32(target_pfd[i].fd);
9268                     pfd[i].events = tswap16(target_pfd[i].events);
9269                 }
9270             }
9271 
9272             switch (num) {
9273 # ifdef TARGET_NR_ppoll
9274             case TARGET_NR_ppoll:
9275             {
9276                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9277                 target_sigset_t *target_set;
9278                 sigset_t _set, *set = &_set;
9279 
9280                 if (arg3) {
9281                     if (target_to_host_timespec(timeout_ts, arg3)) {
9282                         unlock_user(target_pfd, arg1, 0);
9283                         return -TARGET_EFAULT;
9284                     }
9285                 } else {
9286                     timeout_ts = NULL;
9287                 }
9288 
9289                 if (arg4) {
9290                     if (arg5 != sizeof(target_sigset_t)) {
9291                         unlock_user(target_pfd, arg1, 0);
9292                         return -TARGET_EINVAL;
9293                     }
9294 
9295                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9296                     if (!target_set) {
9297                         unlock_user(target_pfd, arg1, 0);
9298                         return -TARGET_EFAULT;
9299                     }
9300                     target_to_host_sigset(set, target_set);
9301                 } else {
9302                     set = NULL;
9303                 }
9304 
9305                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9306                                            set, SIGSET_T_SIZE));
9307 
9308                 if (!is_error(ret) && arg3) {
9309                     host_to_target_timespec(arg3, timeout_ts);
9310                 }
9311                 if (arg4) {
9312                     unlock_user(target_set, arg4, 0);
9313                 }
9314                 break;
9315             }
9316 # endif
9317 # ifdef TARGET_NR_poll
9318             case TARGET_NR_poll:
9319             {
9320                 struct timespec ts, *pts;
9321 
9322                 if (arg3 >= 0) {
9323                     /* Convert ms to secs, ns */
9324                     ts.tv_sec = arg3 / 1000;
9325                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9326                     pts = &ts;
9327                 } else {
9328                     /* A negative poll() timeout means "infinite" */
9329                     pts = NULL;
9330                 }
9331                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9332                 break;
9333             }
9334 # endif
9335             default:
9336                 g_assert_not_reached();
9337             }
9338 
9339             if (!is_error(ret)) {
9340                 for (i = 0; i < nfds; i++) {
9341                     target_pfd[i].revents = tswap16(pfd[i].revents);
9342                 }
9343             }
9344             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9345         }
9346         return ret;
9347 #endif
9348     case TARGET_NR_flock:
9349         /* NOTE: the flock constants seem to be the same on every
9350            Linux platform */
9351         return get_errno(safe_flock(arg1, arg2));
9352     case TARGET_NR_readv:
9353         {
9354             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9355             if (vec != NULL) {
9356                 ret = get_errno(safe_readv(arg1, vec, arg3));
9357                 unlock_iovec(vec, arg2, arg3, 1);
9358             } else {
9359                 ret = -host_to_target_errno(errno);
9360             }
9361         }
9362         return ret;
9363     case TARGET_NR_writev:
9364         {
9365             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9366             if (vec != NULL) {
9367                 ret = get_errno(safe_writev(arg1, vec, arg3));
9368                 unlock_iovec(vec, arg2, arg3, 0);
9369             } else {
9370                 ret = -host_to_target_errno(errno);
9371             }
9372         }
9373         return ret;
9374 #if defined(TARGET_NR_preadv)
9375     case TARGET_NR_preadv:
9376         {
9377             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9378             if (vec != NULL) {
9379                 unsigned long low, high;
9380 
9381                 target_to_host_low_high(arg4, arg5, &low, &high);
9382                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9383                 unlock_iovec(vec, arg2, arg3, 1);
9384             } else {
9385                 ret = -host_to_target_errno(errno);
9386            }
9387         }
9388         return ret;
9389 #endif
9390 #if defined(TARGET_NR_pwritev)
9391     case TARGET_NR_pwritev:
9392         {
9393             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9394             if (vec != NULL) {
9395                 unsigned long low, high;
9396 
9397                 target_to_host_low_high(arg4, arg5, &low, &high);
9398                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9399                 unlock_iovec(vec, arg2, arg3, 0);
9400             } else {
9401                 ret = -host_to_target_errno(errno);
9402            }
9403         }
9404         return ret;
9405 #endif
9406     case TARGET_NR_getsid:
9407         return get_errno(getsid(arg1));
9408 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9409     case TARGET_NR_fdatasync:
9410         return get_errno(fdatasync(arg1));
9411 #endif
9412 #ifdef TARGET_NR__sysctl
9413     case TARGET_NR__sysctl:
9414         /* We don't implement this, but ENOTDIR is always a safe
9415            return value. */
9416         return -TARGET_ENOTDIR;
9417 #endif
9418     case TARGET_NR_sched_getaffinity:
9419         {
9420             unsigned int mask_size;
9421             unsigned long *mask;
9422 
9423             /*
9424              * sched_getaffinity needs multiples of ulong, so need to take
9425              * care of mismatches between target ulong and host ulong sizes.
9426              */
9427             if (arg2 & (sizeof(abi_ulong) - 1)) {
9428                 return -TARGET_EINVAL;
9429             }
9430             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9431 
9432             mask = alloca(mask_size);
9433             memset(mask, 0, mask_size);
9434             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9435 
9436             if (!is_error(ret)) {
9437                 if (ret > arg2) {
9438                     /* More data returned than the caller's buffer will fit.
9439                      * This only happens if sizeof(abi_long) < sizeof(long)
9440                      * and the caller passed us a buffer holding an odd number
9441                      * of abi_longs. If the host kernel is actually using the
9442                      * extra 4 bytes then fail EINVAL; otherwise we can just
9443                      * ignore them and only copy the interesting part.
9444                      */
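                         /* Example: a 32-bit guest passing arg2 == 4 on a
                          * 64-bit host has mask_size rounded up to 8, so the
                          * kernel may legitimately report ret == 8 here.
                          */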
9445                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9446                     if (numcpus > arg2 * 8) {
9447                         return -TARGET_EINVAL;
9448                     }
9449                     ret = arg2;
9450                 }
9451 
9452                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9453                     return -TARGET_EFAULT;
9454                 }
9455             }
9456         }
9457         return ret;
9458     case TARGET_NR_sched_setaffinity:
9459         {
9460             unsigned int mask_size;
9461             unsigned long *mask;
9462 
9463             /*
9464              * sched_setaffinity needs multiples of ulong, so need to take
9465              * care of mismatches between target ulong and host ulong sizes.
9466              */
9467             if (arg2 & (sizeof(abi_ulong) - 1)) {
9468                 return -TARGET_EINVAL;
9469             }
9470             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9471             mask = alloca(mask_size);
9472 
9473             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9474             if (ret) {
9475                 return ret;
9476             }
9477 
9478             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9479         }
9480     case TARGET_NR_getcpu:
9481         {
9482             unsigned cpu, node;
9483             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9484                                        arg2 ? &node : NULL,
9485                                        NULL));
9486             if (is_error(ret)) {
9487                 return ret;
9488             }
9489             if (arg1 && put_user_u32(cpu, arg1)) {
9490                 return -TARGET_EFAULT;
9491             }
9492             if (arg2 && put_user_u32(node, arg2)) {
9493                 return -TARGET_EFAULT;
9494             }
9495         }
9496         return ret;
9497     case TARGET_NR_sched_setparam:
9498         {
9499             struct sched_param *target_schp;
9500             struct sched_param schp;
9501 
9502             if (arg2 == 0) {
9503                 return -TARGET_EINVAL;
9504             }
9505             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9506                 return -TARGET_EFAULT;
9507             schp.sched_priority = tswap32(target_schp->sched_priority);
9508             unlock_user_struct(target_schp, arg2, 0);
9509             return get_errno(sched_setparam(arg1, &schp));
9510         }
9511     case TARGET_NR_sched_getparam:
9512         {
9513             struct sched_param *target_schp;
9514             struct sched_param schp;
9515 
9516             if (arg2 == 0) {
9517                 return -TARGET_EINVAL;
9518             }
9519             ret = get_errno(sched_getparam(arg1, &schp));
9520             if (!is_error(ret)) {
9521                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9522                     return -TARGET_EFAULT;
9523                 target_schp->sched_priority = tswap32(schp.sched_priority);
9524                 unlock_user_struct(target_schp, arg2, 1);
9525             }
9526         }
9527         return ret;
9528     case TARGET_NR_sched_setscheduler:
9529         {
9530             struct sched_param *target_schp;
9531             struct sched_param schp;
9532             if (arg3 == 0) {
9533                 return -TARGET_EINVAL;
9534             }
9535             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9536                 return -TARGET_EFAULT;
9537             schp.sched_priority = tswap32(target_schp->sched_priority);
9538             unlock_user_struct(target_schp, arg3, 0);
9539             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9540         }
9541     case TARGET_NR_sched_getscheduler:
9542         return get_errno(sched_getscheduler(arg1));
9543     case TARGET_NR_sched_yield:
9544         return get_errno(sched_yield());
9545     case TARGET_NR_sched_get_priority_max:
9546         return get_errno(sched_get_priority_max(arg1));
9547     case TARGET_NR_sched_get_priority_min:
9548         return get_errno(sched_get_priority_min(arg1));
9549     case TARGET_NR_sched_rr_get_interval:
9550         {
9551             struct timespec ts;
9552             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9553             if (!is_error(ret)) {
9554                 ret = host_to_target_timespec(arg2, &ts);
9555             }
9556         }
9557         return ret;
9558     case TARGET_NR_nanosleep:
9559         {
9560             struct timespec req, rem;
9561             target_to_host_timespec(&req, arg1);
9562             ret = get_errno(safe_nanosleep(&req, &rem));
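             /* rem is only meaningful when the sleep was interrupted, so it
              * is copied back to the guest only on error.
              */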
9563             if (is_error(ret) && arg2) {
9564                 host_to_target_timespec(arg2, &rem);
9565             }
9566         }
9567         return ret;
9568     case TARGET_NR_prctl:
9569         switch (arg1) {
9570         case PR_GET_PDEATHSIG:
9571         {
9572             int deathsig;
9573             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9574             if (!is_error(ret) && arg2
9575                 && put_user_ual(deathsig, arg2)) {
9576                 return -TARGET_EFAULT;
9577             }
9578             return ret;
9579         }
9580 #ifdef PR_GET_NAME
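         /* PR_GET_NAME/PR_SET_NAME operate on the kernel's 16-byte task comm
          * buffer (TASK_COMM_LEN, including the trailing NUL), which is why a
          * fixed length of 16 is passed to lock_user() below.
          */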
9581         case PR_GET_NAME:
9582         {
9583             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9584             if (!name) {
9585                 return -TARGET_EFAULT;
9586             }
9587             ret = get_errno(prctl(arg1, (unsigned long)name,
9588                                   arg3, arg4, arg5));
9589             unlock_user(name, arg2, 16);
9590             return ret;
9591         }
9592         case PR_SET_NAME:
9593         {
9594             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9595             if (!name) {
9596                 return -TARGET_EFAULT;
9597             }
9598             ret = get_errno(prctl(arg1, (unsigned long)name,
9599                                   arg3, arg4, arg5));
9600             unlock_user(name, arg2, 0);
9601             return ret;
9602         }
9603 #endif
9604 #ifdef TARGET_MIPS
9605         case TARGET_PR_GET_FP_MODE:
9606         {
9607             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9608             ret = 0;
9609             if (env->CP0_Status & (1 << CP0St_FR)) {
9610                 ret |= TARGET_PR_FP_MODE_FR;
9611             }
9612             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9613                 ret |= TARGET_PR_FP_MODE_FRE;
9614             }
9615             return ret;
9616         }
9617         case TARGET_PR_SET_FP_MODE:
9618         {
9619             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9620             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9621             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9622             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9623             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9624 
9625             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9626                                             TARGET_PR_FP_MODE_FRE;
9627 
9628             /* If nothing to change, return right away, successfully.  */
9629             if (old_fr == new_fr && old_fre == new_fre) {
9630                 return 0;
9631             }
9632             /* Check the value is valid */
9633             if (arg2 & ~known_bits) {
9634                 return -TARGET_EOPNOTSUPP;
9635             }
9636             /* Setting FRE without FR is not supported.  */
9637             if (new_fre && !new_fr) {
9638                 return -TARGET_EOPNOTSUPP;
9639             }
9640             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9641                 /* FR1 is not supported */
9642                 return -TARGET_EOPNOTSUPP;
9643             }
9644             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9645                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9646                 /* cannot set FR=0 */
9647                 return -TARGET_EOPNOTSUPP;
9648             }
9649             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9650                 /* Cannot set FRE=1 */
9651                 return -TARGET_EOPNOTSUPP;
9652             }
9653 
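             /* Changing FR moves where the odd-numbered single-precision
              * values live (their own 64-bit register vs. the upper half of
              * the even-numbered register), so copy each affected 32-bit
              * word into its new home.
              */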
9654             int i;
9655             fpr_t *fpr = env->active_fpu.fpr;
9656             for (i = 0; i < 32 ; i += 2) {
9657                 if (!old_fr && new_fr) {
9658                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9659                 } else if (old_fr && !new_fr) {
9660                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9661                 }
9662             }
9663 
9664             if (new_fr) {
9665                 env->CP0_Status |= (1 << CP0St_FR);
9666                 env->hflags |= MIPS_HFLAG_F64;
9667             } else {
9668                 env->CP0_Status &= ~(1 << CP0St_FR);
9669                 env->hflags &= ~MIPS_HFLAG_F64;
9670             }
9671             if (new_fre) {
9672                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9673                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9674                     env->hflags |= MIPS_HFLAG_FRE;
9675                 }
9676             } else {
9677                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9678                 env->hflags &= ~MIPS_HFLAG_FRE;
9679             }
9680 
9681             return 0;
9682         }
9683 #endif /* MIPS */
9684 #ifdef TARGET_AARCH64
9685         case TARGET_PR_SVE_SET_VL:
9686             /*
9687              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9688              * PR_SVE_VL_INHERIT.  Note the kernel definition
9689              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9690              * even though the current architectural maximum is VQ=16.
9691              */
9692             ret = -TARGET_EINVAL;
9693             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9694                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9695                 CPUARMState *env = cpu_env;
9696                 ARMCPU *cpu = arm_env_get_cpu(env);
9697                 uint32_t vq, old_vq;
9698 
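                 /* arg2 is the requested vector length in bytes; ZCR_EL1
                  * stores (quadwords - 1), so convert, clamp to this CPU's
                  * maximum, and narrow the live SVE state if shrinking.
                  */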
9699                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9700                 vq = MAX(arg2 / 16, 1);
9701                 vq = MIN(vq, cpu->sve_max_vq);
9702 
9703                 if (vq < old_vq) {
9704                     aarch64_sve_narrow_vq(env, vq);
9705                 }
9706                 env->vfp.zcr_el[1] = vq - 1;
9707                 ret = vq * 16;
9708             }
9709             return ret;
9710         case TARGET_PR_SVE_GET_VL:
9711             ret = -TARGET_EINVAL;
9712             {
9713                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9714                 if (cpu_isar_feature(aa64_sve, cpu)) {
9715                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9716                 }
9717             }
9718             return ret;
9719         case TARGET_PR_PAC_RESET_KEYS:
9720             {
9721                 CPUARMState *env = cpu_env;
9722                 ARMCPU *cpu = arm_env_get_cpu(env);
9723 
9724                 if (arg3 || arg4 || arg5) {
9725                     return -TARGET_EINVAL;
9726                 }
9727                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9728                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9729                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9730                                TARGET_PR_PAC_APGAKEY);
9731                     if (arg2 == 0) {
9732                         arg2 = all;
9733                     } else if (arg2 & ~all) {
9734                         return -TARGET_EINVAL;
9735                     }
9736                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9737                         arm_init_pauth_key(&env->apia_key);
9738                     }
9739                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9740                         arm_init_pauth_key(&env->apib_key);
9741                     }
9742                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9743                         arm_init_pauth_key(&env->apda_key);
9744                     }
9745                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9746                         arm_init_pauth_key(&env->apdb_key);
9747                     }
9748                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9749                         arm_init_pauth_key(&env->apga_key);
9750                     }
9751                     return 0;
9752                 }
9753             }
9754             return -TARGET_EINVAL;
9755 #endif /* AARCH64 */
9756         case PR_GET_SECCOMP:
9757         case PR_SET_SECCOMP:
9758             /* Disable seccomp to prevent the target disabling syscalls we
9759              * need. */
9760             return -TARGET_EINVAL;
9761         default:
9762             /* Most prctl options have no pointer arguments */
9763             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9764         }
9765         break;
9766 #ifdef TARGET_NR_arch_prctl
9767     case TARGET_NR_arch_prctl:
9768 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9769         return do_arch_prctl(cpu_env, arg1, arg2);
9770 #else
9771 #error unreachable
9772 #endif
9773 #endif
9774 #ifdef TARGET_NR_pread64
9775     case TARGET_NR_pread64:
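         /* ABIs that pass 64-bit values in aligned register pairs insert a
          * padding slot before the offset, so its halves arrive in arg5/arg6
          * rather than arg4/arg5.
          */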
9776         if (regpairs_aligned(cpu_env, num)) {
9777             arg4 = arg5;
9778             arg5 = arg6;
9779         }
9780         if (arg2 == 0 && arg3 == 0) {
9781             /* Special-case NULL buffer and zero length, which should succeed */
9782             p = 0;
9783         } else {
9784             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9785             if (!p) {
9786                 return -TARGET_EFAULT;
9787             }
9788         }
9789         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9790         unlock_user(p, arg2, ret);
9791         return ret;
9792     case TARGET_NR_pwrite64:
9793         if (regpairs_aligned(cpu_env, num)) {
9794             arg4 = arg5;
9795             arg5 = arg6;
9796         }
9797         if (arg2 == 0 && arg3 == 0) {
9798             /* Special-case NULL buffer and zero length, which should succeed */
9799             p = 0;
9800         } else {
9801             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9802             if (!p) {
9803                 return -TARGET_EFAULT;
9804             }
9805         }
9806         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9807         unlock_user(p, arg2, 0);
9808         return ret;
9809 #endif
9810     case TARGET_NR_getcwd:
9811         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9812             return -TARGET_EFAULT;
9813         ret = get_errno(sys_getcwd1(p, arg2));
9814         unlock_user(p, arg1, ret);
9815         return ret;
9816     case TARGET_NR_capget:
9817     case TARGET_NR_capset:
9818     {
9819         struct target_user_cap_header *target_header;
9820         struct target_user_cap_data *target_data = NULL;
9821         struct __user_cap_header_struct header;
9822         struct __user_cap_data_struct data[2];
9823         struct __user_cap_data_struct *dataptr = NULL;
9824         int i, target_datalen;
9825         int data_items = 1;
9826 
9827         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9828             return -TARGET_EFAULT;
9829         }
9830         header.version = tswap32(target_header->version);
9831         header.pid = tswap32(target_header->pid);
9832 
9833         if (header.version != _LINUX_CAPABILITY_VERSION) {
9834             /* Versions 2 and up take a pointer to two user_data structs */
9835             data_items = 2;
9836         }
9837 
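         /* Capability sets for v2/v3 headers are 64 bits wide and are split
          * across two 32-bit __user_cap_data_struct entries; v1 has one.
          */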
9838         target_datalen = sizeof(*target_data) * data_items;
9839 
9840         if (arg2) {
9841             if (num == TARGET_NR_capget) {
9842                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9843             } else {
9844                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9845             }
9846             if (!target_data) {
9847                 unlock_user_struct(target_header, arg1, 0);
9848                 return -TARGET_EFAULT;
9849             }
9850 
9851             if (num == TARGET_NR_capset) {
9852                 for (i = 0; i < data_items; i++) {
9853                     data[i].effective = tswap32(target_data[i].effective);
9854                     data[i].permitted = tswap32(target_data[i].permitted);
9855                     data[i].inheritable = tswap32(target_data[i].inheritable);
9856                 }
9857             }
9858 
9859             dataptr = data;
9860         }
9861 
9862         if (num == TARGET_NR_capget) {
9863             ret = get_errno(capget(&header, dataptr));
9864         } else {
9865             ret = get_errno(capset(&header, dataptr));
9866         }
9867 
9868         /* The kernel always updates version for both capget and capset */
9869         target_header->version = tswap32(header.version);
9870         unlock_user_struct(target_header, arg1, 1);
9871 
9872         if (arg2) {
9873             if (num == TARGET_NR_capget) {
9874                 for (i = 0; i < data_items; i++) {
9875                     target_data[i].effective = tswap32(data[i].effective);
9876                     target_data[i].permitted = tswap32(data[i].permitted);
9877                     target_data[i].inheritable = tswap32(data[i].inheritable);
9878                 }
9879                 unlock_user(target_data, arg2, target_datalen);
9880             } else {
9881                 unlock_user(target_data, arg2, 0);
9882             }
9883         }
9884         return ret;
9885     }
9886     case TARGET_NR_sigaltstack:
9887         return do_sigaltstack(arg1, arg2,
9888                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9889 
9890 #ifdef CONFIG_SENDFILE
9891 #ifdef TARGET_NR_sendfile
9892     case TARGET_NR_sendfile:
9893     {
9894         off_t *offp = NULL;
9895         off_t off;
9896         if (arg3) {
9897             ret = get_user_sal(off, arg3);
9898             if (is_error(ret)) {
9899                 return ret;
9900             }
9901             offp = &off;
9902         }
9903         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9904         if (!is_error(ret) && arg3) {
9905             abi_long ret2 = put_user_sal(off, arg3);
9906             if (is_error(ret2)) {
9907                 ret = ret2;
9908             }
9909         }
9910         return ret;
9911     }
9912 #endif
9913 #ifdef TARGET_NR_sendfile64
9914     case TARGET_NR_sendfile64:
9915     {
9916         off_t *offp = NULL;
9917         off_t off;
9918         if (arg3) {
9919             ret = get_user_s64(off, arg3);
9920             if (is_error(ret)) {
9921                 return ret;
9922             }
9923             offp = &off;
9924         }
9925         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9926         if (!is_error(ret) && arg3) {
9927             abi_long ret2 = put_user_s64(off, arg3);
9928             if (is_error(ret2)) {
9929                 ret = ret2;
9930             }
9931         }
9932         return ret;
9933     }
9934 #endif
9935 #endif
9936 #ifdef TARGET_NR_vfork
9937     case TARGET_NR_vfork:
9938         return get_errno(do_fork(cpu_env,
9939                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9940                          0, 0, 0, 0));
9941 #endif
9942 #ifdef TARGET_NR_ugetrlimit
9943     case TARGET_NR_ugetrlimit:
9944     {
9945         struct rlimit rlim;
9946         int resource = target_to_host_resource(arg1);
9947         ret = get_errno(getrlimit(resource, &rlim));
9948         if (!is_error(ret)) {
9949             struct target_rlimit *target_rlim;
9950             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9951                 return -TARGET_EFAULT;
9952             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9953             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9954             unlock_user_struct(target_rlim, arg2, 1);
9955         }
9956         return ret;
9957     }
9958 #endif
9959 #ifdef TARGET_NR_truncate64
9960     case TARGET_NR_truncate64:
9961         if (!(p = lock_user_string(arg1)))
9962             return -TARGET_EFAULT;
9963         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9964         unlock_user(p, arg1, 0);
9965         return ret;
9966 #endif
9967 #ifdef TARGET_NR_ftruncate64
9968     case TARGET_NR_ftruncate64:
9969         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9970 #endif
9971 #ifdef TARGET_NR_stat64
9972     case TARGET_NR_stat64:
9973         if (!(p = lock_user_string(arg1))) {
9974             return -TARGET_EFAULT;
9975         }
9976         ret = get_errno(stat(path(p), &st));
9977         unlock_user(p, arg1, 0);
9978         if (!is_error(ret))
9979             ret = host_to_target_stat64(cpu_env, arg2, &st);
9980         return ret;
9981 #endif
9982 #ifdef TARGET_NR_lstat64
9983     case TARGET_NR_lstat64:
9984         if (!(p = lock_user_string(arg1))) {
9985             return -TARGET_EFAULT;
9986         }
9987         ret = get_errno(lstat(path(p), &st));
9988         unlock_user(p, arg1, 0);
9989         if (!is_error(ret))
9990             ret = host_to_target_stat64(cpu_env, arg2, &st);
9991         return ret;
9992 #endif
9993 #ifdef TARGET_NR_fstat64
9994     case TARGET_NR_fstat64:
9995         ret = get_errno(fstat(arg1, &st));
9996         if (!is_error(ret))
9997             ret = host_to_target_stat64(cpu_env, arg2, &st);
9998         return ret;
9999 #endif
10000 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10001 #ifdef TARGET_NR_fstatat64
10002     case TARGET_NR_fstatat64:
10003 #endif
10004 #ifdef TARGET_NR_newfstatat
10005     case TARGET_NR_newfstatat:
10006 #endif
10007         if (!(p = lock_user_string(arg2))) {
10008             return -TARGET_EFAULT;
10009         }
10010         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10011         unlock_user(p, arg2, 0);
10012         if (!is_error(ret))
10013             ret = host_to_target_stat64(cpu_env, arg3, &st);
10014         return ret;
10015 #endif
10016 #ifdef TARGET_NR_lchown
10017     case TARGET_NR_lchown:
10018         if (!(p = lock_user_string(arg1)))
10019             return -TARGET_EFAULT;
10020         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10021         unlock_user(p, arg1, 0);
10022         return ret;
10023 #endif
10024 #ifdef TARGET_NR_getuid
10025     case TARGET_NR_getuid:
10026         return get_errno(high2lowuid(getuid()));
10027 #endif
10028 #ifdef TARGET_NR_getgid
10029     case TARGET_NR_getgid:
10030         return get_errno(high2lowgid(getgid()));
10031 #endif
10032 #ifdef TARGET_NR_geteuid
10033     case TARGET_NR_geteuid:
10034         return get_errno(high2lowuid(geteuid()));
10035 #endif
10036 #ifdef TARGET_NR_getegid
10037     case TARGET_NR_getegid:
10038         return get_errno(high2lowgid(getegid()));
10039 #endif
10040     case TARGET_NR_setreuid:
10041         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10042     case TARGET_NR_setregid:
10043         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10044     case TARGET_NR_getgroups:
10045         {
10046             int gidsetsize = arg1;
10047             target_id *target_grouplist;
10048             gid_t *grouplist;
10049             int i;
10050 
10051             grouplist = alloca(gidsetsize * sizeof(gid_t));
10052             ret = get_errno(getgroups(gidsetsize, grouplist));
10053             if (gidsetsize == 0)
10054                 return ret;
10055             if (!is_error(ret)) {
10056                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10057                 if (!target_grouplist)
10058                     return -TARGET_EFAULT;
10059                 for (i = 0; i < ret; i++)
10060                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10061                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10062             }
10063         }
10064         return ret;
10065     case TARGET_NR_setgroups:
10066         {
10067             int gidsetsize = arg1;
10068             target_id *target_grouplist;
10069             gid_t *grouplist = NULL;
10070             int i;
10071             if (gidsetsize) {
10072                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10073                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10074                 if (!target_grouplist) {
10075                     return -TARGET_EFAULT;
10076                 }
10077                 for (i = 0; i < gidsetsize; i++) {
10078                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10079                 }
10080                 unlock_user(target_grouplist, arg2, 0);
10081             }
10082             return get_errno(setgroups(gidsetsize, grouplist));
10083         }
10084     case TARGET_NR_fchown:
10085         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10086 #if defined(TARGET_NR_fchownat)
10087     case TARGET_NR_fchownat:
10088         if (!(p = lock_user_string(arg2)))
10089             return -TARGET_EFAULT;
10090         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10091                                  low2highgid(arg4), arg5));
10092         unlock_user(p, arg2, 0);
10093         return ret;
10094 #endif
10095 #ifdef TARGET_NR_setresuid
10096     case TARGET_NR_setresuid:
10097         return get_errno(sys_setresuid(low2highuid(arg1),
10098                                        low2highuid(arg2),
10099                                        low2highuid(arg3)));
10100 #endif
10101 #ifdef TARGET_NR_getresuid
10102     case TARGET_NR_getresuid:
10103         {
10104             uid_t ruid, euid, suid;
10105             ret = get_errno(getresuid(&ruid, &euid, &suid));
10106             if (!is_error(ret)) {
10107                 if (put_user_id(high2lowuid(ruid), arg1)
10108                     || put_user_id(high2lowuid(euid), arg2)
10109                     || put_user_id(high2lowuid(suid), arg3))
10110                     return -TARGET_EFAULT;
10111             }
10112         }
10113         return ret;
10114 #endif
10115 #ifdef TARGET_NR_setresgid
10116     case TARGET_NR_setresgid:
10117         return get_errno(sys_setresgid(low2highgid(arg1),
10118                                        low2highgid(arg2),
10119                                        low2highgid(arg3)));
10120 #endif
10121 #ifdef TARGET_NR_getresgid
10122     case TARGET_NR_getresgid:
10123         {
10124             gid_t rgid, egid, sgid;
10125             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10126             if (!is_error(ret)) {
10127                 if (put_user_id(high2lowgid(rgid), arg1)
10128                     || put_user_id(high2lowgid(egid), arg2)
10129                     || put_user_id(high2lowgid(sgid), arg3))
10130                     return -TARGET_EFAULT;
10131             }
10132         }
10133         return ret;
10134 #endif
10135 #ifdef TARGET_NR_chown
10136     case TARGET_NR_chown:
10137         if (!(p = lock_user_string(arg1)))
10138             return -TARGET_EFAULT;
10139         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10140         unlock_user(p, arg1, 0);
10141         return ret;
10142 #endif
10143     case TARGET_NR_setuid:
10144         return get_errno(sys_setuid(low2highuid(arg1)));
10145     case TARGET_NR_setgid:
10146         return get_errno(sys_setgid(low2highgid(arg1)));
10147     case TARGET_NR_setfsuid:
10148         return get_errno(setfsuid(arg1));
10149     case TARGET_NR_setfsgid:
10150         return get_errno(setfsgid(arg1));
10151 
10152 #ifdef TARGET_NR_lchown32
10153     case TARGET_NR_lchown32:
10154         if (!(p = lock_user_string(arg1)))
10155             return -TARGET_EFAULT;
10156         ret = get_errno(lchown(p, arg2, arg3));
10157         unlock_user(p, arg1, 0);
10158         return ret;
10159 #endif
10160 #ifdef TARGET_NR_getuid32
10161     case TARGET_NR_getuid32:
10162         return get_errno(getuid());
10163 #endif
10164 
10165 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10166    /* Alpha specific */
10167     case TARGET_NR_getxuid:
10168         {
10169             uid_t euid;
10170             euid = geteuid();
10171             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10172         }
10173         return get_errno(getuid());
10174 #endif
10175 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10176    /* Alpha specific */
10177     case TARGET_NR_getxgid:
10178         {
10179             gid_t egid;
10180             egid = getegid();
10181             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10182         }
10183         return get_errno(getgid());
10184 #endif
10185 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10186     /* Alpha specific */
10187     case TARGET_NR_osf_getsysinfo:
10188         ret = -TARGET_EOPNOTSUPP;
10189         switch (arg1) {
10190           case TARGET_GSI_IEEE_FP_CONTROL:
10191             {
10192                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10193 
10194                 /* Copied from linux ieee_fpcr_to_swcr.  */
10195                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
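                  /* The FPCR carries trap *disable* bits, which is why they
                   * are inverted below to form the SWCR trap *enable* bits.
                   */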
10196                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10197                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10198                                         | SWCR_TRAP_ENABLE_DZE
10199                                         | SWCR_TRAP_ENABLE_OVF);
10200                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10201                                         | SWCR_TRAP_ENABLE_INE);
10202                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10203                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10204 
10205                 if (put_user_u64 (swcr, arg2))
10206                         return -TARGET_EFAULT;
10207                 ret = 0;
10208             }
10209             break;
10210 
10211           /* case GSI_IEEE_STATE_AT_SIGNAL:
10212              -- Not implemented in linux kernel.
10213              case GSI_UACPROC:
10214              -- Retrieves current unaligned access state; not much used.
10215              case GSI_PROC_TYPE:
10216              -- Retrieves implver information; surely not used.
10217              case GSI_GET_HWRPB:
10218              -- Grabs a copy of the HWRPB; surely not used.
10219           */
10220         }
10221         return ret;
10222 #endif
10223 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10224     /* Alpha specific */
10225     case TARGET_NR_osf_setsysinfo:
10226         ret = -TARGET_EOPNOTSUPP;
10227         switch (arg1) {
10228           case TARGET_SSI_IEEE_FP_CONTROL:
10229             {
10230                 uint64_t swcr, fpcr, orig_fpcr;
10231 
10232                 if (get_user_u64 (swcr, arg2)) {
10233                     return -TARGET_EFAULT;
10234                 }
10235                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10236                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10237 
10238                 /* Copied from linux ieee_swcr_to_fpcr.  */
10239                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10240                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10241                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10242                                   | SWCR_TRAP_ENABLE_DZE
10243                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10244                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10245                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10246                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10247                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10248 
10249                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10250                 ret = 0;
10251             }
10252             break;
10253 
10254           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10255             {
10256                 uint64_t exc, fpcr, orig_fpcr;
10257                 int si_code;
10258 
10259                 if (get_user_u64(exc, arg2)) {
10260                     return -TARGET_EFAULT;
10261                 }
10262 
10263                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10264 
10265                 /* We only add to the exception status here.  */
10266                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10267 
10268                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10269                 ret = 0;
10270 
10271                 /* Old exceptions are not signaled.  */
10272                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10273 
10274                 /* If any exceptions set by this call,
10275                    and are unmasked, send a signal.  */
10276                 si_code = 0;
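                  /* Later checks overwrite si_code, so the effective priority
                   * is INV > DZE > OVF > UNF > INE.
                   */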
10277                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10278                     si_code = TARGET_FPE_FLTRES;
10279                 }
10280                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10281                     si_code = TARGET_FPE_FLTUND;
10282                 }
10283                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10284                     si_code = TARGET_FPE_FLTOVF;
10285                 }
10286                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10287                     si_code = TARGET_FPE_FLTDIV;
10288                 }
10289                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10290                     si_code = TARGET_FPE_FLTINV;
10291                 }
10292                 if (si_code != 0) {
10293                     target_siginfo_t info;
10294                     info.si_signo = SIGFPE;
10295                     info.si_errno = 0;
10296                     info.si_code = si_code;
10297                     info._sifields._sigfault._addr
10298                         = ((CPUArchState *)cpu_env)->pc;
10299                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10300                                  QEMU_SI_FAULT, &info);
10301                 }
10302             }
10303             break;
10304 
10305           /* case SSI_NVPAIRS:
10306              -- Used with SSIN_UACPROC to enable unaligned accesses.
10307              case SSI_IEEE_STATE_AT_SIGNAL:
10308              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10309              -- Not implemented in linux kernel
10310           */
10311         }
10312         return ret;
10313 #endif
10314 #ifdef TARGET_NR_osf_sigprocmask
10315     /* Alpha specific.  */
10316     case TARGET_NR_osf_sigprocmask:
10317         {
10318             abi_ulong mask;
10319             int how;
10320             sigset_t set, oldset;
10321 
10322             switch (arg1) {
10323             case TARGET_SIG_BLOCK:
10324                 how = SIG_BLOCK;
10325                 break;
10326             case TARGET_SIG_UNBLOCK:
10327                 how = SIG_UNBLOCK;
10328                 break;
10329             case TARGET_SIG_SETMASK:
10330                 how = SIG_SETMASK;
10331                 break;
10332             default:
10333                 return -TARGET_EINVAL;
10334             }
10335             mask = arg2;
10336             target_to_host_old_sigset(&set, &mask);
10337             ret = do_sigprocmask(how, &set, &oldset);
10338             if (!ret) {
10339                 host_to_target_old_sigset(&mask, &oldset);
10340                 ret = mask;
10341             }
10342         }
10343         return ret;
10344 #endif
10345 
10346 #ifdef TARGET_NR_getgid32
10347     case TARGET_NR_getgid32:
10348         return get_errno(getgid());
10349 #endif
10350 #ifdef TARGET_NR_geteuid32
10351     case TARGET_NR_geteuid32:
10352         return get_errno(geteuid());
10353 #endif
10354 #ifdef TARGET_NR_getegid32
10355     case TARGET_NR_getegid32:
10356         return get_errno(getegid());
10357 #endif
10358 #ifdef TARGET_NR_setreuid32
10359     case TARGET_NR_setreuid32:
10360         return get_errno(setreuid(arg1, arg2));
10361 #endif
10362 #ifdef TARGET_NR_setregid32
10363     case TARGET_NR_setregid32:
10364         return get_errno(setregid(arg1, arg2));
10365 #endif
10366 #ifdef TARGET_NR_getgroups32
10367     case TARGET_NR_getgroups32:
10368         {
10369             int gidsetsize = arg1;
10370             uint32_t *target_grouplist;
10371             gid_t *grouplist;
10372             int i;
10373 
10374             grouplist = alloca(gidsetsize * sizeof(gid_t));
10375             ret = get_errno(getgroups(gidsetsize, grouplist));
10376             if (gidsetsize == 0)
10377                 return ret;
10378             if (!is_error(ret)) {
10379                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10380                 if (!target_grouplist) {
10381                     return -TARGET_EFAULT;
10382                 }
10383                 for (i = 0; i < ret; i++)
10384                     target_grouplist[i] = tswap32(grouplist[i]);
10385                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10386             }
10387         }
10388         return ret;
10389 #endif
10390 #ifdef TARGET_NR_setgroups32
10391     case TARGET_NR_setgroups32:
10392         {
10393             int gidsetsize = arg1;
10394             uint32_t *target_grouplist;
10395             gid_t *grouplist;
10396             int i;
10397 
10398             grouplist = alloca(gidsetsize * sizeof(gid_t));
10399             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10400             if (!target_grouplist) {
10401                 return -TARGET_EFAULT;
10402             }
10403             for (i = 0; i < gidsetsize; i++)
10404                 grouplist[i] = tswap32(target_grouplist[i]);
10405             unlock_user(target_grouplist, arg2, 0);
10406             return get_errno(setgroups(gidsetsize, grouplist));
10407         }
10408 #endif
10409 #ifdef TARGET_NR_fchown32
10410     case TARGET_NR_fchown32:
10411         return get_errno(fchown(arg1, arg2, arg3));
10412 #endif
10413 #ifdef TARGET_NR_setresuid32
10414     case TARGET_NR_setresuid32:
10415         return get_errno(sys_setresuid(arg1, arg2, arg3));
10416 #endif
10417 #ifdef TARGET_NR_getresuid32
10418     case TARGET_NR_getresuid32:
10419         {
10420             uid_t ruid, euid, suid;
10421             ret = get_errno(getresuid(&ruid, &euid, &suid));
10422             if (!is_error(ret)) {
10423                 if (put_user_u32(ruid, arg1)
10424                     || put_user_u32(euid, arg2)
10425                     || put_user_u32(suid, arg3))
10426                     return -TARGET_EFAULT;
10427             }
10428         }
10429         return ret;
10430 #endif
10431 #ifdef TARGET_NR_setresgid32
10432     case TARGET_NR_setresgid32:
10433         return get_errno(sys_setresgid(arg1, arg2, arg3));
10434 #endif
10435 #ifdef TARGET_NR_getresgid32
10436     case TARGET_NR_getresgid32:
10437         {
10438             gid_t rgid, egid, sgid;
10439             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10440             if (!is_error(ret)) {
10441                 if (put_user_u32(rgid, arg1)
10442                     || put_user_u32(egid, arg2)
10443                     || put_user_u32(sgid, arg3))
10444                     return -TARGET_EFAULT;
10445             }
10446         }
10447         return ret;
10448 #endif
10449 #ifdef TARGET_NR_chown32
10450     case TARGET_NR_chown32:
10451         if (!(p = lock_user_string(arg1)))
10452             return -TARGET_EFAULT;
10453         ret = get_errno(chown(p, arg2, arg3));
10454         unlock_user(p, arg1, 0);
10455         return ret;
10456 #endif
10457 #ifdef TARGET_NR_setuid32
10458     case TARGET_NR_setuid32:
10459         return get_errno(sys_setuid(arg1));
10460 #endif
10461 #ifdef TARGET_NR_setgid32
10462     case TARGET_NR_setgid32:
10463         return get_errno(sys_setgid(arg1));
10464 #endif
10465 #ifdef TARGET_NR_setfsuid32
10466     case TARGET_NR_setfsuid32:
10467         return get_errno(setfsuid(arg1));
10468 #endif
10469 #ifdef TARGET_NR_setfsgid32
10470     case TARGET_NR_setfsgid32:
10471         return get_errno(setfsgid(arg1));
10472 #endif
10473 #ifdef TARGET_NR_mincore
10474     case TARGET_NR_mincore:
10475         {
10476             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10477             if (!a) {
10478                 return -TARGET_ENOMEM;
10479             }
10480             p = lock_user_string(arg3);
10481             if (!p) {
10482                 ret = -TARGET_EFAULT;
10483             } else {
10484                 ret = get_errno(mincore(a, arg2, p));
10485                 unlock_user(p, arg3, ret);
10486             }
10487             unlock_user(a, arg1, 0);
10488         }
10489         return ret;
10490 #endif
10491 #ifdef TARGET_NR_arm_fadvise64_64
10492     case TARGET_NR_arm_fadvise64_64:
10493         /* arm_fadvise64_64 looks like fadvise64_64 but
10494          * with different argument order: fd, advice, offset, len
10495          * rather than the usual fd, offset, len, advice.
10496          * Note that offset and len are both 64-bit so appear as
10497          * pairs of 32-bit registers.
10498          */
10499         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10500                             target_offset64(arg5, arg6), arg2);
10501         return -host_to_target_errno(ret);
10502 #endif
10503 
10504 #if TARGET_ABI_BITS == 32
10505 
10506 #ifdef TARGET_NR_fadvise64_64
10507     case TARGET_NR_fadvise64_64:
10508 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10509         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10510         ret = arg2;
10511         arg2 = arg3;
10512         arg3 = arg4;
10513         arg4 = arg5;
10514         arg5 = arg6;
10515         arg6 = ret;
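          /* i.e. rotate (fd, advice, offset, len) into the generic
           * (fd, offset, len, advice) order, using ret as scratch space.
           */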
10516 #else
10517         /* 6 args: fd, offset (high, low), len (high, low), advice */
10518         if (regpairs_aligned(cpu_env, num)) {
10519             /* offset is in (3,4), len in (5,6) and advice in 7 */
10520             arg2 = arg3;
10521             arg3 = arg4;
10522             arg4 = arg5;
10523             arg5 = arg6;
10524             arg6 = arg7;
10525         }
10526 #endif
10527         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10528                             target_offset64(arg4, arg5), arg6);
10529         return -host_to_target_errno(ret);
10530 #endif
10531 
10532 #ifdef TARGET_NR_fadvise64
10533     case TARGET_NR_fadvise64:
10534         /* 5 args: fd, offset (high, low), len, advice */
10535         if (regpairs_aligned(cpu_env, num)) {
10536             /* offset is in (3,4), len in 5 and advice in 6 */
10537             arg2 = arg3;
10538             arg3 = arg4;
10539             arg4 = arg5;
10540             arg5 = arg6;
10541         }
10542         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10543         return -host_to_target_errno(ret);
10544 #endif
10545 
10546 #else /* not a 32-bit ABI */
10547 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10548 #ifdef TARGET_NR_fadvise64_64
10549     case TARGET_NR_fadvise64_64:
10550 #endif
10551 #ifdef TARGET_NR_fadvise64
10552     case TARGET_NR_fadvise64:
10553 #endif
10554 #ifdef TARGET_S390X
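          /* 64-bit s390 renumbers POSIX_FADV_DONTNEED/NOREUSE as 6/7, so
           * remap the guest's values and turn the unused 4/5 slots into
           * advice values the host will reject with EINVAL.
           */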
10555         switch (arg4) {
10556         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10557         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10558         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10559         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10560         default: break;
10561         }
10562 #endif
10563         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10564 #endif
10565 #endif /* end of 64-bit ABI fadvise handling */
10566 
10567 #ifdef TARGET_NR_madvise
10568     case TARGET_NR_madvise:
10569         /* A straight passthrough may not be safe because qemu sometimes
10570            turns private file-backed mappings into anonymous mappings.
10571            This will break MADV_DONTNEED.
10572            This is a hint, so ignoring and returning success is ok.  */
10573         return 0;
10574 #endif
10575 #if TARGET_ABI_BITS == 32
10576     case TARGET_NR_fcntl64:
10577     {
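        /*
         * fcntl64 only exists on 32-bit ABIs; unlike plain fcntl it takes a
         * struct flock64 so that 64-bit file offsets can be locked.  Old-ABI
         * ARM lays that structure out differently, which is why alternative
         * copy helpers are selected below.
         */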
10578         int cmd;
10579         struct flock64 fl;
10580         from_flock64_fn *copyfrom = copy_from_user_flock64;
10581         to_flock64_fn *copyto = copy_to_user_flock64;
10582 
10583 #ifdef TARGET_ARM
10584         if (!((CPUARMState *)cpu_env)->eabi) {
10585             copyfrom = copy_from_user_oabi_flock64;
10586             copyto = copy_to_user_oabi_flock64;
10587         }
10588 #endif
10589 
10590         cmd = target_to_host_fcntl_cmd(arg2);
10591         if (cmd == -TARGET_EINVAL) {
10592             return cmd;
10593         }
10594 
10595         switch (arg2) {
10596         case TARGET_F_GETLK64:
10597             ret = copyfrom(&fl, arg3);
10598             if (ret) {
10599                 break;
10600             }
10601             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10602             if (ret == 0) {
10603                 ret = copyto(arg3, &fl);
10604             }
10605             break;
10606 
10607         case TARGET_F_SETLK64:
10608         case TARGET_F_SETLKW64:
10609             ret = copyfrom(&fl, arg3);
10610             if (ret) {
10611                 break;
10612             }
10613             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10614             break;
10615         default:
10616             ret = do_fcntl(arg1, arg2, arg3);
10617             break;
10618         }
10619         return ret;
10620     }
10621 #endif
10622 #ifdef TARGET_NR_cacheflush
10623     case TARGET_NR_cacheflush:
10624         /* self-modifying code is handled automatically, so nothing needed */
10625         return 0;
10626 #endif
10627 #ifdef TARGET_NR_getpagesize
10628     case TARGET_NR_getpagesize:
10629         return TARGET_PAGE_SIZE;
10630 #endif
10631     case TARGET_NR_gettid:
10632         return get_errno(gettid());
10633 #ifdef TARGET_NR_readahead
10634     case TARGET_NR_readahead:
10635 #if TARGET_ABI_BITS == 32
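        /*
         * The 64-bit offset arrives as a pair of 32-bit registers; ABIs that
         * require such pairs to start on an even register insert a padding
         * argument, so shift the remaining arguments down by one.
         */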
10636         if (regpairs_aligned(cpu_env, num)) {
10637             arg2 = arg3;
10638             arg3 = arg4;
10639             arg4 = arg5;
10640         }
10641         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10642 #else
10643         ret = get_errno(readahead(arg1, arg2, arg3));
10644 #endif
10645         return ret;
10646 #endif
10647 #ifdef CONFIG_ATTR
10648 #ifdef TARGET_NR_setxattr
10649     case TARGET_NR_listxattr:
10650     case TARGET_NR_llistxattr:
10651     {
10652         void *p, *b = 0;
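        /* A zero list buffer is a legitimate size query, so only lock the
         * guest buffer when one was actually supplied. */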
10653         if (arg2) {
10654             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10655             if (!b) {
10656                 return -TARGET_EFAULT;
10657             }
10658         }
10659         p = lock_user_string(arg1);
10660         if (p) {
10661             if (num == TARGET_NR_listxattr) {
10662                 ret = get_errno(listxattr(p, b, arg3));
10663             } else {
10664                 ret = get_errno(llistxattr(p, b, arg3));
10665             }
10666         } else {
10667             ret = -TARGET_EFAULT;
10668         }
10669         unlock_user(p, arg1, 0);
10670         unlock_user(b, arg2, arg3);
10671         return ret;
10672     }
10673     case TARGET_NR_flistxattr:
10674     {
10675         void *b = 0;
10676         if (arg2) {
10677             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10678             if (!b) {
10679                 return -TARGET_EFAULT;
10680             }
10681         }
10682         ret = get_errno(flistxattr(arg1, b, arg3));
10683         unlock_user(b, arg2, arg3);
10684         return ret;
10685     }
10686     case TARGET_NR_setxattr:
10687     case TARGET_NR_lsetxattr:
10688         {
10689             void *p, *n, *v = 0;
10690             if (arg3) {
10691                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10692                 if (!v) {
10693                     return -TARGET_EFAULT;
10694                 }
10695             }
10696             p = lock_user_string(arg1);
10697             n = lock_user_string(arg2);
10698             if (p && n) {
10699                 if (num == TARGET_NR_setxattr) {
10700                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10701                 } else {
10702                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10703                 }
10704             } else {
10705                 ret = -TARGET_EFAULT;
10706             }
10707             unlock_user(p, arg1, 0);
10708             unlock_user(n, arg2, 0);
10709             unlock_user(v, arg3, 0);
10710         }
10711         return ret;
10712     case TARGET_NR_fsetxattr:
10713         {
10714             void *n, *v = 0;
10715             if (arg3) {
10716                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10717                 if (!v) {
10718                     return -TARGET_EFAULT;
10719                 }
10720             }
10721             n = lock_user_string(arg2);
10722             if (n) {
10723                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10724             } else {
10725                 ret = -TARGET_EFAULT;
10726             }
10727             unlock_user(n, arg2, 0);
10728             unlock_user(v, arg3, 0);
10729         }
10730         return ret;
10731     case TARGET_NR_getxattr:
10732     case TARGET_NR_lgetxattr:
10733         {
10734             void *p, *n, *v = 0;
10735             if (arg3) {
10736                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10737                 if (!v) {
10738                     return -TARGET_EFAULT;
10739                 }
10740             }
10741             p = lock_user_string(arg1);
10742             n = lock_user_string(arg2);
10743             if (p && n) {
10744                 if (num == TARGET_NR_getxattr) {
10745                     ret = get_errno(getxattr(p, n, v, arg4));
10746                 } else {
10747                     ret = get_errno(lgetxattr(p, n, v, arg4));
10748                 }
10749             } else {
10750                 ret = -TARGET_EFAULT;
10751             }
10752             unlock_user(p, arg1, 0);
10753             unlock_user(n, arg2, 0);
10754             unlock_user(v, arg3, arg4);
10755         }
10756         return ret;
10757     case TARGET_NR_fgetxattr:
10758         {
10759             void *n, *v = 0;
10760             if (arg3) {
10761                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10762                 if (!v) {
10763                     return -TARGET_EFAULT;
10764                 }
10765             }
10766             n = lock_user_string(arg2);
10767             if (n) {
10768                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10769             } else {
10770                 ret = -TARGET_EFAULT;
10771             }
10772             unlock_user(n, arg2, 0);
10773             unlock_user(v, arg3, arg4);
10774         }
10775         return ret;
10776     case TARGET_NR_removexattr:
10777     case TARGET_NR_lremovexattr:
10778         {
10779             void *p, *n;
10780             p = lock_user_string(arg1);
10781             n = lock_user_string(arg2);
10782             if (p && n) {
10783                 if (num == TARGET_NR_removexattr) {
10784                     ret = get_errno(removexattr(p, n));
10785                 } else {
10786                     ret = get_errno(lremovexattr(p, n));
10787                 }
10788             } else {
10789                 ret = -TARGET_EFAULT;
10790             }
10791             unlock_user(p, arg1, 0);
10792             unlock_user(n, arg2, 0);
10793         }
10794         return ret;
10795     case TARGET_NR_fremovexattr:
10796         {
10797             void *n;
10798             n = lock_user_string(arg2);
10799             if (n) {
10800                 ret = get_errno(fremovexattr(arg1, n));
10801             } else {
10802                 ret = -TARGET_EFAULT;
10803             }
10804             unlock_user(n, arg2, 0);
10805         }
10806         return ret;
10807 #endif
10808 #endif /* CONFIG_ATTR */
10809 #ifdef TARGET_NR_set_thread_area
10810     case TARGET_NR_set_thread_area:
10811 #if defined(TARGET_MIPS)
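      /* On MIPS the TLS pointer is held in the CP0 UserLocal register,
       * which guest code typically reads back with the RDHWR instruction. */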
10812       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10813       return 0;
10814 #elif defined(TARGET_CRIS)
10815       if (arg1 & 0xff) {
10816           ret = -TARGET_EINVAL;
10817       } else {
10818           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10819           ret = 0;
10820       }
10821       return ret;
10822 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10823       return do_set_thread_area(cpu_env, arg1);
10824 #elif defined(TARGET_M68K)
10825       {
10826           TaskState *ts = cpu->opaque;
10827           ts->tp_value = arg1;
10828           return 0;
10829       }
10830 #else
10831       return -TARGET_ENOSYS;
10832 #endif
10833 #endif
10834 #ifdef TARGET_NR_get_thread_area
10835     case TARGET_NR_get_thread_area:
10836 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10837         return do_get_thread_area(cpu_env, arg1);
10838 #elif defined(TARGET_M68K)
10839         {
10840             TaskState *ts = cpu->opaque;
10841             return ts->tp_value;
10842         }
10843 #else
10844         return -TARGET_ENOSYS;
10845 #endif
10846 #endif
10847 #ifdef TARGET_NR_getdomainname
10848     case TARGET_NR_getdomainname:
10849         return -TARGET_ENOSYS;
10850 #endif
10851 
10852 #ifdef TARGET_NR_clock_settime
10853     case TARGET_NR_clock_settime:
10854     {
10855         struct timespec ts;
10856 
10857         ret = target_to_host_timespec(&ts, arg2);
10858         if (!is_error(ret)) {
10859             ret = get_errno(clock_settime(arg1, &ts));
10860         }
10861         return ret;
10862     }
10863 #endif
10864 #ifdef TARGET_NR_clock_gettime
10865     case TARGET_NR_clock_gettime:
10866     {
10867         struct timespec ts;
10868         ret = get_errno(clock_gettime(arg1, &ts));
10869         if (!is_error(ret)) {
10870             ret = host_to_target_timespec(arg2, &ts);
10871         }
10872         return ret;
10873     }
10874 #endif
10875 #ifdef TARGET_NR_clock_getres
10876     case TARGET_NR_clock_getres:
10877     {
10878         struct timespec ts;
10879         ret = get_errno(clock_getres(arg1, &ts));
10880         if (!is_error(ret)) {
10881             host_to_target_timespec(arg2, &ts);
10882         }
10883         return ret;
10884     }
10885 #endif
10886 #ifdef TARGET_NR_clock_nanosleep
10887     case TARGET_NR_clock_nanosleep:
10888     {
10889         struct timespec ts;
10890         if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
10891         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10892                                              &ts, arg4 ? &ts : NULL));
10893         if (arg4) {
10894             host_to_target_timespec(arg4, &ts);
        }
10895 
10896 #if defined(TARGET_PPC)
10897         /* clock_nanosleep is odd in that it returns positive errno values.
10898          * On PPC, CR0 bit 3 should be set in such a situation. */
10899         if (ret && ret != -TARGET_ERESTARTSYS) {
10900             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10901         }
10902 #endif
10903         return ret;
10904     }
10905 #endif
10906 
10907 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10908     case TARGET_NR_set_tid_address:
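        /* The host kernel stores this address and will clear it and perform
         * a futex wakeup there when the thread exits, so the guest pointer
         * is simply translated with g2h() and passed straight through. */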
10909         return get_errno(set_tid_address((int *)g2h(arg1)));
10910 #endif
10911 
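    /* tkill/tgkill take a signal number, which must be translated from the
     * guest's numbering to the host's before invoking the host syscall. */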
10912     case TARGET_NR_tkill:
10913         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10914 
10915     case TARGET_NR_tgkill:
10916         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10917                          target_to_host_signal(arg3)));
10918 
10919 #ifdef TARGET_NR_set_robust_list
10920     case TARGET_NR_set_robust_list:
10921     case TARGET_NR_get_robust_list:
10922         /* The ABI for supporting robust futexes has userspace pass
10923          * the kernel a pointer to a linked list which is updated by
10924          * userspace after the syscall; the list is walked by the kernel
10925          * when the thread exits. Since the linked list in QEMU guest
10926          * memory isn't a valid linked list for the host and we have
10927          * no way to reliably intercept the thread-death event, we can't
10928          * support these. Silently return ENOSYS so that guest userspace
10929          * falls back to a non-robust futex implementation (which should
10930          * be OK except in the corner case of the guest crashing while
10931          * holding a mutex that is shared with another process via
10932          * shared memory).
10933          */
10934         return -TARGET_ENOSYS;
10935 #endif
10936 
10937 #if defined(TARGET_NR_utimensat)
10938     case TARGET_NR_utimensat:
10939         {
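            /* A NULL times pointer asks the kernel to set both timestamps
             * to the current time, so it is passed through unchanged. */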
10940             struct timespec *tsp, ts[2];
10941             if (!arg3) {
10942                 tsp = NULL;
10943             } else {
10944                 if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
10945                 if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
10946                 tsp = ts;
10947             }
10948             if (!arg2) {
10949                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10950             } else {
10951                 if (!(p = lock_user_string(arg2))) {
10952                     return -TARGET_EFAULT;
10953                 }
10954                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10955                 unlock_user(p, arg2, 0);
10956             }
10957         }
10958         return ret;
10959 #endif
10960     case TARGET_NR_futex:
10961         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10962 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10963     case TARGET_NR_inotify_init:
10964         ret = get_errno(sys_inotify_init());
10965         if (ret >= 0) {
10966             fd_trans_register(ret, &target_inotify_trans);
10967         }
10968         return ret;
10969 #endif
10970 #ifdef CONFIG_INOTIFY1
10971 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10972     case TARGET_NR_inotify_init1:
10973         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10974                                           fcntl_flags_tbl)));
10975         if (ret >= 0) {
10976             fd_trans_register(ret, &target_inotify_trans);
10977         }
10978         return ret;
10979 #endif
10980 #endif
10981 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10982     case TARGET_NR_inotify_add_watch:
10983         p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
10984         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10985         unlock_user(p, arg2, 0);
10986         return ret;
10987 #endif
10988 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10989     case TARGET_NR_inotify_rm_watch:
10990         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10991 #endif
10992 
10993 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10994     case TARGET_NR_mq_open:
10995         {
10996             struct mq_attr posix_mq_attr;
10997             struct mq_attr *pposix_mq_attr;
10998             int host_flags;
10999 
11000             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11001             pposix_mq_attr = NULL;
11002             if (arg4) {
11003                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11004                     return -TARGET_EFAULT;
11005                 }
11006                 pposix_mq_attr = &posix_mq_attr;
11007             }
11008             p = lock_user_string(arg1 - 1);
11009             if (!p) {
11010                 return -TARGET_EFAULT;
11011             }
11012             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11013             unlock_user(p, arg1, 0);
11014         }
11015         return ret;
11016 
11017     case TARGET_NR_mq_unlink:
11018         p = lock_user_string(arg1 - 1);
11019         if (!p) {
11020             return -TARGET_EFAULT;
11021         }
11022         ret = get_errno(mq_unlink(p));
11023         unlock_user(p, arg1, 0);
11024         return ret;
11025 
11026     case TARGET_NR_mq_timedsend:
11027         {
11028             struct timespec ts;
11029 
11030             p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
11031             if (arg5 != 0) {
11032                 target_to_host_timespec(&ts, arg5);
11033                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11034                 host_to_target_timespec(arg5, &ts);
11035             } else {
11036                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11037             }
11038             unlock_user(p, arg2, arg3);
11039         }
11040         return ret;
11041 
11042     case TARGET_NR_mq_timedreceive:
11043         {
11044             struct timespec ts;
11045             unsigned int prio;
11046 
11047             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
11048             if (arg5 != 0) {
11049                 target_to_host_timespec(&ts, arg5);
11050                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11051                                                      &prio, &ts));
11052                 host_to_target_timespec(arg5, &ts);
11053             } else {
11054                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11055                                                      &prio, NULL));
11056             }
11057             unlock_user(p, arg2, arg3);
11058             if (arg4 != 0) {
11059                 put_user_u32(prio, arg4);
            }
11060         }
11061         return ret;
11062 
11063     /* Not implemented for now... */
11064 /*     case TARGET_NR_mq_notify: */
11065 /*         break; */
11066 
11067     case TARGET_NR_mq_getsetattr:
11068         {
11069             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11070             ret = 0;
11071             if (arg2 != 0) {
11072                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11073                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11074                                            &posix_mq_attr_out));
11075             } else if (arg3 != 0) {
11076                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11077             }
11078             if (ret == 0 && arg3 != 0) {
11079                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11080             }
11081         }
11082         return ret;
11083 #endif
11084 
11085 #ifdef CONFIG_SPLICE
11086 #ifdef TARGET_NR_tee
11087     case TARGET_NR_tee:
11088         {
11089             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11090         }
11091         return ret;
11092 #endif
11093 #ifdef TARGET_NR_splice
11094     case TARGET_NR_splice:
11095         {
11096             loff_t loff_in, loff_out;
11097             loff_t *ploff_in = NULL, *ploff_out = NULL;
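            /* The in/out offsets are optional: when the guest supplies them
             * they are copied in here and the values updated by the host
             * syscall are written back afterwards. */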
11098             if (arg2) {
11099                 if (get_user_u64(loff_in, arg2)) {
11100                     return -TARGET_EFAULT;
11101                 }
11102                 ploff_in = &loff_in;
11103             }
11104             if (arg4) {
11105                 if (get_user_u64(loff_out, arg4)) {
11106                     return -TARGET_EFAULT;
11107                 }
11108                 ploff_out = &loff_out;
11109             }
11110             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11111             if (arg2) {
11112                 if (put_user_u64(loff_in, arg2)) {
11113                     return -TARGET_EFAULT;
11114                 }
11115             }
11116             if (arg4) {
11117                 if (put_user_u64(loff_out, arg4)) {
11118                     return -TARGET_EFAULT;
11119                 }
11120             }
11121         }
11122         return ret;
11123 #endif
11124 #ifdef TARGET_NR_vmsplice
11125     case TARGET_NR_vmsplice:
11126         {
11127             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11128             if (vec != NULL) {
11129                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11130                 unlock_iovec(vec, arg2, arg3, 0);
11131             } else {
11132                 ret = -host_to_target_errno(errno);
11133             }
11134         }
11135         return ret;
11136 #endif
11137 #endif /* CONFIG_SPLICE */
11138 #ifdef CONFIG_EVENTFD
11139 #if defined(TARGET_NR_eventfd)
11140     case TARGET_NR_eventfd:
11141         ret = get_errno(eventfd(arg1, 0));
11142         if (ret >= 0) {
11143             fd_trans_register(ret, &target_eventfd_trans);
11144         }
11145         return ret;
11146 #endif
11147 #if defined(TARGET_NR_eventfd2)
11148     case TARGET_NR_eventfd2:
11149     {
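        /* The eventfd2 flags reuse the O_NONBLOCK/O_CLOEXEC values, which
         * differ between guest and host, so translate the two flags
         * individually rather than passing arg2 through. */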
11150         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11151         if (arg2 & TARGET_O_NONBLOCK) {
11152             host_flags |= O_NONBLOCK;
11153         }
11154         if (arg2 & TARGET_O_CLOEXEC) {
11155             host_flags |= O_CLOEXEC;
11156         }
11157         ret = get_errno(eventfd(arg1, host_flags));
11158         if (ret >= 0) {
11159             fd_trans_register(ret, &target_eventfd_trans);
11160         }
11161         return ret;
11162     }
11163 #endif
11164 #endif /* CONFIG_EVENTFD  */
11165 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11166     case TARGET_NR_fallocate:
11167 #if TARGET_ABI_BITS == 32
11168         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11169                                   target_offset64(arg5, arg6)));
11170 #else
11171         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11172 #endif
11173         return ret;
11174 #endif
11175 #if defined(CONFIG_SYNC_FILE_RANGE)
11176 #if defined(TARGET_NR_sync_file_range)
11177     case TARGET_NR_sync_file_range:
11178 #if TARGET_ABI_BITS == 32
11179 #if defined(TARGET_MIPS)
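        /* MIPS inserts a padding argument after the fd, so the offset and
         * length pairs arrive in arg3..arg6 and the flags in arg7. */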
11180         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11181                                         target_offset64(arg5, arg6), arg7));
11182 #else
11183         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11184                                         target_offset64(arg4, arg5), arg6));
11185 #endif /* !TARGET_MIPS */
11186 #else
11187         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11188 #endif
11189         return ret;
11190 #endif
11191 #if defined(TARGET_NR_sync_file_range2)
11192     case TARGET_NR_sync_file_range2:
11193         /* This is like sync_file_range but the arguments are reordered */
11194 #if TARGET_ABI_BITS == 32
11195         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11196                                         target_offset64(arg5, arg6), arg2));
11197 #else
11198         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11199 #endif
11200         return ret;
11201 #endif
11202 #endif
11203 #if defined(TARGET_NR_signalfd4)
11204     case TARGET_NR_signalfd4:
11205         return do_signalfd4(arg1, arg2, arg4);
11206 #endif
11207 #if defined(TARGET_NR_signalfd)
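    /* Plain signalfd is just signalfd4 with no flags. */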
11208     case TARGET_NR_signalfd:
11209         return do_signalfd4(arg1, arg2, 0);
11210 #endif
11211 #if defined(CONFIG_EPOLL)
11212 #if defined(TARGET_NR_epoll_create)
11213     case TARGET_NR_epoll_create:
11214         return get_errno(epoll_create(arg1));
11215 #endif
11216 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11217     case TARGET_NR_epoll_create1:
11218         return get_errno(epoll_create1(arg1));
11219 #endif
11220 #if defined(TARGET_NR_epoll_ctl)
11221     case TARGET_NR_epoll_ctl:
11222     {
11223         struct epoll_event ep;
11224         struct epoll_event *epp = 0;
11225         if (arg4) {
11226             struct target_epoll_event *target_ep;
11227             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11228                 return -TARGET_EFAULT;
11229             }
11230             ep.events = tswap32(target_ep->events);
11231             /* The epoll_data_t union is just opaque data to the kernel,
11232              * so we transfer all 64 bits across and need not worry what
11233              * actual data type it is.
11234              */
11235             ep.data.u64 = tswap64(target_ep->data.u64);
11236             unlock_user_struct(target_ep, arg4, 0);
11237             epp = &ep;
11238         }
11239         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11240     }
11241 #endif
11242 
11243 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11244 #if defined(TARGET_NR_epoll_wait)
11245     case TARGET_NR_epoll_wait:
11246 #endif
11247 #if defined(TARGET_NR_epoll_pwait)
11248     case TARGET_NR_epoll_pwait:
11249 #endif
11250     {
11251         struct target_epoll_event *target_ep;
11252         struct epoll_event *ep;
11253         int epfd = arg1;
11254         int maxevents = arg3;
11255         int timeout = arg4;
11256 
11257         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11258             return -TARGET_EINVAL;
11259         }
11260 
11261         target_ep = lock_user(VERIFY_WRITE, arg2,
11262                               maxevents * sizeof(struct target_epoll_event), 1);
11263         if (!target_ep) {
11264             return -TARGET_EFAULT;
11265         }
11266 
11267         ep = g_try_new(struct epoll_event, maxevents);
11268         if (!ep) {
11269             unlock_user(target_ep, arg2, 0);
11270             return -TARGET_ENOMEM;
11271         }
11272 
11273         switch (num) {
11274 #if defined(TARGET_NR_epoll_pwait)
11275         case TARGET_NR_epoll_pwait:
11276         {
11277             target_sigset_t *target_set;
11278             sigset_t _set, *set = &_set;
11279 
11280             if (arg5) {
11281                 if (arg6 != sizeof(target_sigset_t)) {
11282                     ret = -TARGET_EINVAL;
11283                     break;
11284                 }
11285 
11286                 target_set = lock_user(VERIFY_READ, arg5,
11287                                        sizeof(target_sigset_t), 1);
11288                 if (!target_set) {
11289                     ret = -TARGET_EFAULT;
11290                     break;
11291                 }
11292                 target_to_host_sigset(set, target_set);
11293                 unlock_user(target_set, arg5, 0);
11294             } else {
11295                 set = NULL;
11296             }
11297 
11298             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11299                                              set, SIGSET_T_SIZE));
11300             break;
11301         }
11302 #endif
11303 #if defined(TARGET_NR_epoll_wait)
11304         case TARGET_NR_epoll_wait:
11305             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11306                                              NULL, 0));
11307             break;
11308 #endif
11309         default:
11310             ret = -TARGET_ENOSYS;
11311         }
11312         if (!is_error(ret)) {
11313             int i;
11314             for (i = 0; i < ret; i++) {
11315                 target_ep[i].events = tswap32(ep[i].events);
11316                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11317             }
11318             unlock_user(target_ep, arg2,
11319                         ret * sizeof(struct target_epoll_event));
11320         } else {
11321             unlock_user(target_ep, arg2, 0);
11322         }
11323         g_free(ep);
11324         return ret;
11325     }
11326 #endif
11327 #endif
11328 #ifdef TARGET_NR_prlimit64
11329     case TARGET_NR_prlimit64:
11330     {
11331         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11332         struct target_rlimit64 *target_rnew, *target_rold;
11333         struct host_rlimit64 rnew, rold, *rnewp = 0;
11334         int resource = target_to_host_resource(arg2);
11335         if (arg3) {
11336             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11337                 return -TARGET_EFAULT;
11338             }
11339             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11340             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11341             unlock_user_struct(target_rnew, arg3, 0);
11342             rnewp = &rnew;
11343         }
11344 
11345         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11346         if (!is_error(ret) && arg4) {
11347             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11348                 return -TARGET_EFAULT;
11349             }
11350             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11351             target_rold->rlim_max = tswap64(rold.rlim_max);
11352             unlock_user_struct(target_rold, arg4, 1);
11353         }
11354         return ret;
11355     }
11356 #endif
11357 #ifdef TARGET_NR_gethostname
11358     case TARGET_NR_gethostname:
11359     {
11360         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11361         if (name) {
11362             ret = get_errno(gethostname(name, arg2));
11363             unlock_user(name, arg1, arg2);
11364         } else {
11365             ret = -TARGET_EFAULT;
11366         }
11367         return ret;
11368     }
11369 #endif
11370 #ifdef TARGET_NR_atomic_cmpxchg_32
11371     case TARGET_NR_atomic_cmpxchg_32:
11372     {
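        /* Compare the 32-bit word at guest address arg6 with the expected
         * value in arg2 and, if they match, store the new value from arg1;
         * the old memory contents are returned either way.  As the comment
         * below notes, this is not atomic with respect to other threads. */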
11373         /* should use start_exclusive from main.c */
11374         abi_ulong mem_value;
11375         if (get_user_u32(mem_value, arg6)) {
11376             target_siginfo_t info;
11377             info.si_signo = SIGSEGV;
11378             info.si_errno = 0;
11379             info.si_code = TARGET_SEGV_MAPERR;
11380             info._sifields._sigfault._addr = arg6;
11381             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11382                          QEMU_SI_FAULT, &info);
11383             ret = 0xdeadbeef;
11384 
11385         }
11386         if (mem_value == arg2) {
11387             put_user_u32(arg1, arg6);
        }
11388         return mem_value;
11389     }
11390 #endif
11391 #ifdef TARGET_NR_atomic_barrier
11392     case TARGET_NR_atomic_barrier:
11393         /* Like the kernel implementation and the QEMU arm barrier,
11394            this can safely be treated as a no-op. */
11395         return 0;
11396 #endif
11397 
11398 #ifdef TARGET_NR_timer_create
11399     case TARGET_NR_timer_create:
11400     {
11401         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11402 
11403         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11404 
11405         int clkid = arg1;
11406         int timer_index = next_free_host_timer();
11407 
11408         if (timer_index < 0) {
11409             ret = -TARGET_EAGAIN;
11410         } else {
11411             timer_t *phtimer = g_posix_timers + timer_index;
11412 
11413             if (arg2) {
11414                 phost_sevp = &host_sevp;
11415                 ret = target_to_host_sigevent(phost_sevp, arg2);
11416                 if (ret != 0) {
11417                     return ret;
11418                 }
11419             }
11420 
11421             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11422             if (ret) {
11423                 phtimer = NULL;
11424             } else {
11425                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11426                     return -TARGET_EFAULT;
11427                 }
11428             }
11429         }
11430         return ret;
11431     }
11432 #endif
11433 
11434 #ifdef TARGET_NR_timer_settime
11435     case TARGET_NR_timer_settime:
11436     {
11437         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11438          * struct itimerspec * old_value */
11439         target_timer_t timerid = get_timer_id(arg1);
11440 
11441         if (timerid < 0) {
11442             ret = timerid;
11443         } else if (arg3 == 0) {
11444             ret = -TARGET_EINVAL;
11445         } else {
11446             timer_t htimer = g_posix_timers[timerid];
11447             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11448 
11449             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11450                 return -TARGET_EFAULT;
11451             }
11452             ret = get_errno(
11453                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11454             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11455                 return -TARGET_EFAULT;
11456             }
11457         }
11458         return ret;
11459     }
11460 #endif
11461 
11462 #ifdef TARGET_NR_timer_gettime
11463     case TARGET_NR_timer_gettime:
11464     {
11465         /* args: timer_t timerid, struct itimerspec *curr_value */
11466         target_timer_t timerid = get_timer_id(arg1);
11467 
11468         if (timerid < 0) {
11469             ret = timerid;
11470         } else if (!arg2) {
11471             ret = -TARGET_EFAULT;
11472         } else {
11473             timer_t htimer = g_posix_timers[timerid];
11474             struct itimerspec hspec;
11475             ret = get_errno(timer_gettime(htimer, &hspec));
11476 
11477             if (host_to_target_itimerspec(arg2, &hspec)) {
11478                 ret = -TARGET_EFAULT;
11479             }
11480         }
11481         return ret;
11482     }
11483 #endif
11484 
11485 #ifdef TARGET_NR_timer_getoverrun
11486     case TARGET_NR_timer_getoverrun:
11487     {
11488         /* args: timer_t timerid */
11489         target_timer_t timerid = get_timer_id(arg1);
11490 
11491         if (timerid < 0) {
11492             ret = timerid;
11493         } else {
11494             timer_t htimer = g_posix_timers[timerid];
11495             ret = get_errno(timer_getoverrun(htimer));
11496         }
11497         fd_trans_unregister(ret);
11498         return ret;
11499     }
11500 #endif
11501 
11502 #ifdef TARGET_NR_timer_delete
11503     case TARGET_NR_timer_delete:
11504     {
11505         /* args: timer_t timerid */
11506         target_timer_t timerid = get_timer_id(arg1);
11507 
11508         if (timerid < 0) {
11509             ret = timerid;
11510         } else {
11511             timer_t htimer = g_posix_timers[timerid];
11512             ret = get_errno(timer_delete(htimer));
11513             g_posix_timers[timerid] = 0;
11514         }
11515         return ret;
11516     }
11517 #endif
11518 
11519 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11520     case TARGET_NR_timerfd_create:
11521         return get_errno(timerfd_create(arg1,
11522                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11523 #endif
11524 
11525 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11526     case TARGET_NR_timerfd_gettime:
11527         {
11528             struct itimerspec its_curr;
11529 
11530             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11531 
11532             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11533                 return -TARGET_EFAULT;
11534             }
11535         }
11536         return ret;
11537 #endif
11538 
11539 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11540     case TARGET_NR_timerfd_settime:
11541         {
11542             struct itimerspec its_new, its_old, *p_new;
11543 
11544             if (arg3) {
11545                 if (target_to_host_itimerspec(&its_new, arg3)) {
11546                     return -TARGET_EFAULT;
11547                 }
11548                 p_new = &its_new;
11549             } else {
11550                 p_new = NULL;
11551             }
11552 
11553             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11554 
11555             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11556                 return -TARGET_EFAULT;
11557             }
11558         }
11559         return ret;
11560 #endif
11561 
11562 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11563     case TARGET_NR_ioprio_get:
11564         return get_errno(ioprio_get(arg1, arg2));
11565 #endif
11566 
11567 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11568     case TARGET_NR_ioprio_set:
11569         return get_errno(ioprio_set(arg1, arg2, arg3));
11570 #endif
11571 
11572 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11573     case TARGET_NR_setns:
11574         return get_errno(setns(arg1, arg2));
11575 #endif
11576 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11577     case TARGET_NR_unshare:
11578         return get_errno(unshare(arg1));
11579 #endif
11580 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11581     case TARGET_NR_kcmp:
11582         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11583 #endif
11584 #ifdef TARGET_NR_swapcontext
11585     case TARGET_NR_swapcontext:
11586         /* PowerPC specific.  */
11587         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11588 #endif
11589 
11590     default:
11591         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11592         return -TARGET_ENOSYS;
11593     }
11594     return ret;
11595 }
11596 
11597 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11598                     abi_long arg2, abi_long arg3, abi_long arg4,
11599                     abi_long arg5, abi_long arg6, abi_long arg7,
11600                     abi_long arg8)
11601 {
11602     CPUState *cpu = ENV_GET_CPU(cpu_env);
11603     abi_long ret;
11604 
11605 #ifdef DEBUG_ERESTARTSYS
11606     /* Debug-only code for exercising the syscall-restart code paths
11607      * in the per-architecture cpu main loops: restart every syscall
11608      * the guest makes once before letting it through.
11609      */
11610     {
11611         static bool flag;
11612         flag = !flag;
11613         if (flag) {
11614             return -TARGET_ERESTARTSYS;
11615         }
11616     }
11617 #endif
11618 
11619     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11620                              arg5, arg6, arg7, arg8);
11621 
11622     if (unlikely(do_strace)) {
11623         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11624         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11625                           arg5, arg6, arg7, arg8);
11626         print_syscall_ret(num, ret);
11627     } else {
11628         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11629                           arg5, arg6, arg7, arg8);
11630     }
11631 
11632     trace_guest_user_syscall_ret(cpu, num, ret);
11633     return ret;
11634 }
11635