xref: /openbmc/qemu/linux-user/syscall.c (revision 524fa3408ed745a2fed0642fb0d92c934d10ff64)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #ifdef CONFIG_TIMERFD
59 #include <sys/timerfd.h>
60 #endif
61 #ifdef CONFIG_EVENTFD
62 #include <sys/eventfd.h>
63 #endif
64 #ifdef CONFIG_EPOLL
65 #include <sys/epoll.h>
66 #endif
67 #ifdef CONFIG_ATTR
68 #include "qemu/xattr.h"
69 #endif
70 #ifdef CONFIG_SENDFILE
71 #include <sys/sendfile.h>
72 #endif
73 
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
80 
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
86 #include <linux/kd.h>
87 #include <linux/mtio.h>
88 #include <linux/fs.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
91 #endif
92 #include <linux/fb.h>
93 #if defined(CONFIG_USBFS)
94 #include <linux/usbdevice_fs.h>
95 #include <linux/usb/ch9.h>
96 #endif
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include "linux_loop.h"
106 #include "uname.h"
107 
108 #include "qemu.h"
109 #include "qemu/guest-random.h"
110 #include "qapi/error.h"
111 #include "fd-trans.h"
112 
113 #ifndef CLONE_IO
114 #define CLONE_IO                0x80000000      /* Clone io context */
115 #endif
116 
117 /* We can't directly call the host clone syscall, because this will
118  * badly confuse libc (breaking mutexes, for example). So we must
119  * divide clone flags into:
120  *  * flag combinations that look like pthread_create()
121  *  * flag combinations that look like fork()
122  *  * flags we can implement within QEMU itself
123  *  * flags we can't support and will return an error for
124  */
125 /* For thread creation, all these flags must be present; for
126  * fork, none must be present.
127  */
128 #define CLONE_THREAD_FLAGS                              \
129     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
130      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
131 
132 /* These flags are ignored:
133  * CLONE_DETACHED is now ignored by the kernel;
134  * CLONE_IO is just an optimisation hint to the I/O scheduler
135  */
136 #define CLONE_IGNORED_FLAGS                     \
137     (CLONE_DETACHED | CLONE_IO)
138 
139 /* Flags for fork which we can implement within QEMU itself */
140 #define CLONE_OPTIONAL_FORK_FLAGS               \
141     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
142      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
143 
144 /* Flags for thread creation which we can implement within QEMU itself */
145 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
146     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
147      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
148 
149 #define CLONE_INVALID_FORK_FLAGS                                        \
150     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
151 
152 #define CLONE_INVALID_THREAD_FLAGS                                      \
153     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
154        CLONE_IGNORED_FLAGS))
155 
156 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
157  * have almost all been allocated. We cannot support any of
158  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
159  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
160  * The checks against the invalid thread masks above will catch these.
161  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
162  */
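/* Illustrative note (editor's addition, not part of the original source):
 * a typical glibc pthread_create() issues clone() with roughly
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus only CLONE_OPTIONAL_THREAD_FLAGS bits,
 * so it passes the CLONE_INVALID_THREAD_FLAGS check; a fork()-style clone
 * carries at most a signal number in the CSIGNAL bits plus
 * CLONE_OPTIONAL_FORK_FLAGS bits, so it passes CLONE_INVALID_FORK_FLAGS.
 */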
163 
164 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
165  * once. This exercises the codepaths for restart.
166  */
167 //#define DEBUG_ERESTARTSYS
168 
169 //#include <linux/msdos_fs.h>
170 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
171 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
172 
173 #undef _syscall0
174 #undef _syscall1
175 #undef _syscall2
176 #undef _syscall3
177 #undef _syscall4
178 #undef _syscall5
179 #undef _syscall6
180 
181 #define _syscall0(type,name)		\
182 static type name (void)			\
183 {					\
184 	return syscall(__NR_##name);	\
185 }
186 
187 #define _syscall1(type,name,type1,arg1)		\
188 static type name (type1 arg1)			\
189 {						\
190 	return syscall(__NR_##name, arg1);	\
191 }
192 
193 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
194 static type name (type1 arg1,type2 arg2)		\
195 {							\
196 	return syscall(__NR_##name, arg1, arg2);	\
197 }
198 
199 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
200 static type name (type1 arg1,type2 arg2,type3 arg3)		\
201 {								\
202 	return syscall(__NR_##name, arg1, arg2, arg3);		\
203 }
204 
205 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
206 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
207 {										\
208 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
209 }
210 
211 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
212 		  type5,arg5)							\
213 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
214 {										\
215 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
216 }
217 
218 
219 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
220 		  type5,arg5,type6,arg6)					\
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
222                   type6 arg6)							\
223 {										\
224 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
225 }
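/* Illustrative note (editor's addition): an instantiation such as
 * "_syscall1(int, exit_group, int, error_code)" later in this file expands
 * to a direct host wrapper equivalent to
 *
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 *
 * i.e. each _syscallN use invokes the raw host syscall, bypassing whatever
 * wrapper (if any) the host libc provides.
 */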
226 
227 
228 #define __NR_sys_uname __NR_uname
229 #define __NR_sys_getcwd1 __NR_getcwd
230 #define __NR_sys_getdents __NR_getdents
231 #define __NR_sys_getdents64 __NR_getdents64
232 #define __NR_sys_getpriority __NR_getpriority
233 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
234 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
235 #define __NR_sys_syslog __NR_syslog
236 #define __NR_sys_futex __NR_futex
237 #define __NR_sys_inotify_init __NR_inotify_init
238 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
239 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
240 
241 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
242 #define __NR__llseek __NR_lseek
243 #endif
244 
245 /* Newer kernel ports have llseek() instead of _llseek() */
246 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
247 #define TARGET_NR__llseek TARGET_NR_llseek
248 #endif
249 
250 #define __NR_sys_gettid __NR_gettid
251 _syscall0(int, sys_gettid)
252 
253 /* For the 64-bit guest on 32-bit host case we must emulate
254  * getdents using getdents64, because otherwise the host
255  * might hand us back more dirent records than we can fit
256  * into the guest buffer after structure format conversion.
257  * Otherwise we implement getdents using the host getdents, if the host has it.
258  */
259 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
260 #define EMULATE_GETDENTS_WITH_GETDENTS
261 #endif
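/* Illustrative note (editor's addition): on a 32-bit host, struct
 * linux_dirent uses 32-bit d_ino/d_off fields, so each host record is
 * smaller than the 64-bit guest's record; a host getdents() sized to the
 * guest buffer could therefore return more entries than fit once they are
 * expanded to the target layout.  getdents64 records are no smaller than
 * the target's, which avoids that overflow.
 */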
262 
263 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
264 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #endif
266 #if (defined(TARGET_NR_getdents) && \
267       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
268     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
270 #endif
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
273           loff_t *, res, uint, wh);
274 #endif
275 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
276 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
277           siginfo_t *, uinfo)
278 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
279 #ifdef __NR_exit_group
280 _syscall1(int,exit_group,int,error_code)
281 #endif
282 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
283 _syscall1(int,set_tid_address,int *,tidptr)
284 #endif
285 #if defined(TARGET_NR_futex) && defined(__NR_futex)
286 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
287           const struct timespec *,timeout,int *,uaddr2,int,val3)
288 #endif
289 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
290 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
291           unsigned long *, user_mask_ptr);
292 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
293 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
294           unsigned long *, user_mask_ptr);
295 #define __NR_sys_getcpu __NR_getcpu
296 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
297 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
298           void *, arg);
299 _syscall2(int, capget, struct __user_cap_header_struct *, header,
300           struct __user_cap_data_struct *, data);
301 _syscall2(int, capset, struct __user_cap_header_struct *, header,
302           struct __user_cap_data_struct *, data);
303 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
304 _syscall2(int, ioprio_get, int, which, int, who)
305 #endif
306 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
307 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
308 #endif
309 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
310 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
311 #endif
312 
313 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
314 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
315           unsigned long, idx1, unsigned long, idx2)
316 #endif
317 
318 static bitmask_transtbl fcntl_flags_tbl[] = {
319   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
320   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
321   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
322   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
323   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
324   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
325   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
326   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
327   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
328   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
329   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
330   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
331   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
332 #if defined(O_DIRECT)
333   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
334 #endif
335 #if defined(O_NOATIME)
336   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
337 #endif
338 #if defined(O_CLOEXEC)
339   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
340 #endif
341 #if defined(O_PATH)
342   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
343 #endif
344 #if defined(O_TMPFILE)
345   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
346 #endif
347   /* Don't terminate the list prematurely on 64-bit host+guest.  */
348 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
349   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
350 #endif
351   { 0, 0, 0, 0 }
352 };
353 
354 static int sys_getcwd1(char *buf, size_t size)
355 {
356   if (getcwd(buf, size) == NULL) {
357       /* getcwd() sets errno */
358       return (-1);
359   }
360   return strlen(buf)+1;
361 }
362 
363 #ifdef TARGET_NR_utimensat
364 #if defined(__NR_utimensat)
365 #define __NR_sys_utimensat __NR_utimensat
366 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
367           const struct timespec *,tsp,int,flags)
368 #else
369 static int sys_utimensat(int dirfd, const char *pathname,
370                          const struct timespec times[2], int flags)
371 {
372     errno = ENOSYS;
373     return -1;
374 }
375 #endif
376 #endif /* TARGET_NR_utimensat */
377 
378 #ifdef TARGET_NR_renameat2
379 #if defined(__NR_renameat2)
380 #define __NR_sys_renameat2 __NR_renameat2
381 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
382           const char *, new, unsigned int, flags)
383 #else
384 static int sys_renameat2(int oldfd, const char *old,
385                          int newfd, const char *new, int flags)
386 {
387     if (flags == 0) {
388         return renameat(oldfd, old, newfd, new);
389     }
390     errno = ENOSYS;
391     return -1;
392 }
393 #endif
394 #endif /* TARGET_NR_renameat2 */
395 
396 #ifdef CONFIG_INOTIFY
397 #include <sys/inotify.h>
398 
399 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
400 static int sys_inotify_init(void)
401 {
402   return (inotify_init());
403 }
404 #endif
405 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
406 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
407 {
408   return (inotify_add_watch(fd, pathname, mask));
409 }
410 #endif
411 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
412 static int sys_inotify_rm_watch(int fd, int32_t wd)
413 {
414   return (inotify_rm_watch(fd, wd));
415 }
416 #endif
417 #ifdef CONFIG_INOTIFY1
418 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
419 static int sys_inotify_init1(int flags)
420 {
421   return (inotify_init1(flags));
422 }
423 #endif
424 #endif
425 #else
426 /* Userspace can usually survive at runtime without inotify */
427 #undef TARGET_NR_inotify_init
428 #undef TARGET_NR_inotify_init1
429 #undef TARGET_NR_inotify_add_watch
430 #undef TARGET_NR_inotify_rm_watch
431 #endif /* CONFIG_INOTIFY  */
432 
433 #if defined(TARGET_NR_prlimit64)
434 #ifndef __NR_prlimit64
435 # define __NR_prlimit64 -1
436 #endif
437 #define __NR_sys_prlimit64 __NR_prlimit64
438 /* The glibc rlimit structure may not match the one used by the underlying syscall */
439 struct host_rlimit64 {
440     uint64_t rlim_cur;
441     uint64_t rlim_max;
442 };
443 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
444           const struct host_rlimit64 *, new_limit,
445           struct host_rlimit64 *, old_limit)
446 #endif
447 
448 
449 #if defined(TARGET_NR_timer_create)
450 /* Maximum of 32 active POSIX timers allowed at any one time. */
451 static timer_t g_posix_timers[32] = { 0, } ;
452 
453 static inline int next_free_host_timer(void)
454 {
455     int k ;
456     /* FIXME: Does finding the next free slot require a lock? */
457     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
458         if (g_posix_timers[k] == 0) {
459             g_posix_timers[k] = (timer_t) 1;
460             return k;
461         }
462     }
463     return -1;
464 }
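/* One possible answer to the FIXME above (editor's sketch, not what the
 * code currently does): claim a slot with an atomic compare-and-swap so
 * two threads racing in timer_create cannot both take slot k, e.g.
 *
 *     timer_t expected = 0;
 *     if (__atomic_compare_exchange_n(&g_posix_timers[k], &expected,
 *                                     (timer_t)1, false,
 *                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
 *         return k;
 *     }
 */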
465 #endif
466 
467 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
468 #ifdef TARGET_ARM
469 static inline int regpairs_aligned(void *cpu_env, int num)
470 {
471     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
472 }
473 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
474 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
475 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
476 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
477  * of registers which translates to the same as ARM/MIPS, because we start with
478  * r3 as arg1 */
479 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
480 #elif defined(TARGET_SH4)
481 /* SH4 doesn't align register pairs, except for p{read,write}64 */
482 static inline int regpairs_aligned(void *cpu_env, int num)
483 {
484     switch (num) {
485     case TARGET_NR_pread64:
486     case TARGET_NR_pwrite64:
487         return 1;
488 
489     default:
490         return 0;
491     }
492 }
493 #elif defined(TARGET_XTENSA)
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #else
496 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
497 #endif
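/* Worked example (editor's addition): on 32-bit ARM EABI a guest
 * pread64(fd, buf, count, offset) passes the 64-bit offset in an aligned
 * register pair (r4/r5 rather than r3/r4), leaving an unused slot after the
 * third argument; the syscall dispatcher calls regpairs_aligned() to decide
 * whether to skip that slot when reassembling 64-bit values from argument
 * pairs.
 */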
498 
499 #define ERRNO_TABLE_SIZE 1200
500 
501 /* target_to_host_errno_table[] is initialized from
502  * host_to_target_errno_table[] in syscall_init(). */
503 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
504 };
505 
506 /*
507  * This list is the union of errno values overridden in asm-<arch>/errno.h
508  * minus the errnos that are not actually generic to all archs.
509  */
510 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
511     [EAGAIN]		= TARGET_EAGAIN,
512     [EIDRM]		= TARGET_EIDRM,
513     [ECHRNG]		= TARGET_ECHRNG,
514     [EL2NSYNC]		= TARGET_EL2NSYNC,
515     [EL3HLT]		= TARGET_EL3HLT,
516     [EL3RST]		= TARGET_EL3RST,
517     [ELNRNG]		= TARGET_ELNRNG,
518     [EUNATCH]		= TARGET_EUNATCH,
519     [ENOCSI]		= TARGET_ENOCSI,
520     [EL2HLT]		= TARGET_EL2HLT,
521     [EDEADLK]		= TARGET_EDEADLK,
522     [ENOLCK]		= TARGET_ENOLCK,
523     [EBADE]		= TARGET_EBADE,
524     [EBADR]		= TARGET_EBADR,
525     [EXFULL]		= TARGET_EXFULL,
526     [ENOANO]		= TARGET_ENOANO,
527     [EBADRQC]		= TARGET_EBADRQC,
528     [EBADSLT]		= TARGET_EBADSLT,
529     [EBFONT]		= TARGET_EBFONT,
530     [ENOSTR]		= TARGET_ENOSTR,
531     [ENODATA]		= TARGET_ENODATA,
532     [ETIME]		= TARGET_ETIME,
533     [ENOSR]		= TARGET_ENOSR,
534     [ENONET]		= TARGET_ENONET,
535     [ENOPKG]		= TARGET_ENOPKG,
536     [EREMOTE]		= TARGET_EREMOTE,
537     [ENOLINK]		= TARGET_ENOLINK,
538     [EADV]		= TARGET_EADV,
539     [ESRMNT]		= TARGET_ESRMNT,
540     [ECOMM]		= TARGET_ECOMM,
541     [EPROTO]		= TARGET_EPROTO,
542     [EDOTDOT]		= TARGET_EDOTDOT,
543     [EMULTIHOP]		= TARGET_EMULTIHOP,
544     [EBADMSG]		= TARGET_EBADMSG,
545     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
546     [EOVERFLOW]		= TARGET_EOVERFLOW,
547     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
548     [EBADFD]		= TARGET_EBADFD,
549     [EREMCHG]		= TARGET_EREMCHG,
550     [ELIBACC]		= TARGET_ELIBACC,
551     [ELIBBAD]		= TARGET_ELIBBAD,
552     [ELIBSCN]		= TARGET_ELIBSCN,
553     [ELIBMAX]		= TARGET_ELIBMAX,
554     [ELIBEXEC]		= TARGET_ELIBEXEC,
555     [EILSEQ]		= TARGET_EILSEQ,
556     [ENOSYS]		= TARGET_ENOSYS,
557     [ELOOP]		= TARGET_ELOOP,
558     [ERESTART]		= TARGET_ERESTART,
559     [ESTRPIPE]		= TARGET_ESTRPIPE,
560     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
561     [EUSERS]		= TARGET_EUSERS,
562     [ENOTSOCK]		= TARGET_ENOTSOCK,
563     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
564     [EMSGSIZE]		= TARGET_EMSGSIZE,
565     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
566     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
567     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
568     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
569     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
570     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
571     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
572     [EADDRINUSE]	= TARGET_EADDRINUSE,
573     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
574     [ENETDOWN]		= TARGET_ENETDOWN,
575     [ENETUNREACH]	= TARGET_ENETUNREACH,
576     [ENETRESET]		= TARGET_ENETRESET,
577     [ECONNABORTED]	= TARGET_ECONNABORTED,
578     [ECONNRESET]	= TARGET_ECONNRESET,
579     [ENOBUFS]		= TARGET_ENOBUFS,
580     [EISCONN]		= TARGET_EISCONN,
581     [ENOTCONN]		= TARGET_ENOTCONN,
582     [EUCLEAN]		= TARGET_EUCLEAN,
583     [ENOTNAM]		= TARGET_ENOTNAM,
584     [ENAVAIL]		= TARGET_ENAVAIL,
585     [EISNAM]		= TARGET_EISNAM,
586     [EREMOTEIO]		= TARGET_EREMOTEIO,
587     [EDQUOT]            = TARGET_EDQUOT,
588     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
589     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
590     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
591     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
592     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
593     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
594     [EALREADY]		= TARGET_EALREADY,
595     [EINPROGRESS]	= TARGET_EINPROGRESS,
596     [ESTALE]		= TARGET_ESTALE,
597     [ECANCELED]		= TARGET_ECANCELED,
598     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
599     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
600 #ifdef ENOKEY
601     [ENOKEY]		= TARGET_ENOKEY,
602 #endif
603 #ifdef EKEYEXPIRED
604     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
605 #endif
606 #ifdef EKEYREVOKED
607     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
608 #endif
609 #ifdef EKEYREJECTED
610     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
611 #endif
612 #ifdef EOWNERDEAD
613     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
614 #endif
615 #ifdef ENOTRECOVERABLE
616     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
617 #endif
618 #ifdef ENOMSG
619     [ENOMSG]            = TARGET_ENOMSG,
620 #endif
621 #ifdef ERFKILL
622     [ERFKILL]           = TARGET_ERFKILL,
623 #endif
624 #ifdef EHWPOISON
625     [EHWPOISON]         = TARGET_EHWPOISON,
626 #endif
627 };
628 
629 static inline int host_to_target_errno(int err)
630 {
631     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
632         host_to_target_errno_table[err]) {
633         return host_to_target_errno_table[err];
634     }
635     return err;
636 }
637 
638 static inline int target_to_host_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         target_to_host_errno_table[err]) {
642         return target_to_host_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline abi_long get_errno(abi_long ret)
648 {
649     if (ret == -1)
650         return -host_to_target_errno(errno);
651     else
652         return ret;
653 }
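/* Usage note (editor's addition): host syscall results are normally wrapped
 * as, for example,
 *
 *     ret = get_errno(sys_getcwd1(p, arg2));
 *
 * so a host failure (-1 with errno set) becomes a negative target errno such
 * as -TARGET_ENOENT, while any non-negative result is passed through
 * unchanged.
 */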
654 
655 const char *target_strerror(int err)
656 {
657     if (err == TARGET_ERESTARTSYS) {
658         return "To be restarted";
659     }
660     if (err == TARGET_QEMU_ESIGRETURN) {
661         return "Successful exit from sigreturn";
662     }
663 
664     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
665         return NULL;
666     }
667     return strerror(target_to_host_errno(err));
668 }
669 
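/* Explanatory note (editor's addition): the safe_syscallN() wrappers below
 * route through safe_syscall() rather than calling the host syscall
 * directly.  The point is signal safety: safe_syscall() (per-host assembly
 * elsewhere in linux-user) only enters the host syscall if no guest signal
 * is pending, and otherwise fails with TARGET_ERESTARTSYS so the caller can
 * let the signal be delivered and restart the guest syscall, closing the
 * "signal arrives just before a blocking syscall" race.
 */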
670 #define safe_syscall0(type, name) \
671 static type safe_##name(void) \
672 { \
673     return safe_syscall(__NR_##name); \
674 }
675 
676 #define safe_syscall1(type, name, type1, arg1) \
677 static type safe_##name(type1 arg1) \
678 { \
679     return safe_syscall(__NR_##name, arg1); \
680 }
681 
682 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
683 static type safe_##name(type1 arg1, type2 arg2) \
684 { \
685     return safe_syscall(__NR_##name, arg1, arg2); \
686 }
687 
688 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
689 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
690 { \
691     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
692 }
693 
694 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
695     type4, arg4) \
696 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
697 { \
698     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
699 }
700 
701 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
702     type4, arg4, type5, arg5) \
703 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
704     type5 arg5) \
705 { \
706     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
707 }
708 
709 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
710     type4, arg4, type5, arg5, type6, arg6) \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
712     type5 arg5, type6 arg6) \
713 { \
714     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
715 }
716 
717 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
718 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
719 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
720               int, flags, mode_t, mode)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722               struct rusage *, rusage)
723 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
724               int, options, struct rusage *, rusage)
725 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
726 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
727               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
728 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
729               struct timespec *, tsp, const sigset_t *, sigmask,
730               size_t, sigsetsize)
731 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
732               int, maxevents, int, timeout, const sigset_t *, sigmask,
733               size_t, sigsetsize)
734 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
735               const struct timespec *,timeout,int *,uaddr2,int,val3)
736 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
737 safe_syscall2(int, kill, pid_t, pid, int, sig)
738 safe_syscall2(int, tkill, int, tid, int, sig)
739 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
740 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
741 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
743               unsigned long, pos_l, unsigned long, pos_h)
744 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
745               unsigned long, pos_l, unsigned long, pos_h)
746 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
747               socklen_t, addrlen)
748 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
749               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
750 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
751               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
752 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
753 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
754 safe_syscall2(int, flock, int, fd, int, operation)
755 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
756               const struct timespec *, uts, size_t, sigsetsize)
757 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
758               int, flags)
759 safe_syscall2(int, nanosleep, const struct timespec *, req,
760               struct timespec *, rem)
761 #ifdef TARGET_NR_clock_nanosleep
762 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
763               const struct timespec *, req, struct timespec *, rem)
764 #endif
765 #ifdef __NR_ipc
766 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
767               void *, ptr, long, fifth)
768 #endif
769 #ifdef __NR_msgsnd
770 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
771               int, flags)
772 #endif
773 #ifdef __NR_msgrcv
774 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
775               long, msgtype, int, flags)
776 #endif
777 #ifdef __NR_semtimedop
778 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
779               unsigned, nsops, const struct timespec *, timeout)
780 #endif
781 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
782 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
783               size_t, len, unsigned, prio, const struct timespec *, timeout)
784 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
785               size_t, len, unsigned *, prio, const struct timespec *, timeout)
786 #endif
787 /* We do ioctl like this rather than via safe_syscall3 to preserve the
788  * "third argument might be integer or pointer or not present" behaviour of
789  * the libc function.
790  */
791 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
792 /* Similarly for fcntl. Note that callers must always:
793  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
794  *  use the flock64 struct rather than unsuffixed flock
795  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
796  */
797 #ifdef __NR_fcntl64
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
799 #else
800 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
801 #endif
802 
803 static inline int host_to_target_sock_type(int host_type)
804 {
805     int target_type;
806 
807     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
808     case SOCK_DGRAM:
809         target_type = TARGET_SOCK_DGRAM;
810         break;
811     case SOCK_STREAM:
812         target_type = TARGET_SOCK_STREAM;
813         break;
814     default:
815         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
816         break;
817     }
818 
819 #if defined(SOCK_CLOEXEC)
820     if (host_type & SOCK_CLOEXEC) {
821         target_type |= TARGET_SOCK_CLOEXEC;
822     }
823 #endif
824 
825 #if defined(SOCK_NONBLOCK)
826     if (host_type & SOCK_NONBLOCK) {
827         target_type |= TARGET_SOCK_NONBLOCK;
828     }
829 #endif
830 
831     return target_type;
832 }
833 
834 static abi_ulong target_brk;
835 static abi_ulong target_original_brk;
836 static abi_ulong brk_page;
837 
838 void target_set_brk(abi_ulong new_brk)
839 {
840     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
841     brk_page = HOST_PAGE_ALIGN(target_brk);
842 }
843 
844 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
845 #define DEBUGF_BRK(message, args...)
846 
847 /* do_brk() must return target values and target errnos. */
848 abi_long do_brk(abi_ulong new_brk)
849 {
850     abi_long mapped_addr;
851     abi_ulong new_alloc_size;
852 
853     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
854 
855     if (!new_brk) {
856         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
857         return target_brk;
858     }
859     if (new_brk < target_original_brk) {
860         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
861                    target_brk);
862         return target_brk;
863     }
864 
865     /* If the new brk is less than the highest page reserved to the
866      * target heap allocation, set it and we're almost done...  */
867     if (new_brk <= brk_page) {
868         /* Heap contents are initialized to zero, as for anonymous
869          * mapped pages.  */
870         if (new_brk > target_brk) {
871             memset(g2h(target_brk), 0, new_brk - target_brk);
872         }
873         target_brk = new_brk;
874         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
875         return target_brk;
876     }
877 
878     /* We need to allocate more memory after the brk... Note that
879      * we don't use MAP_FIXED because that will map over the top of
880      * any existing mapping (like the one with the host libc or qemu
881      * itself); instead we treat "mapped but at wrong address" as
882      * a failure and unmap again.
883      */
884     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
885     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
886                                         PROT_READ|PROT_WRITE,
887                                         MAP_ANON|MAP_PRIVATE, 0, 0));
888 
889     if (mapped_addr == brk_page) {
890         /* Heap contents are initialized to zero, as for anonymous
891          * mapped pages.  Technically the new pages are already
892          * initialized to zero since they *are* anonymous mapped
893          * pages, however we have to take care with the contents that
894          * come from the remaining part of the previous page: it may
895          * contain garbage data due to a previous heap usage (grown
896          * then shrunken).  */
897         memset(g2h(target_brk), 0, brk_page - target_brk);
898 
899         target_brk = new_brk;
900         brk_page = HOST_PAGE_ALIGN(target_brk);
901         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
902             target_brk);
903         return target_brk;
904     } else if (mapped_addr != -1) {
905         /* Mapped but at wrong address, meaning there wasn't actually
906          * enough space for this brk.
907          */
908         target_munmap(mapped_addr, new_alloc_size);
909         mapped_addr = -1;
910         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
911     }
912     else {
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
914     }
915 
916 #if defined(TARGET_ALPHA)
917     /* We (partially) emulate OSF/1 on Alpha, which requires we
918        return a proper errno, not an unchanged brk value.  */
919     return -TARGET_ENOMEM;
920 #endif
921     /* For everything else, return the previous break. */
922     return target_brk;
923 }
924 
925 static inline abi_long copy_from_user_fdset(fd_set *fds,
926                                             abi_ulong target_fds_addr,
927                                             int n)
928 {
929     int i, nw, j, k;
930     abi_ulong b, *target_fds;
931 
932     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
933     if (!(target_fds = lock_user(VERIFY_READ,
934                                  target_fds_addr,
935                                  sizeof(abi_ulong) * nw,
936                                  1)))
937         return -TARGET_EFAULT;
938 
939     FD_ZERO(fds);
940     k = 0;
941     for (i = 0; i < nw; i++) {
942         /* grab the abi_ulong */
943         __get_user(b, &target_fds[i]);
944         for (j = 0; j < TARGET_ABI_BITS; j++) {
945             /* check the bit inside the abi_ulong */
946             if ((b >> j) & 1)
947                 FD_SET(k, fds);
948             k++;
949         }
950     }
951 
952     unlock_user(target_fds, target_fds_addr, 0);
953 
954     return 0;
955 }
956 
957 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
958                                                  abi_ulong target_fds_addr,
959                                                  int n)
960 {
961     if (target_fds_addr) {
962         if (copy_from_user_fdset(fds, target_fds_addr, n))
963             return -TARGET_EFAULT;
964         *fds_ptr = fds;
965     } else {
966         *fds_ptr = NULL;
967     }
968     return 0;
969 }
970 
971 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
972                                           const fd_set *fds,
973                                           int n)
974 {
975     int i, nw, j, k;
976     abi_long v;
977     abi_ulong *target_fds;
978 
979     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
980     if (!(target_fds = lock_user(VERIFY_WRITE,
981                                  target_fds_addr,
982                                  sizeof(abi_ulong) * nw,
983                                  0)))
984         return -TARGET_EFAULT;
985 
986     k = 0;
987     for (i = 0; i < nw; i++) {
988         v = 0;
989         for (j = 0; j < TARGET_ABI_BITS; j++) {
990             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
991             k++;
992         }
993         __put_user(v, &target_fds[i]);
994     }
995 
996     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
997 
998     return 0;
999 }
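/* Added note: the bit-by-bit conversion in the two fdset helpers above is
 * deliberate.  A plain memcpy of an fd_set would be wrong whenever the
 * guest's abi_ulong width or byte order differs from the host's, because
 * FD_SET() packs bits into native longs; converting one abi_ulong word at a
 * time with __get_user()/__put_user() handles both differences.
 */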
1000 
1001 #if defined(__alpha__)
1002 #define HOST_HZ 1024
1003 #else
1004 #define HOST_HZ 100
1005 #endif
1006 
1007 static inline abi_long host_to_target_clock_t(long ticks)
1008 {
1009 #if HOST_HZ == TARGET_HZ
1010     return ticks;
1011 #else
1012     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1013 #endif
1014 }
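/* Added example: with HOST_HZ 100 and an alpha target (TARGET_HZ 1024),
 * 250 host ticks (2.5 seconds) become (250 * 1024) / 100 = 2560 target
 * ticks; when HOST_HZ equals TARGET_HZ the value is returned unscaled.
 */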
1015 
1016 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1017                                              const struct rusage *rusage)
1018 {
1019     struct target_rusage *target_rusage;
1020 
1021     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1022         return -TARGET_EFAULT;
1023     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1024     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1025     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1026     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1027     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1028     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1029     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1030     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1031     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1032     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1033     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1034     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1035     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1036     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1037     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1038     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1039     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1040     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1041     unlock_user_struct(target_rusage, target_addr, 1);
1042 
1043     return 0;
1044 }
1045 
1046 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1047 {
1048     abi_ulong target_rlim_swap;
1049     rlim_t result;
1050 
1051     target_rlim_swap = tswapal(target_rlim);
1052     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1053         return RLIM_INFINITY;
1054 
1055     result = target_rlim_swap;
1056     if (target_rlim_swap != (rlim_t)result)
1057         return RLIM_INFINITY;
1058 
1059     return result;
1060 }
1061 
1062 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1063 {
1064     abi_ulong target_rlim_swap;
1065     abi_ulong result;
1066 
1067     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1068         target_rlim_swap = TARGET_RLIM_INFINITY;
1069     else
1070         target_rlim_swap = rlim;
1071     result = tswapal(target_rlim_swap);
1072 
1073     return result;
1074 }
1075 
1076 static inline int target_to_host_resource(int code)
1077 {
1078     switch (code) {
1079     case TARGET_RLIMIT_AS:
1080         return RLIMIT_AS;
1081     case TARGET_RLIMIT_CORE:
1082         return RLIMIT_CORE;
1083     case TARGET_RLIMIT_CPU:
1084         return RLIMIT_CPU;
1085     case TARGET_RLIMIT_DATA:
1086         return RLIMIT_DATA;
1087     case TARGET_RLIMIT_FSIZE:
1088         return RLIMIT_FSIZE;
1089     case TARGET_RLIMIT_LOCKS:
1090         return RLIMIT_LOCKS;
1091     case TARGET_RLIMIT_MEMLOCK:
1092         return RLIMIT_MEMLOCK;
1093     case TARGET_RLIMIT_MSGQUEUE:
1094         return RLIMIT_MSGQUEUE;
1095     case TARGET_RLIMIT_NICE:
1096         return RLIMIT_NICE;
1097     case TARGET_RLIMIT_NOFILE:
1098         return RLIMIT_NOFILE;
1099     case TARGET_RLIMIT_NPROC:
1100         return RLIMIT_NPROC;
1101     case TARGET_RLIMIT_RSS:
1102         return RLIMIT_RSS;
1103     case TARGET_RLIMIT_RTPRIO:
1104         return RLIMIT_RTPRIO;
1105     case TARGET_RLIMIT_SIGPENDING:
1106         return RLIMIT_SIGPENDING;
1107     case TARGET_RLIMIT_STACK:
1108         return RLIMIT_STACK;
1109     default:
1110         return code;
1111     }
1112 }
1113 
1114 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1115                                               abi_ulong target_tv_addr)
1116 {
1117     struct target_timeval *target_tv;
1118 
1119     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1120         return -TARGET_EFAULT;
1121 
1122     __get_user(tv->tv_sec, &target_tv->tv_sec);
1123     __get_user(tv->tv_usec, &target_tv->tv_usec);
1124 
1125     unlock_user_struct(target_tv, target_tv_addr, 0);
1126 
1127     return 0;
1128 }
1129 
1130 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1131                                             const struct timeval *tv)
1132 {
1133     struct target_timeval *target_tv;
1134 
1135     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1136         return -TARGET_EFAULT;
1137 
1138     __put_user(tv->tv_sec, &target_tv->tv_sec);
1139     __put_user(tv->tv_usec, &target_tv->tv_usec);
1140 
1141     unlock_user_struct(target_tv, target_tv_addr, 1);
1142 
1143     return 0;
1144 }
1145 
1146 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1147                                                abi_ulong target_tz_addr)
1148 {
1149     struct target_timezone *target_tz;
1150 
1151     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1152         return -TARGET_EFAULT;
1153     }
1154 
1155     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1156     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1157 
1158     unlock_user_struct(target_tz, target_tz_addr, 0);
1159 
1160     return 0;
1161 }
1162 
1163 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1164 #include <mqueue.h>
1165 
1166 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1167                                               abi_ulong target_mq_attr_addr)
1168 {
1169     struct target_mq_attr *target_mq_attr;
1170 
1171     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1172                           target_mq_attr_addr, 1))
1173         return -TARGET_EFAULT;
1174 
1175     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1176     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1177     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1178     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1179 
1180     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1181 
1182     return 0;
1183 }
1184 
1185 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1186                                             const struct mq_attr *attr)
1187 {
1188     struct target_mq_attr *target_mq_attr;
1189 
1190     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1191                           target_mq_attr_addr, 0))
1192         return -TARGET_EFAULT;
1193 
1194     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1195     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1196     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1197     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1198 
1199     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1200 
1201     return 0;
1202 }
1203 #endif
1204 
1205 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1206 /* do_select() must return target values and target errnos. */
1207 static abi_long do_select(int n,
1208                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1209                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1210 {
1211     fd_set rfds, wfds, efds;
1212     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1213     struct timeval tv;
1214     struct timespec ts, *ts_ptr;
1215     abi_long ret;
1216 
1217     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1218     if (ret) {
1219         return ret;
1220     }
1221     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1222     if (ret) {
1223         return ret;
1224     }
1225     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1226     if (ret) {
1227         return ret;
1228     }
1229 
1230     if (target_tv_addr) {
1231         if (copy_from_user_timeval(&tv, target_tv_addr))
1232             return -TARGET_EFAULT;
1233         ts.tv_sec = tv.tv_sec;
1234         ts.tv_nsec = tv.tv_usec * 1000;
1235         ts_ptr = &ts;
1236     } else {
1237         ts_ptr = NULL;
1238     }
1239 
1240     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1241                                   ts_ptr, NULL));
1242 
1243     if (!is_error(ret)) {
1244         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1245             return -TARGET_EFAULT;
1246         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1247             return -TARGET_EFAULT;
1248         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1249             return -TARGET_EFAULT;
1250 
1251         if (target_tv_addr) {
1252             tv.tv_sec = ts.tv_sec;
1253             tv.tv_usec = ts.tv_nsec / 1000;
1254             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1255                 return -TARGET_EFAULT;
1256             }
1257         }
1258     }
1259 
1260     return ret;
1261 }
1262 
1263 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1264 static abi_long do_old_select(abi_ulong arg1)
1265 {
1266     struct target_sel_arg_struct *sel;
1267     abi_ulong inp, outp, exp, tvp;
1268     long nsel;
1269 
1270     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1271         return -TARGET_EFAULT;
1272     }
1273 
1274     nsel = tswapal(sel->n);
1275     inp = tswapal(sel->inp);
1276     outp = tswapal(sel->outp);
1277     exp = tswapal(sel->exp);
1278     tvp = tswapal(sel->tvp);
1279 
1280     unlock_user_struct(sel, arg1, 0);
1281 
1282     return do_select(nsel, inp, outp, exp, tvp);
1283 }
1284 #endif
1285 #endif
1286 
1287 static abi_long do_pipe2(int host_pipe[], int flags)
1288 {
1289 #ifdef CONFIG_PIPE2
1290     return pipe2(host_pipe, flags);
1291 #else
1292     return -ENOSYS;
1293 #endif
1294 }
1295 
1296 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1297                         int flags, int is_pipe2)
1298 {
1299     int host_pipe[2];
1300     abi_long ret;
1301     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1302 
1303     if (is_error(ret))
1304         return get_errno(ret);
1305 
1306     /* Several targets have special calling conventions for the original
1307        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1308     if (!is_pipe2) {
1309 #if defined(TARGET_ALPHA)
1310         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1311         return host_pipe[0];
1312 #elif defined(TARGET_MIPS)
1313         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1314         return host_pipe[0];
1315 #elif defined(TARGET_SH4)
1316         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1317         return host_pipe[0];
1318 #elif defined(TARGET_SPARC)
1319         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1320         return host_pipe[0];
1321 #endif
1322     }
1323 
1324     if (put_user_s32(host_pipe[0], pipedes)
1325         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1326         return -TARGET_EFAULT;
1327     return get_errno(ret);
1328 }
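/* Added note: the special cases above mirror the kernel ABI on those ports,
 * where the original pipe() syscall returns the two descriptors in a
 * register pair (e.g. v0/v1 on MIPS) rather than writing them through the
 * pointer argument; pipe2() on the same ports uses the ordinary
 * pointer-based convention, which is why the special handling is skipped
 * when is_pipe2 is set.
 */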
1329 
1330 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1331                                               abi_ulong target_addr,
1332                                               socklen_t len)
1333 {
1334     struct target_ip_mreqn *target_smreqn;
1335 
1336     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1337     if (!target_smreqn)
1338         return -TARGET_EFAULT;
1339     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1340     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1341     if (len == sizeof(struct target_ip_mreqn))
1342         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1343     unlock_user(target_smreqn, target_addr, 0);
1344 
1345     return 0;
1346 }
1347 
1348 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1349                                                abi_ulong target_addr,
1350                                                socklen_t len)
1351 {
1352     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1353     sa_family_t sa_family;
1354     struct target_sockaddr *target_saddr;
1355 
1356     if (fd_trans_target_to_host_addr(fd)) {
1357         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1358     }
1359 
1360     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1361     if (!target_saddr)
1362         return -TARGET_EFAULT;
1363 
1364     sa_family = tswap16(target_saddr->sa_family);
1365 
1366     /* Oops. The caller might send an incomplete sun_path; sun_path
1367      * must be terminated by \0 (see the manual page), but
1368      * unfortunately it is quite common to specify sockaddr_un
1369      * length as "strlen(x->sun_path)" while it should be
1370      * "strlen(...) + 1". We'll fix that here if needed.
1371      * The Linux kernel applies a similar fixup.
1372      */
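    /* Example (editor's addition, path is illustrative only): a guest that
     * binds an AF_UNIX socket to "/tmp/sock" but passes
     * len = offsetof(sockaddr_un, sun_path) + strlen("/tmp/sock"), without
     * the trailing NUL, is caught by the code below: the last byte inside
     * len is non-zero and the byte just past it is already 0, so len is
     * bumped by one and the host kernel sees a terminated sun_path.
     */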
1373 
1374     if (sa_family == AF_UNIX) {
1375         if (len < unix_maxlen && len > 0) {
1376             char *cp = (char*)target_saddr;
1377 
1378             if ( cp[len-1] && !cp[len] )
1379                 len++;
1380         }
1381         if (len > unix_maxlen)
1382             len = unix_maxlen;
1383     }
1384 
1385     memcpy(addr, target_saddr, len);
1386     addr->sa_family = sa_family;
1387     if (sa_family == AF_NETLINK) {
1388         struct sockaddr_nl *nladdr;
1389 
1390         nladdr = (struct sockaddr_nl *)addr;
1391         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1392         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1393     } else if (sa_family == AF_PACKET) {
1394         struct target_sockaddr_ll *lladdr;
1395 
1396         lladdr = (struct target_sockaddr_ll *)addr;
1397         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1398         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1399     }
1400     unlock_user(target_saddr, target_addr, 0);
1401 
1402     return 0;
1403 }
1404 
1405 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1406                                                struct sockaddr *addr,
1407                                                socklen_t len)
1408 {
1409     struct target_sockaddr *target_saddr;
1410 
1411     if (len == 0) {
1412         return 0;
1413     }
1414     assert(addr);
1415 
1416     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1417     if (!target_saddr)
1418         return -TARGET_EFAULT;
1419     memcpy(target_saddr, addr, len);
1420     if (len >= offsetof(struct target_sockaddr, sa_family) +
1421         sizeof(target_saddr->sa_family)) {
1422         target_saddr->sa_family = tswap16(addr->sa_family);
1423     }
1424     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1425         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1426         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1427         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1428     } else if (addr->sa_family == AF_PACKET) {
1429         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1430         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1431         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1432     } else if (addr->sa_family == AF_INET6 &&
1433                len >= sizeof(struct target_sockaddr_in6)) {
1434         struct target_sockaddr_in6 *target_in6 =
1435                (struct target_sockaddr_in6 *)target_saddr;
1436         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1437     }
1438     unlock_user(target_saddr, target_addr, len);
1439 
1440     return 0;
1441 }
1442 
1443 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1444                                            struct target_msghdr *target_msgh)
1445 {
1446     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1447     abi_long msg_controllen;
1448     abi_ulong target_cmsg_addr;
1449     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1450     socklen_t space = 0;
1451 
1452     msg_controllen = tswapal(target_msgh->msg_controllen);
1453     if (msg_controllen < sizeof (struct target_cmsghdr))
1454         goto the_end;
1455     target_cmsg_addr = tswapal(target_msgh->msg_control);
1456     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1457     target_cmsg_start = target_cmsg;
1458     if (!target_cmsg)
1459         return -TARGET_EFAULT;
1460 
1461     while (cmsg && target_cmsg) {
1462         void *data = CMSG_DATA(cmsg);
1463         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1464 
1465         int len = tswapal(target_cmsg->cmsg_len)
1466             - sizeof(struct target_cmsghdr);
1467 
1468         space += CMSG_SPACE(len);
1469         if (space > msgh->msg_controllen) {
1470             space -= CMSG_SPACE(len);
1471             /* This is a QEMU bug, since we allocated the payload
1472              * area ourselves (unlike overflow in host-to-target
1473              * conversion, which is just the guest giving us a buffer
1474              * that's too small). It can't happen for the payload types
1475              * we currently support; if it becomes an issue in future
1476              * we would need to improve our allocation strategy to
1477              * something more intelligent than "twice the size of the
1478              * target buffer we're reading from".
1479              */
1480             gemu_log("Host cmsg overflow\n");
1481             break;
1482         }
1483 
1484         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1485             cmsg->cmsg_level = SOL_SOCKET;
1486         } else {
1487             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1488         }
1489         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1490         cmsg->cmsg_len = CMSG_LEN(len);
1491 
1492         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1493             int *fd = (int *)data;
1494             int *target_fd = (int *)target_data;
1495             int i, numfds = len / sizeof(int);
1496 
1497             for (i = 0; i < numfds; i++) {
1498                 __get_user(fd[i], target_fd + i);
1499             }
1500         } else if (cmsg->cmsg_level == SOL_SOCKET
1501                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1502             struct ucred *cred = (struct ucred *)data;
1503             struct target_ucred *target_cred =
1504                 (struct target_ucred *)target_data;
1505 
1506             __get_user(cred->pid, &target_cred->pid);
1507             __get_user(cred->uid, &target_cred->uid);
1508             __get_user(cred->gid, &target_cred->gid);
1509         } else {
1510             gemu_log("Unsupported ancillary data: %d/%d\n",
1511                                         cmsg->cmsg_level, cmsg->cmsg_type);
1512             memcpy(data, target_data, len);
1513         }
1514 
1515         cmsg = CMSG_NXTHDR(msgh, cmsg);
1516         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1517                                          target_cmsg_start);
1518     }
1519     unlock_user(target_cmsg, target_cmsg_addr, 0);
1520  the_end:
1521     msgh->msg_controllen = space;
1522     return 0;
1523 }
1524 
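     /* host_to_target_cmsg() is the inverse, run after recvmsg(): each
      * host control message is re-encoded into the guest's control buffer,
      * with payload sizes adjusted where the target layout differs (e.g.
      * SO_TIMESTAMP carries a struct target_timeval) and MSG_CTRUNC
      * reported back when the guest buffer cannot hold a converted message.
      */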
1525 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1526                                            struct msghdr *msgh)
1527 {
1528     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1529     abi_long msg_controllen;
1530     abi_ulong target_cmsg_addr;
1531     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1532     socklen_t space = 0;
1533 
1534     msg_controllen = tswapal(target_msgh->msg_controllen);
1535     if (msg_controllen < sizeof (struct target_cmsghdr))
1536         goto the_end;
1537     target_cmsg_addr = tswapal(target_msgh->msg_control);
1538     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1539     target_cmsg_start = target_cmsg;
1540     if (!target_cmsg)
1541         return -TARGET_EFAULT;
1542 
1543     while (cmsg && target_cmsg) {
1544         void *data = CMSG_DATA(cmsg);
1545         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1546 
1547         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1548         int tgt_len, tgt_space;
1549 
1550         /* We never copy a half-header but may copy half-data;
1551          * this is Linux's behaviour in put_cmsg(). Note that
1552          * truncation here is a guest problem (which we report
1553          * to the guest via the CTRUNC bit), unlike truncation
1554          * in target_to_host_cmsg, which is a QEMU bug.
1555          */
1556         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1557             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1558             break;
1559         }
1560 
1561         if (cmsg->cmsg_level == SOL_SOCKET) {
1562             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1563         } else {
1564             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1565         }
1566         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1567 
1568         /* Payload types which need a different size of payload on
1569          * the target must adjust tgt_len here.
1570          */
1571         tgt_len = len;
1572         switch (cmsg->cmsg_level) {
1573         case SOL_SOCKET:
1574             switch (cmsg->cmsg_type) {
1575             case SO_TIMESTAMP:
1576                 tgt_len = sizeof(struct target_timeval);
1577                 break;
1578             default:
1579                 break;
1580             }
1581             break;
1582         default:
1583             break;
1584         }
1585 
1586         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1589         }
1590 
1591         /* We must now copy-and-convert len bytes of payload
1592          * into tgt_len bytes of destination space. Bear in mind
1593          * that in both source and destination we may be dealing
1594          * with a truncated value!
1595          */
1596         switch (cmsg->cmsg_level) {
1597         case SOL_SOCKET:
1598             switch (cmsg->cmsg_type) {
1599             case SCM_RIGHTS:
1600             {
1601                 int *fd = (int *)data;
1602                 int *target_fd = (int *)target_data;
1603                 int i, numfds = tgt_len / sizeof(int);
1604 
1605                 for (i = 0; i < numfds; i++) {
1606                     __put_user(fd[i], target_fd + i);
1607                 }
1608                 break;
1609             }
1610             case SO_TIMESTAMP:
1611             {
1612                 struct timeval *tv = (struct timeval *)data;
1613                 struct target_timeval *target_tv =
1614                     (struct target_timeval *)target_data;
1615 
1616                 if (len != sizeof(struct timeval) ||
1617                     tgt_len != sizeof(struct target_timeval)) {
1618                     goto unimplemented;
1619                 }
1620 
1621                 /* copy struct timeval to target */
1622                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1623                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1624                 break;
1625             }
1626             case SCM_CREDENTIALS:
1627             {
1628                 struct ucred *cred = (struct ucred *)data;
1629                 struct target_ucred *target_cred =
1630                     (struct target_ucred *)target_data;
1631 
1632                 __put_user(cred->pid, &target_cred->pid);
1633                 __put_user(cred->uid, &target_cred->uid);
1634                 __put_user(cred->gid, &target_cred->gid);
1635                 break;
1636             }
1637             default:
1638                 goto unimplemented;
1639             }
1640             break;
1641 
1642         case SOL_IP:
1643             switch (cmsg->cmsg_type) {
1644             case IP_TTL:
1645             {
1646                 uint32_t *v = (uint32_t *)data;
1647                 uint32_t *t_int = (uint32_t *)target_data;
1648 
1649                 if (len != sizeof(uint32_t) ||
1650                     tgt_len != sizeof(uint32_t)) {
1651                     goto unimplemented;
1652                 }
1653                 __put_user(*v, t_int);
1654                 break;
1655             }
1656             case IP_RECVERR:
1657             {
1658                 struct errhdr_t {
1659                    struct sock_extended_err ee;
1660                    struct sockaddr_in offender;
1661                 };
1662                 struct errhdr_t *errh = (struct errhdr_t *)data;
1663                 struct errhdr_t *target_errh =
1664                     (struct errhdr_t *)target_data;
1665 
1666                 if (len != sizeof(struct errhdr_t) ||
1667                     tgt_len != sizeof(struct errhdr_t)) {
1668                     goto unimplemented;
1669                 }
1670                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1671                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1672                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1673                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1674                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1675                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1676                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1677                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1678                     (void *) &errh->offender, sizeof(errh->offender));
1679                 break;
1680             }
1681             default:
1682                 goto unimplemented;
1683             }
1684             break;
1685 
1686         case SOL_IPV6:
1687             switch (cmsg->cmsg_type) {
1688             case IPV6_HOPLIMIT:
1689             {
1690                 uint32_t *v = (uint32_t *)data;
1691                 uint32_t *t_int = (uint32_t *)target_data;
1692 
1693                 if (len != sizeof(uint32_t) ||
1694                     tgt_len != sizeof(uint32_t)) {
1695                     goto unimplemented;
1696                 }
1697                 __put_user(*v, t_int);
1698                 break;
1699             }
1700             case IPV6_RECVERR:
1701             {
1702                 struct errhdr6_t {
1703                    struct sock_extended_err ee;
1704                    struct sockaddr_in6 offender;
1705                 };
1706                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1707                 struct errhdr6_t *target_errh =
1708                     (struct errhdr6_t *)target_data;
1709 
1710                 if (len != sizeof(struct errhdr6_t) ||
1711                     tgt_len != sizeof(struct errhdr6_t)) {
1712                     goto unimplemented;
1713                 }
1714                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1715                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1716                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1717                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1718                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1719                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1720                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1721                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1722                     (void *) &errh->offender, sizeof(errh->offender));
1723                 break;
1724             }
1725             default:
1726                 goto unimplemented;
1727             }
1728             break;
1729 
1730         default:
1731         unimplemented:
1732             gemu_log("Unsupported ancillary data: %d/%d\n",
1733                                         cmsg->cmsg_level, cmsg->cmsg_type);
1734             memcpy(target_data, data, MIN(len, tgt_len));
1735             if (tgt_len > len) {
1736                 memset(target_data + len, 0, tgt_len - len);
1737             }
1738         }
1739 
1740         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1741         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1742         if (msg_controllen < tgt_space) {
1743             tgt_space = msg_controllen;
1744         }
1745         msg_controllen -= tgt_space;
1746         space += tgt_space;
1747         cmsg = CMSG_NXTHDR(msgh, cmsg);
1748         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1749                                          target_cmsg_start);
1750     }
1751     unlock_user(target_cmsg, target_cmsg_addr, space);
1752  the_end:
1753     target_msgh->msg_controllen = tswapal(space);
1754     return 0;
1755 }
1756 
1757 /* do_setsockopt() Must return target values and target errnos. */
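     /* The pattern below is: switch on the (target) level and optname,
      * fetch the guest's option value with get_user_u32()/copy_from_user()/
      * lock_user(), byte-swap any embedded fields, and pass the result to
      * the host setsockopt() using host-native constants.
      */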
1758 static abi_long do_setsockopt(int sockfd, int level, int optname,
1759                               abi_ulong optval_addr, socklen_t optlen)
1760 {
1761     abi_long ret;
1762     int val;
1763     struct ip_mreqn *ip_mreq;
1764     struct ip_mreq_source *ip_mreq_source;
1765 
1766     switch(level) {
1767     case SOL_TCP:
1768         /* TCP options all take an 'int' value.  */
1769         if (optlen < sizeof(uint32_t))
1770             return -TARGET_EINVAL;
1771 
1772         if (get_user_u32(val, optval_addr))
1773             return -TARGET_EFAULT;
1774         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1775         break;
1776     case SOL_IP:
1777         switch(optname) {
1778         case IP_TOS:
1779         case IP_TTL:
1780         case IP_HDRINCL:
1781         case IP_ROUTER_ALERT:
1782         case IP_RECVOPTS:
1783         case IP_RETOPTS:
1784         case IP_PKTINFO:
1785         case IP_MTU_DISCOVER:
1786         case IP_RECVERR:
1787         case IP_RECVTTL:
1788         case IP_RECVTOS:
1789 #ifdef IP_FREEBIND
1790         case IP_FREEBIND:
1791 #endif
1792         case IP_MULTICAST_TTL:
1793         case IP_MULTICAST_LOOP:
1794             val = 0;
1795             if (optlen >= sizeof(uint32_t)) {
1796                 if (get_user_u32(val, optval_addr))
1797                     return -TARGET_EFAULT;
1798             } else if (optlen >= 1) {
1799                 if (get_user_u8(val, optval_addr))
1800                     return -TARGET_EFAULT;
1801             }
1802             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1803             break;
1804         case IP_ADD_MEMBERSHIP:
1805         case IP_DROP_MEMBERSHIP:
1806             if (optlen < sizeof (struct target_ip_mreq) ||
1807                 optlen > sizeof (struct target_ip_mreqn))
1808                 return -TARGET_EINVAL;
1809 
1810             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1811             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1812             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1813             break;
1814 
1815         case IP_BLOCK_SOURCE:
1816         case IP_UNBLOCK_SOURCE:
1817         case IP_ADD_SOURCE_MEMBERSHIP:
1818         case IP_DROP_SOURCE_MEMBERSHIP:
1819             if (optlen != sizeof (struct target_ip_mreq_source))
1820                 return -TARGET_EINVAL;
1821 
1822             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1823             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1824             unlock_user(ip_mreq_source, optval_addr, 0);
1825             break;
1826 
1827         default:
1828             goto unimplemented;
1829         }
1830         break;
1831     case SOL_IPV6:
1832         switch (optname) {
1833         case IPV6_MTU_DISCOVER:
1834         case IPV6_MTU:
1835         case IPV6_V6ONLY:
1836         case IPV6_RECVPKTINFO:
1837         case IPV6_UNICAST_HOPS:
1838         case IPV6_MULTICAST_HOPS:
1839         case IPV6_MULTICAST_LOOP:
1840         case IPV6_RECVERR:
1841         case IPV6_RECVHOPLIMIT:
1842         case IPV6_2292HOPLIMIT:
1843         case IPV6_CHECKSUM:
1844         case IPV6_ADDRFORM:
1845         case IPV6_2292PKTINFO:
1846         case IPV6_RECVTCLASS:
1847         case IPV6_RECVRTHDR:
1848         case IPV6_2292RTHDR:
1849         case IPV6_RECVHOPOPTS:
1850         case IPV6_2292HOPOPTS:
1851         case IPV6_RECVDSTOPTS:
1852         case IPV6_2292DSTOPTS:
1853         case IPV6_TCLASS:
1854 #ifdef IPV6_RECVPATHMTU
1855         case IPV6_RECVPATHMTU:
1856 #endif
1857 #ifdef IPV6_TRANSPARENT
1858         case IPV6_TRANSPARENT:
1859 #endif
1860 #ifdef IPV6_FREEBIND
1861         case IPV6_FREEBIND:
1862 #endif
1863 #ifdef IPV6_RECVORIGDSTADDR
1864         case IPV6_RECVORIGDSTADDR:
1865 #endif
1866             val = 0;
1867             if (optlen < sizeof(uint32_t)) {
1868                 return -TARGET_EINVAL;
1869             }
1870             if (get_user_u32(val, optval_addr)) {
1871                 return -TARGET_EFAULT;
1872             }
1873             ret = get_errno(setsockopt(sockfd, level, optname,
1874                                        &val, sizeof(val)));
1875             break;
1876         case IPV6_PKTINFO:
1877         {
1878             struct in6_pktinfo pki;
1879 
1880             if (optlen < sizeof(pki)) {
1881                 return -TARGET_EINVAL;
1882             }
1883 
1884             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1885                 return -TARGET_EFAULT;
1886             }
1887 
1888             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1889 
1890             ret = get_errno(setsockopt(sockfd, level, optname,
1891                                        &pki, sizeof(pki)));
1892             break;
1893         }
1894         default:
1895             goto unimplemented;
1896         }
1897         break;
1898     case SOL_ICMPV6:
1899         switch (optname) {
1900         case ICMPV6_FILTER:
1901         {
1902             struct icmp6_filter icmp6f;
1903 
1904             if (optlen > sizeof(icmp6f)) {
1905                 optlen = sizeof(icmp6f);
1906             }
1907 
1908             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1909                 return -TARGET_EFAULT;
1910             }
1911 
1912             for (val = 0; val < 8; val++) {
1913                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1914             }
1915 
1916             ret = get_errno(setsockopt(sockfd, level, optname,
1917                                        &icmp6f, optlen));
1918             break;
1919         }
1920         default:
1921             goto unimplemented;
1922         }
1923         break;
1924     case SOL_RAW:
1925         switch (optname) {
1926         case ICMP_FILTER:
1927         case IPV6_CHECKSUM:
1928             /* these options take a u32 value */
1929             if (optlen < sizeof(uint32_t)) {
1930                 return -TARGET_EINVAL;
1931             }
1932 
1933             if (get_user_u32(val, optval_addr)) {
1934                 return -TARGET_EFAULT;
1935             }
1936             ret = get_errno(setsockopt(sockfd, level, optname,
1937                                        &val, sizeof(val)));
1938             break;
1939 
1940         default:
1941             goto unimplemented;
1942         }
1943         break;
1944     case TARGET_SOL_SOCKET:
1945         switch (optname) {
1946         case TARGET_SO_RCVTIMEO:
1947         {
1948                 struct timeval tv;
1949 
1950                 optname = SO_RCVTIMEO;
1951 
1952 set_timeout:
1953                 if (optlen != sizeof(struct target_timeval)) {
1954                     return -TARGET_EINVAL;
1955                 }
1956 
1957                 if (copy_from_user_timeval(&tv, optval_addr)) {
1958                     return -TARGET_EFAULT;
1959                 }
1960 
1961                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1962                                 &tv, sizeof(tv)));
1963                 return ret;
1964         }
1965         case TARGET_SO_SNDTIMEO:
1966                 optname = SO_SNDTIMEO;
1967                 goto set_timeout;
1968         case TARGET_SO_ATTACH_FILTER:
1969         {
1970                 struct target_sock_fprog *tfprog;
1971                 struct target_sock_filter *tfilter;
1972                 struct sock_fprog fprog;
1973                 struct sock_filter *filter;
1974                 int i;
1975 
1976                 if (optlen != sizeof(*tfprog)) {
1977                     return -TARGET_EINVAL;
1978                 }
1979                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1980                     return -TARGET_EFAULT;
1981                 }
1982                 if (!lock_user_struct(VERIFY_READ, tfilter,
1983                                       tswapal(tfprog->filter), 0)) {
1984                     unlock_user_struct(tfprog, optval_addr, 1);
1985                     return -TARGET_EFAULT;
1986                 }
1987 
1988                 fprog.len = tswap16(tfprog->len);
1989                 filter = g_try_new(struct sock_filter, fprog.len);
1990                 if (filter == NULL) {
1991                     unlock_user_struct(tfilter, tfprog->filter, 1);
1992                     unlock_user_struct(tfprog, optval_addr, 1);
1993                     return -TARGET_ENOMEM;
1994                 }
1995                 for (i = 0; i < fprog.len; i++) {
1996                     filter[i].code = tswap16(tfilter[i].code);
1997                     filter[i].jt = tfilter[i].jt;
1998                     filter[i].jf = tfilter[i].jf;
1999                     filter[i].k = tswap32(tfilter[i].k);
2000                 }
2001                 fprog.filter = filter;
2002 
2003                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2004                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2005                 g_free(filter);
2006 
2007                 unlock_user_struct(tfilter, tfprog->filter, 1);
2008                 unlock_user_struct(tfprog, optval_addr, 1);
2009                 return ret;
2010         }
2011         case TARGET_SO_BINDTODEVICE:
2012         {
2013                 char *dev_ifname, *addr_ifname;
2014 
2015                 if (optlen > IFNAMSIZ - 1) {
2016                     optlen = IFNAMSIZ - 1;
2017                 }
2018                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2019                 if (!dev_ifname) {
2020                     return -TARGET_EFAULT;
2021                 }
2022                 optname = SO_BINDTODEVICE;
2023                 addr_ifname = alloca(IFNAMSIZ);
2024                 memcpy(addr_ifname, dev_ifname, optlen);
2025                 addr_ifname[optlen] = 0;
2026                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2027                                            addr_ifname, optlen));
2028                 unlock_user(dev_ifname, optval_addr, 0);
2029                 return ret;
2030         }
2031         case TARGET_SO_LINGER:
2032         {
2033                 struct linger lg;
2034                 struct target_linger *tlg;
2035 
2036                 if (optlen != sizeof(struct target_linger)) {
2037                     return -TARGET_EINVAL;
2038                 }
2039                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2040                     return -TARGET_EFAULT;
2041                 }
2042                 __get_user(lg.l_onoff, &tlg->l_onoff);
2043                 __get_user(lg.l_linger, &tlg->l_linger);
2044                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2045                                 &lg, sizeof(lg)));
2046                 unlock_user_struct(tlg, optval_addr, 0);
2047                 return ret;
2048         }
2049             /* Options with 'int' argument.  */
2050         case TARGET_SO_DEBUG:
2051                 optname = SO_DEBUG;
2052                 break;
2053         case TARGET_SO_REUSEADDR:
2054                 optname = SO_REUSEADDR;
2055                 break;
2056 #ifdef SO_REUSEPORT
2057         case TARGET_SO_REUSEPORT:
2058                 optname = SO_REUSEPORT;
2059                 break;
2060 #endif
2061         case TARGET_SO_TYPE:
2062                 optname = SO_TYPE;
2063                 break;
2064         case TARGET_SO_ERROR:
2065                 optname = SO_ERROR;
2066                 break;
2067         case TARGET_SO_DONTROUTE:
2068                 optname = SO_DONTROUTE;
2069                 break;
2070         case TARGET_SO_BROADCAST:
2071                 optname = SO_BROADCAST;
2072                 break;
2073         case TARGET_SO_SNDBUF:
2074                 optname = SO_SNDBUF;
2075                 break;
2076         case TARGET_SO_SNDBUFFORCE:
2077                 optname = SO_SNDBUFFORCE;
2078                 break;
2079         case TARGET_SO_RCVBUF:
2080                 optname = SO_RCVBUF;
2081                 break;
2082         case TARGET_SO_RCVBUFFORCE:
2083                 optname = SO_RCVBUFFORCE;
2084                 break;
2085         case TARGET_SO_KEEPALIVE:
2086                 optname = SO_KEEPALIVE;
2087                 break;
2088         case TARGET_SO_OOBINLINE:
2089                 optname = SO_OOBINLINE;
2090                 break;
2091         case TARGET_SO_NO_CHECK:
2092                 optname = SO_NO_CHECK;
2093                 break;
2094         case TARGET_SO_PRIORITY:
2095                 optname = SO_PRIORITY;
2096                 break;
2097 #ifdef SO_BSDCOMPAT
2098         case TARGET_SO_BSDCOMPAT:
2099                 optname = SO_BSDCOMPAT;
2100                 break;
2101 #endif
2102         case TARGET_SO_PASSCRED:
2103                 optname = SO_PASSCRED;
2104                 break;
2105         case TARGET_SO_PASSSEC:
2106                 optname = SO_PASSSEC;
2107                 break;
2108         case TARGET_SO_TIMESTAMP:
2109                 optname = SO_TIMESTAMP;
2110                 break;
2111         case TARGET_SO_RCVLOWAT:
2112                 optname = SO_RCVLOWAT;
2113                 break;
2114         default:
2115             goto unimplemented;
2116         }
2117         if (optlen < sizeof(uint32_t))
2118             return -TARGET_EINVAL;
2119 
2120         if (get_user_u32(val, optval_addr))
2121             return -TARGET_EFAULT;
2122         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2123         break;
2124     default:
2125     unimplemented:
2126         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2127         ret = -TARGET_ENOPROTOOPT;
2128     }
2129     return ret;
2130 }
2131 
2132 /* do_getsockopt() Must return target values and target errnos. */
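     /* The inverse of do_setsockopt(): read the guest's optlen, call the
      * host getsockopt() into local storage, then write back both the
      * (possibly truncated) value and the updated length, converting
      * structured results such as SO_PEERCRED and SO_LINGER field by field.
      */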
2133 static abi_long do_getsockopt(int sockfd, int level, int optname,
2134                               abi_ulong optval_addr, abi_ulong optlen)
2135 {
2136     abi_long ret;
2137     int len, val;
2138     socklen_t lv;
2139 
2140     switch(level) {
2141     case TARGET_SOL_SOCKET:
2142         level = SOL_SOCKET;
2143         switch (optname) {
2144         /* These don't just return a single integer */
2145         case TARGET_SO_RCVTIMEO:
2146         case TARGET_SO_SNDTIMEO:
2147         case TARGET_SO_PEERNAME:
2148             goto unimplemented;
2149         case TARGET_SO_PEERCRED: {
2150             struct ucred cr;
2151             socklen_t crlen;
2152             struct target_ucred *tcr;
2153 
2154             if (get_user_u32(len, optlen)) {
2155                 return -TARGET_EFAULT;
2156             }
2157             if (len < 0) {
2158                 return -TARGET_EINVAL;
2159             }
2160 
2161             crlen = sizeof(cr);
2162             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2163                                        &cr, &crlen));
2164             if (ret < 0) {
2165                 return ret;
2166             }
2167             if (len > crlen) {
2168                 len = crlen;
2169             }
2170             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2171                 return -TARGET_EFAULT;
2172             }
2173             __put_user(cr.pid, &tcr->pid);
2174             __put_user(cr.uid, &tcr->uid);
2175             __put_user(cr.gid, &tcr->gid);
2176             unlock_user_struct(tcr, optval_addr, 1);
2177             if (put_user_u32(len, optlen)) {
2178                 return -TARGET_EFAULT;
2179             }
2180             break;
2181         }
2182         case TARGET_SO_LINGER:
2183         {
2184             struct linger lg;
2185             socklen_t lglen;
2186             struct target_linger *tlg;
2187 
2188             if (get_user_u32(len, optlen)) {
2189                 return -TARGET_EFAULT;
2190             }
2191             if (len < 0) {
2192                 return -TARGET_EINVAL;
2193             }
2194 
2195             lglen = sizeof(lg);
2196             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2197                                        &lg, &lglen));
2198             if (ret < 0) {
2199                 return ret;
2200             }
2201             if (len > lglen) {
2202                 len = lglen;
2203             }
2204             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2205                 return -TARGET_EFAULT;
2206             }
2207             __put_user(lg.l_onoff, &tlg->l_onoff);
2208             __put_user(lg.l_linger, &tlg->l_linger);
2209             unlock_user_struct(tlg, optval_addr, 1);
2210             if (put_user_u32(len, optlen)) {
2211                 return -TARGET_EFAULT;
2212             }
2213             break;
2214         }
2215         /* Options with 'int' argument.  */
2216         case TARGET_SO_DEBUG:
2217             optname = SO_DEBUG;
2218             goto int_case;
2219         case TARGET_SO_REUSEADDR:
2220             optname = SO_REUSEADDR;
2221             goto int_case;
2222 #ifdef SO_REUSEPORT
2223         case TARGET_SO_REUSEPORT:
2224             optname = SO_REUSEPORT;
2225             goto int_case;
2226 #endif
2227         case TARGET_SO_TYPE:
2228             optname = SO_TYPE;
2229             goto int_case;
2230         case TARGET_SO_ERROR:
2231             optname = SO_ERROR;
2232             goto int_case;
2233         case TARGET_SO_DONTROUTE:
2234             optname = SO_DONTROUTE;
2235             goto int_case;
2236         case TARGET_SO_BROADCAST:
2237             optname = SO_BROADCAST;
2238             goto int_case;
2239         case TARGET_SO_SNDBUF:
2240             optname = SO_SNDBUF;
2241             goto int_case;
2242         case TARGET_SO_RCVBUF:
2243             optname = SO_RCVBUF;
2244             goto int_case;
2245         case TARGET_SO_KEEPALIVE:
2246             optname = SO_KEEPALIVE;
2247             goto int_case;
2248         case TARGET_SO_OOBINLINE:
2249             optname = SO_OOBINLINE;
2250             goto int_case;
2251         case TARGET_SO_NO_CHECK:
2252             optname = SO_NO_CHECK;
2253             goto int_case;
2254         case TARGET_SO_PRIORITY:
2255             optname = SO_PRIORITY;
2256             goto int_case;
2257 #ifdef SO_BSDCOMPAT
2258         case TARGET_SO_BSDCOMPAT:
2259             optname = SO_BSDCOMPAT;
2260             goto int_case;
2261 #endif
2262         case TARGET_SO_PASSCRED:
2263             optname = SO_PASSCRED;
2264             goto int_case;
2265         case TARGET_SO_TIMESTAMP:
2266             optname = SO_TIMESTAMP;
2267             goto int_case;
2268         case TARGET_SO_RCVLOWAT:
2269             optname = SO_RCVLOWAT;
2270             goto int_case;
2271         case TARGET_SO_ACCEPTCONN:
2272             optname = SO_ACCEPTCONN;
2273             goto int_case;
2274         default:
2275             goto int_case;
2276         }
2277         break;
2278     case SOL_TCP:
2279         /* TCP options all take an 'int' value.  */
2280     int_case:
2281         if (get_user_u32(len, optlen))
2282             return -TARGET_EFAULT;
2283         if (len < 0)
2284             return -TARGET_EINVAL;
2285         lv = sizeof(lv);
2286         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2287         if (ret < 0)
2288             return ret;
2289         if (optname == SO_TYPE) {
2290             val = host_to_target_sock_type(val);
2291         }
2292         if (len > lv)
2293             len = lv;
2294         if (len == 4) {
2295             if (put_user_u32(val, optval_addr))
2296                 return -TARGET_EFAULT;
2297         } else {
2298             if (put_user_u8(val, optval_addr))
2299                 return -TARGET_EFAULT;
2300         }
2301         if (put_user_u32(len, optlen))
2302             return -TARGET_EFAULT;
2303         break;
2304     case SOL_IP:
2305         switch(optname) {
2306         case IP_TOS:
2307         case IP_TTL:
2308         case IP_HDRINCL:
2309         case IP_ROUTER_ALERT:
2310         case IP_RECVOPTS:
2311         case IP_RETOPTS:
2312         case IP_PKTINFO:
2313         case IP_MTU_DISCOVER:
2314         case IP_RECVERR:
2315         case IP_RECVTOS:
2316 #ifdef IP_FREEBIND
2317         case IP_FREEBIND:
2318 #endif
2319         case IP_MULTICAST_TTL:
2320         case IP_MULTICAST_LOOP:
2321             if (get_user_u32(len, optlen))
2322                 return -TARGET_EFAULT;
2323             if (len < 0)
2324                 return -TARGET_EINVAL;
2325             lv = sizeof(lv);
2326             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2327             if (ret < 0)
2328                 return ret;
2329             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2330                 len = 1;
2331                 if (put_user_u32(len, optlen)
2332                     || put_user_u8(val, optval_addr))
2333                     return -TARGET_EFAULT;
2334             } else {
2335                 if (len > sizeof(int))
2336                     len = sizeof(int);
2337                 if (put_user_u32(len, optlen)
2338                     || put_user_u32(val, optval_addr))
2339                     return -TARGET_EFAULT;
2340             }
2341             break;
2342         default:
2343             ret = -TARGET_ENOPROTOOPT;
2344             break;
2345         }
2346         break;
2347     case SOL_IPV6:
2348         switch (optname) {
2349         case IPV6_MTU_DISCOVER:
2350         case IPV6_MTU:
2351         case IPV6_V6ONLY:
2352         case IPV6_RECVPKTINFO:
2353         case IPV6_UNICAST_HOPS:
2354         case IPV6_MULTICAST_HOPS:
2355         case IPV6_MULTICAST_LOOP:
2356         case IPV6_RECVERR:
2357         case IPV6_RECVHOPLIMIT:
2358         case IPV6_2292HOPLIMIT:
2359         case IPV6_CHECKSUM:
2360         case IPV6_ADDRFORM:
2361         case IPV6_2292PKTINFO:
2362         case IPV6_RECVTCLASS:
2363         case IPV6_RECVRTHDR:
2364         case IPV6_2292RTHDR:
2365         case IPV6_RECVHOPOPTS:
2366         case IPV6_2292HOPOPTS:
2367         case IPV6_RECVDSTOPTS:
2368         case IPV6_2292DSTOPTS:
2369         case IPV6_TCLASS:
2370 #ifdef IPV6_RECVPATHMTU
2371         case IPV6_RECVPATHMTU:
2372 #endif
2373 #ifdef IPV6_TRANSPARENT
2374         case IPV6_TRANSPARENT:
2375 #endif
2376 #ifdef IPV6_FREEBIND
2377         case IPV6_FREEBIND:
2378 #endif
2379 #ifdef IPV6_RECVORIGDSTADDR
2380         case IPV6_RECVORIGDSTADDR:
2381 #endif
2382             if (get_user_u32(len, optlen))
2383                 return -TARGET_EFAULT;
2384             if (len < 0)
2385                 return -TARGET_EINVAL;
2386             lv = sizeof(lv);
2387             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2388             if (ret < 0)
2389                 return ret;
2390             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2391                 len = 1;
2392                 if (put_user_u32(len, optlen)
2393                     || put_user_u8(val, optval_addr))
2394                     return -TARGET_EFAULT;
2395             } else {
2396                 if (len > sizeof(int))
2397                     len = sizeof(int);
2398                 if (put_user_u32(len, optlen)
2399                     || put_user_u32(val, optval_addr))
2400                     return -TARGET_EFAULT;
2401             }
2402             break;
2403         default:
2404             ret = -TARGET_ENOPROTOOPT;
2405             break;
2406         }
2407         break;
2408     default:
2409     unimplemented:
2410         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2411                  level, optname);
2412         ret = -TARGET_EOPNOTSUPP;
2413         break;
2414     }
2415     return ret;
2416 }
2417 
2418 /* Convert target low/high pair representing file offset into the host
2419  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2420  * as the kernel doesn't handle them either.
2421  */
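     /* For example, with a 32-bit guest on a 64-bit host, tlow=0x89abcdef
      * and thigh=0x01234567 combine into off=0x0123456789abcdef; *hlow then
      * receives the whole value and *hhigh becomes 0.  Shifting twice by
      * half the word size keeps the shifts well defined even when a single
      * shift would equal the operand width.
      */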
2422 static void target_to_host_low_high(abi_ulong tlow,
2423                                     abi_ulong thigh,
2424                                     unsigned long *hlow,
2425                                     unsigned long *hhigh)
2426 {
2427     uint64_t off = tlow |
2428         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2429         TARGET_LONG_BITS / 2;
2430 
2431     *hlow = off;
2432     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2433 }
2434 
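     /* lock_iovec() builds a host iovec array from a guest one: counts
      * above IOV_MAX are rejected, each guest buffer is locked into host
      * memory, and lengths are clamped so the running total stays within
      * the supported maximum.  A bad first buffer is a fault; later bad
      * buffers become zero-length entries so a partial transfer can still
      * happen.
      */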
2435 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2436                                 abi_ulong count, int copy)
2437 {
2438     struct target_iovec *target_vec;
2439     struct iovec *vec;
2440     abi_ulong total_len, max_len;
2441     int i;
2442     int err = 0;
2443     bool bad_address = false;
2444 
2445     if (count == 0) {
2446         errno = 0;
2447         return NULL;
2448     }
2449     if (count > IOV_MAX) {
2450         errno = EINVAL;
2451         return NULL;
2452     }
2453 
2454     vec = g_try_new0(struct iovec, count);
2455     if (vec == NULL) {
2456         errno = ENOMEM;
2457         return NULL;
2458     }
2459 
2460     target_vec = lock_user(VERIFY_READ, target_addr,
2461                            count * sizeof(struct target_iovec), 1);
2462     if (target_vec == NULL) {
2463         err = EFAULT;
2464         goto fail2;
2465     }
2466 
2467     /* ??? If host page size > target page size, this will result in a
2468        value larger than what we can actually support.  */
2469     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2470     total_len = 0;
2471 
2472     for (i = 0; i < count; i++) {
2473         abi_ulong base = tswapal(target_vec[i].iov_base);
2474         abi_long len = tswapal(target_vec[i].iov_len);
2475 
2476         if (len < 0) {
2477             err = EINVAL;
2478             goto fail;
2479         } else if (len == 0) {
2480             /* Zero length pointer is ignored.  */
2481             vec[i].iov_base = 0;
2482         } else {
2483             vec[i].iov_base = lock_user(type, base, len, copy);
2484             /* If the first buffer pointer is bad, this is a fault.  But
2485              * subsequent bad buffers will result in a partial write; this
2486              * is realized by filling the vector with null pointers and
2487              * zero lengths. */
2488             if (!vec[i].iov_base) {
2489                 if (i == 0) {
2490                     err = EFAULT;
2491                     goto fail;
2492                 } else {
2493                     bad_address = true;
2494                 }
2495             }
2496             if (bad_address) {
2497                 len = 0;
2498             }
2499             if (len > max_len - total_len) {
2500                 len = max_len - total_len;
2501             }
2502         }
2503         vec[i].iov_len = len;
2504         total_len += len;
2505     }
2506 
2507     unlock_user(target_vec, target_addr, 0);
2508     return vec;
2509 
2510  fail:
2511     while (--i >= 0) {
2512         if (tswapal(target_vec[i].iov_len) > 0) {
2513             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2514         }
2515     }
2516     unlock_user(target_vec, target_addr, 0);
2517  fail2:
2518     g_free(vec);
2519     errno = err;
2520     return NULL;
2521 }
2522 
2523 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2524                          abi_ulong count, int copy)
2525 {
2526     struct target_iovec *target_vec;
2527     int i;
2528 
2529     target_vec = lock_user(VERIFY_READ, target_addr,
2530                            count * sizeof(struct target_iovec), 1);
2531     if (target_vec) {
2532         for (i = 0; i < count; i++) {
2533             abi_ulong base = tswapal(target_vec[i].iov_base);
2534             abi_long len = tswapal(target_vec[i].iov_len);
2535             if (len < 0) {
2536                 break;
2537             }
2538             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2539         }
2540         unlock_user(target_vec, target_addr, 0);
2541     }
2542 
2543     g_free(vec);
2544 }
2545 
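     /* Map the guest's socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK type
      * flags onto host constants, failing with -TARGET_EINVAL when the host
      * has no way to express a requested flag.
      */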
2546 static inline int target_to_host_sock_type(int *type)
2547 {
2548     int host_type = 0;
2549     int target_type = *type;
2550 
2551     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2552     case TARGET_SOCK_DGRAM:
2553         host_type = SOCK_DGRAM;
2554         break;
2555     case TARGET_SOCK_STREAM:
2556         host_type = SOCK_STREAM;
2557         break;
2558     default:
2559         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2560         break;
2561     }
2562     if (target_type & TARGET_SOCK_CLOEXEC) {
2563 #if defined(SOCK_CLOEXEC)
2564         host_type |= SOCK_CLOEXEC;
2565 #else
2566         return -TARGET_EINVAL;
2567 #endif
2568     }
2569     if (target_type & TARGET_SOCK_NONBLOCK) {
2570 #if defined(SOCK_NONBLOCK)
2571         host_type |= SOCK_NONBLOCK;
2572 #elif !defined(O_NONBLOCK)
2573         return -TARGET_EINVAL;
2574 #endif
2575     }
2576     *type = host_type;
2577     return 0;
2578 }
2579 
2580 /* Try to emulate socket type flags after socket creation.  */
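     /* On hosts without SOCK_NONBLOCK the flag is emulated afterwards with
      * fcntl(F_SETFL, O_NONBLOCK) on the new descriptor.
      */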
2581 static int sock_flags_fixup(int fd, int target_type)
2582 {
2583 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2584     if (target_type & TARGET_SOCK_NONBLOCK) {
2585         int flags = fcntl(fd, F_GETFL);
2586         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2587             close(fd);
2588             return -TARGET_EINVAL;
2589         }
2590     }
2591 #endif
2592     return fd;
2593 }
2594 
2595 /* do_socket() Must return target values and target errnos. */
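     /* Only a few netlink protocols are emulated (others fail with
      * EPFNOSUPPORT), the 16-bit protocol number of packet sockets is
      * byte-swapped between guest and host order, and sockets that need
      * address or data translation get an fd_trans handler registered.
      */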
2596 static abi_long do_socket(int domain, int type, int protocol)
2597 {
2598     int target_type = type;
2599     int ret;
2600 
2601     ret = target_to_host_sock_type(&type);
2602     if (ret) {
2603         return ret;
2604     }
2605 
2606     if (domain == PF_NETLINK && !(
2607 #ifdef CONFIG_RTNETLINK
2608          protocol == NETLINK_ROUTE ||
2609 #endif
2610          protocol == NETLINK_KOBJECT_UEVENT ||
2611          protocol == NETLINK_AUDIT)) {
2612         return -EPFNOSUPPORT;
2613     }
2614 
2615     if (domain == AF_PACKET ||
2616         (domain == AF_INET && type == SOCK_PACKET)) {
2617         protocol = tswap16(protocol);
2618     }
2619 
2620     ret = get_errno(socket(domain, type, protocol));
2621     if (ret >= 0) {
2622         ret = sock_flags_fixup(ret, target_type);
2623         if (type == SOCK_PACKET) {
2624             /* Handle the obsolete case: if the socket type is
2625              * SOCK_PACKET, the socket is bound by interface name.
2626              */
2627             fd_trans_register(ret, &target_packet_trans);
2628         } else if (domain == PF_NETLINK) {
2629             switch (protocol) {
2630 #ifdef CONFIG_RTNETLINK
2631             case NETLINK_ROUTE:
2632                 fd_trans_register(ret, &target_netlink_route_trans);
2633                 break;
2634 #endif
2635             case NETLINK_KOBJECT_UEVENT:
2636                 /* nothing to do: messages are strings */
2637                 break;
2638             case NETLINK_AUDIT:
2639                 fd_trans_register(ret, &target_netlink_audit_trans);
2640                 break;
2641             default:
2642                 g_assert_not_reached();
2643             }
2644         }
2645     }
2646     return ret;
2647 }
2648 
2649 /* do_bind() Must return target values and target errnos. */
2650 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2651                         socklen_t addrlen)
2652 {
2653     void *addr;
2654     abi_long ret;
2655 
2656     if ((int)addrlen < 0) {
2657         return -TARGET_EINVAL;
2658     }
2659 
2660     addr = alloca(addrlen+1);
2661 
2662     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2663     if (ret)
2664         return ret;
2665 
2666     return get_errno(bind(sockfd, addr, addrlen));
2667 }
2668 
2669 /* do_connect() Must return target values and target errnos. */
2670 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2671                            socklen_t addrlen)
2672 {
2673     void *addr;
2674     abi_long ret;
2675 
2676     if ((int)addrlen < 0) {
2677         return -TARGET_EINVAL;
2678     }
2679 
2680     addr = alloca(addrlen+1);
2681 
2682     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2683     if (ret)
2684         return ret;
2685 
2686     return get_errno(safe_connect(sockfd, addr, addrlen));
2687 }
2688 
2689 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
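     /* This helper does the real work for sendmsg/recvmsg and for each
      * element of sendmmsg/recvmmsg: it converts msg_name, allocates a host
      * control buffer twice the guest's size to allow for layout
      * differences, locks the iovec, converts ancillary data in the
      * appropriate direction and calls safe_sendmsg()/safe_recvmsg().
      */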
2690 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2691                                       int flags, int send)
2692 {
2693     abi_long ret, len;
2694     struct msghdr msg;
2695     abi_ulong count;
2696     struct iovec *vec;
2697     abi_ulong target_vec;
2698 
2699     if (msgp->msg_name) {
2700         msg.msg_namelen = tswap32(msgp->msg_namelen);
2701         msg.msg_name = alloca(msg.msg_namelen+1);
2702         ret = target_to_host_sockaddr(fd, msg.msg_name,
2703                                       tswapal(msgp->msg_name),
2704                                       msg.msg_namelen);
2705         if (ret == -TARGET_EFAULT) {
2706             /* For connected sockets msg_name and msg_namelen must
2707              * be ignored, so returning EFAULT immediately is wrong.
2708              * Instead, pass a bad msg_name to the host kernel, and
2709              * let it decide whether to return EFAULT or not.
2710              */
2711             msg.msg_name = (void *)-1;
2712         } else if (ret) {
2713             goto out2;
2714         }
2715     } else {
2716         msg.msg_name = NULL;
2717         msg.msg_namelen = 0;
2718     }
2719     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2720     msg.msg_control = alloca(msg.msg_controllen);
2721     memset(msg.msg_control, 0, msg.msg_controllen);
2722 
2723     msg.msg_flags = tswap32(msgp->msg_flags);
2724 
2725     count = tswapal(msgp->msg_iovlen);
2726     target_vec = tswapal(msgp->msg_iov);
2727 
2728     if (count > IOV_MAX) {
2729         /* sendmsg/recvmsg return a different errno for this condition than
2730          * readv/writev, so we must catch it here before lock_iovec() does.
2731          */
2732         ret = -TARGET_EMSGSIZE;
2733         goto out2;
2734     }
2735 
2736     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2737                      target_vec, count, send);
2738     if (vec == NULL) {
2739         ret = -host_to_target_errno(errno);
2740         goto out2;
2741     }
2742     msg.msg_iovlen = count;
2743     msg.msg_iov = vec;
2744 
2745     if (send) {
2746         if (fd_trans_target_to_host_data(fd)) {
2747             void *host_msg;
2748 
2749             host_msg = g_malloc(msg.msg_iov->iov_len);
2750             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2751             ret = fd_trans_target_to_host_data(fd)(host_msg,
2752                                                    msg.msg_iov->iov_len);
2753             if (ret >= 0) {
2754                 msg.msg_iov->iov_base = host_msg;
2755                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2756             }
2757             g_free(host_msg);
2758         } else {
2759             ret = target_to_host_cmsg(&msg, msgp);
2760             if (ret == 0) {
2761                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2762             }
2763         }
2764     } else {
2765         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2766         if (!is_error(ret)) {
2767             len = ret;
2768             if (fd_trans_host_to_target_data(fd)) {
2769                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2770                                                MIN(msg.msg_iov->iov_len, len));
2771             } else {
2772                 ret = host_to_target_cmsg(msgp, &msg);
2773             }
2774             if (!is_error(ret)) {
2775                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2776                 msgp->msg_flags = tswap32(msg.msg_flags);
2777                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2778                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2779                                     msg.msg_name, msg.msg_namelen);
2780                     if (ret) {
2781                         goto out;
2782                     }
2783                 }
2784 
2785                 ret = len;
2786             }
2787         }
2788     }
2789 
2790 out:
2791     unlock_iovec(vec, target_vec, count, !send);
2792 out2:
2793     return ret;
2794 }
2795 
2796 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2797                                int flags, int send)
2798 {
2799     abi_long ret;
2800     struct target_msghdr *msgp;
2801 
2802     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2803                           msgp,
2804                           target_msg,
2805                           send ? 1 : 0)) {
2806         return -TARGET_EFAULT;
2807     }
2808     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2809     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2810     return ret;
2811 }
2812 
2813 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2814  * so it might not have this *mmsg-specific flag either.
2815  */
2816 #ifndef MSG_WAITFORONE
2817 #define MSG_WAITFORONE 0x10000
2818 #endif
2819 
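     /* sendmmsg/recvmmsg are emulated as a loop over the single-message
      * helper above; each msg_len is written back into the guest's
      * target_mmsghdr, and the call returns the number of messages handled
      * if any succeeded, otherwise the first error.
      */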
2820 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2821                                 unsigned int vlen, unsigned int flags,
2822                                 int send)
2823 {
2824     struct target_mmsghdr *mmsgp;
2825     abi_long ret = 0;
2826     int i;
2827 
2828     if (vlen > UIO_MAXIOV) {
2829         vlen = UIO_MAXIOV;
2830     }
2831 
2832     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2833     if (!mmsgp) {
2834         return -TARGET_EFAULT;
2835     }
2836 
2837     for (i = 0; i < vlen; i++) {
2838         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2839         if (is_error(ret)) {
2840             break;
2841         }
2842         mmsgp[i].msg_len = tswap32(ret);
2843         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2844         if (flags & MSG_WAITFORONE) {
2845             flags |= MSG_DONTWAIT;
2846         }
2847     }
2848 
2849     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2850 
2851     /* Return number of datagrams sent if we sent any at all;
2852      * otherwise return the error.
2853      */
2854     if (i) {
2855         return i;
2856     }
2857     return ret;
2858 }
2859 
2860 /* do_accept4() Must return target values and target errnos. */
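     /* The accept4() flags are translated via fcntl_flags_tbl; the guest's
      * addrlen is read up front, the peer address is received into a local
      * buffer, and the result is converted back with
      * host_to_target_sockaddr() before the updated length is stored.
      */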
2861 static abi_long do_accept4(int fd, abi_ulong target_addr,
2862                            abi_ulong target_addrlen_addr, int flags)
2863 {
2864     socklen_t addrlen, ret_addrlen;
2865     void *addr;
2866     abi_long ret;
2867     int host_flags;
2868 
2869     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2870 
2871     if (target_addr == 0) {
2872         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2873     }
2874 
2875     /* Linux returns EINVAL if the addrlen pointer is invalid */
2876     if (get_user_u32(addrlen, target_addrlen_addr))
2877         return -TARGET_EINVAL;
2878 
2879     if ((int)addrlen < 0) {
2880         return -TARGET_EINVAL;
2881     }
2882 
2883     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2884         return -TARGET_EINVAL;
2885 
2886     addr = alloca(addrlen);
2887 
2888     ret_addrlen = addrlen;
2889     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2890     if (!is_error(ret)) {
2891         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2892         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2893             ret = -TARGET_EFAULT;
2894         }
2895     }
2896     return ret;
2897 }
2898 
2899 /* do_getpeername() Must return target values and target errnos. */
2900 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2901                                abi_ulong target_addrlen_addr)
2902 {
2903     socklen_t addrlen, ret_addrlen;
2904     void *addr;
2905     abi_long ret;
2906 
2907     if (get_user_u32(addrlen, target_addrlen_addr))
2908         return -TARGET_EFAULT;
2909 
2910     if ((int)addrlen < 0) {
2911         return -TARGET_EINVAL;
2912     }
2913 
2914     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2915         return -TARGET_EFAULT;
2916 
2917     addr = alloca(addrlen);
2918 
2919     ret_addrlen = addrlen;
2920     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2921     if (!is_error(ret)) {
2922         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2923         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2924             ret = -TARGET_EFAULT;
2925         }
2926     }
2927     return ret;
2928 }
2929 
2930 /* do_getsockname() Must return target values and target errnos. */
2931 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2932                                abi_ulong target_addrlen_addr)
2933 {
2934     socklen_t addrlen, ret_addrlen;
2935     void *addr;
2936     abi_long ret;
2937 
2938     if (get_user_u32(addrlen, target_addrlen_addr))
2939         return -TARGET_EFAULT;
2940 
2941     if ((int)addrlen < 0) {
2942         return -TARGET_EINVAL;
2943     }
2944 
2945     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2946         return -TARGET_EFAULT;
2947 
2948     addr = alloca(addrlen);
2949 
2950     ret_addrlen = addrlen;
2951     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2952     if (!is_error(ret)) {
2953         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2954         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2955             ret = -TARGET_EFAULT;
2956         }
2957     }
2958     return ret;
2959 }
2960 
2961 /* do_socketpair() Must return target values and target errnos. */
2962 static abi_long do_socketpair(int domain, int type, int protocol,
2963                               abi_ulong target_tab_addr)
2964 {
2965     int tab[2];
2966     abi_long ret;
2967 
2968     target_to_host_sock_type(&type);
2969 
2970     ret = get_errno(socketpair(domain, type, protocol, tab));
2971     if (!is_error(ret)) {
2972         if (put_user_s32(tab[0], target_tab_addr)
2973             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2974             ret = -TARGET_EFAULT;
2975     }
2976     return ret;
2977 }
2978 
2979 /* do_sendto() Must return target values and target errnos. */
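     /* If an fd_trans handler is registered for this descriptor (e.g. a
      * netlink socket), the payload is copied and translated before being
      * handed to the host sendto(); otherwise the locked guest buffer is
      * used directly.
      */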
2980 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2981                           abi_ulong target_addr, socklen_t addrlen)
2982 {
2983     void *addr;
2984     void *host_msg;
2985     void *copy_msg = NULL;
2986     abi_long ret;
2987 
2988     if ((int)addrlen < 0) {
2989         return -TARGET_EINVAL;
2990     }
2991 
2992     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2993     if (!host_msg)
2994         return -TARGET_EFAULT;
2995     if (fd_trans_target_to_host_data(fd)) {
2996         copy_msg = host_msg;
2997         host_msg = g_malloc(len);
2998         memcpy(host_msg, copy_msg, len);
2999         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3000         if (ret < 0) {
3001             goto fail;
3002         }
3003     }
3004     if (target_addr) {
3005         addr = alloca(addrlen+1);
3006         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3007         if (ret) {
3008             goto fail;
3009         }
3010         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3011     } else {
3012         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3013     }
3014 fail:
3015     if (copy_msg) {
3016         g_free(host_msg);
3017         host_msg = copy_msg;
3018     }
3019     unlock_user(host_msg, msg, 0);
3020     return ret;
3021 }
3022 
3023 /* do_recvfrom() Must return target values and target errnos. */
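     /* On success the received payload may be translated in place by an
      * fd_trans handler, and the peer address (when requested) is converted
      * to the guest layout along with the updated address length.
      */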
3024 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3025                             abi_ulong target_addr,
3026                             abi_ulong target_addrlen)
3027 {
3028     socklen_t addrlen, ret_addrlen;
3029     void *addr;
3030     void *host_msg;
3031     abi_long ret;
3032 
3033     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3034     if (!host_msg)
3035         return -TARGET_EFAULT;
3036     if (target_addr) {
3037         if (get_user_u32(addrlen, target_addrlen)) {
3038             ret = -TARGET_EFAULT;
3039             goto fail;
3040         }
3041         if ((int)addrlen < 0) {
3042             ret = -TARGET_EINVAL;
3043             goto fail;
3044         }
3045         addr = alloca(addrlen);
3046         ret_addrlen = addrlen;
3047         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3048                                       addr, &ret_addrlen));
3049     } else {
3050         addr = NULL; /* To keep compiler quiet.  */
3051         addrlen = 0; /* To keep compiler quiet.  */
3052         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3053     }
3054     if (!is_error(ret)) {
3055         if (fd_trans_host_to_target_data(fd)) {
3056             abi_long trans;
3057             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3058             if (is_error(trans)) {
3059                 ret = trans;
3060                 goto fail;
3061             }
3062         }
3063         if (target_addr) {
3064             host_to_target_sockaddr(target_addr, addr,
3065                                     MIN(addrlen, ret_addrlen));
3066             if (put_user_u32(ret_addrlen, target_addrlen)) {
3067                 ret = -TARGET_EFAULT;
3068                 goto fail;
3069             }
3070         }
3071         unlock_user(host_msg, msg, len);
3072     } else {
3073 fail:
3074         unlock_user(host_msg, msg, 0);
3075     }
3076     return ret;
3077 }
3078 
3079 #ifdef TARGET_NR_socketcall
3080 /* do_socketcall() must return target values and target errnos. */
3081 static abi_long do_socketcall(int num, abi_ulong vptr)
3082 {
3083     static const unsigned nargs[] = { /* number of arguments per operation */
3084         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3085         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3086         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3087         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3088         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3089         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3090         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3091         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3092         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3093         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3094         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3095         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3096         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3097         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3098         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3099         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3100         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3101         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3102         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3103         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3104     };
3105     abi_long a[6]; /* max 6 args */
3106     unsigned i;
3107 
3108     /* check the range of the first argument num */
3109     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3110     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3111         return -TARGET_EINVAL;
3112     }
3113     /* ensure we have space for args */
3114     if (nargs[num] > ARRAY_SIZE(a)) {
3115         return -TARGET_EINVAL;
3116     }
3117     /* collect the arguments in a[] according to nargs[] */
3118     for (i = 0; i < nargs[num]; ++i) {
3119         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3120             return -TARGET_EFAULT;
3121         }
3122     }
3123     /* now when we have the args, invoke the appropriate underlying function */
3124     switch (num) {
3125     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3126         return do_socket(a[0], a[1], a[2]);
3127     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3128         return do_bind(a[0], a[1], a[2]);
3129     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3130         return do_connect(a[0], a[1], a[2]);
3131     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3132         return get_errno(listen(a[0], a[1]));
3133     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3134         return do_accept4(a[0], a[1], a[2], 0);
3135     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3136         return do_getsockname(a[0], a[1], a[2]);
3137     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3138         return do_getpeername(a[0], a[1], a[2]);
3139     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3140         return do_socketpair(a[0], a[1], a[2], a[3]);
3141     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3142         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3143     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3144         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3145     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3146         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3147     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3148         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3149     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3150         return get_errno(shutdown(a[0], a[1]));
3151     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3152         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3153     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3154         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3155     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3156         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3157     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3158         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3159     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3160         return do_accept4(a[0], a[1], a[2], a[3]);
3161     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3162         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3163     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3164         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3165     default:
3166         gemu_log("Unsupported socketcall: %d\n", num);
3167         return -TARGET_EINVAL;
3168     }
3169 }
3170 #endif
3171 
3172 #define N_SHM_REGIONS	32
3173 
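/*
 * Book-keeping for guest shmat() mappings: do_shmat() records the guest
 * address and size of each attached segment here so that do_shmdt() can
 * later clear the corresponding page flags.  If all N_SHM_REGIONS slots
 * are in use, the attach still succeeds but is simply not tracked.
 */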
3174 static struct shm_region {
3175     abi_ulong start;
3176     abi_ulong size;
3177     bool in_use;
3178 } shm_regions[N_SHM_REGIONS];
3179 
3180 #ifndef TARGET_SEMID64_DS
3181 /* asm-generic version of this struct */
3182 struct target_semid64_ds
3183 {
3184   struct target_ipc_perm sem_perm;
3185   abi_ulong sem_otime;
3186 #if TARGET_ABI_BITS == 32
3187   abi_ulong __unused1;
3188 #endif
3189   abi_ulong sem_ctime;
3190 #if TARGET_ABI_BITS == 32
3191   abi_ulong __unused2;
3192 #endif
3193   abi_ulong sem_nsems;
3194   abi_ulong __unused3;
3195   abi_ulong __unused4;
3196 };
3197 #endif
3198 
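/*
 * The width of ipc_perm's 'mode' and '__seq' fields differs between target
 * ABIs (32 bits on Alpha/MIPS/PPC for 'mode', 32 bits on PPC for '__seq',
 * 16 bits elsewhere), hence the per-target swap widths used in the two
 * helpers below.
 */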
3199 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3200                                                abi_ulong target_addr)
3201 {
3202     struct target_ipc_perm *target_ip;
3203     struct target_semid64_ds *target_sd;
3204 
3205     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3206         return -TARGET_EFAULT;
3207     target_ip = &(target_sd->sem_perm);
3208     host_ip->__key = tswap32(target_ip->__key);
3209     host_ip->uid = tswap32(target_ip->uid);
3210     host_ip->gid = tswap32(target_ip->gid);
3211     host_ip->cuid = tswap32(target_ip->cuid);
3212     host_ip->cgid = tswap32(target_ip->cgid);
3213 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3214     host_ip->mode = tswap32(target_ip->mode);
3215 #else
3216     host_ip->mode = tswap16(target_ip->mode);
3217 #endif
3218 #if defined(TARGET_PPC)
3219     host_ip->__seq = tswap32(target_ip->__seq);
3220 #else
3221     host_ip->__seq = tswap16(target_ip->__seq);
3222 #endif
3223     unlock_user_struct(target_sd, target_addr, 0);
3224     return 0;
3225 }
3226 
3227 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3228                                                struct ipc_perm *host_ip)
3229 {
3230     struct target_ipc_perm *target_ip;
3231     struct target_semid64_ds *target_sd;
3232 
3233     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3234         return -TARGET_EFAULT;
3235     target_ip = &(target_sd->sem_perm);
3236     target_ip->__key = tswap32(host_ip->__key);
3237     target_ip->uid = tswap32(host_ip->uid);
3238     target_ip->gid = tswap32(host_ip->gid);
3239     target_ip->cuid = tswap32(host_ip->cuid);
3240     target_ip->cgid = tswap32(host_ip->cgid);
3241 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3242     target_ip->mode = tswap32(host_ip->mode);
3243 #else
3244     target_ip->mode = tswap16(host_ip->mode);
3245 #endif
3246 #if defined(TARGET_PPC)
3247     target_ip->__seq = tswap32(host_ip->__seq);
3248 #else
3249     target_ip->__seq = tswap16(host_ip->__seq);
3250 #endif
3251     unlock_user_struct(target_sd, target_addr, 1);
3252     return 0;
3253 }
3254 
3255 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3256                                                abi_ulong target_addr)
3257 {
3258     struct target_semid64_ds *target_sd;
3259 
3260     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3261         return -TARGET_EFAULT;
3262     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3263         return -TARGET_EFAULT;
3264     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3265     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3266     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3267     unlock_user_struct(target_sd, target_addr, 0);
3268     return 0;
3269 }
3270 
3271 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3272                                                struct semid_ds *host_sd)
3273 {
3274     struct target_semid64_ds *target_sd;
3275 
3276     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3277         return -TARGET_EFAULT;
3278     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3279         return -TARGET_EFAULT;
3280     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3281     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3282     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3283     unlock_user_struct(target_sd, target_addr, 1);
3284     return 0;
3285 }
3286 
3287 struct target_seminfo {
3288     int semmap;
3289     int semmni;
3290     int semmns;
3291     int semmnu;
3292     int semmsl;
3293     int semopm;
3294     int semume;
3295     int semusz;
3296     int semvmx;
3297     int semaem;
3298 };
3299 
3300 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3301                                               struct seminfo *host_seminfo)
3302 {
3303     struct target_seminfo *target_seminfo;
3304     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3305         return -TARGET_EFAULT;
3306     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3307     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3308     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3309     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3310     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3311     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3312     __put_user(host_seminfo->semume, &target_seminfo->semume);
3313     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3314     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3315     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3316     unlock_user_struct(target_seminfo, target_addr, 1);
3317     return 0;
3318 }
3319 
3320 union semun {
3321     int val;
3322     struct semid_ds *buf;
3323     unsigned short *array;
3324     struct seminfo *__buf;
3325 };
3326 
3327 union target_semun {
3328     int val;
3329     abi_ulong buf;
3330     abi_ulong array;
3331     abi_ulong __buf;
3332 };
3333 
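/*
 * GETALL/SETALL operate on an array of 'sem_nsems' unsigned short values,
 * but the caller does not pass the array length, so both helpers below
 * first query IPC_STAT to learn sem_nsems before copying the array.
 */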
3334 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3335                                                abi_ulong target_addr)
3336 {
3337     int nsems;
3338     unsigned short *array;
3339     union semun semun;
3340     struct semid_ds semid_ds;
3341     int i, ret;
3342 
3343     semun.buf = &semid_ds;
3344 
3345     ret = semctl(semid, 0, IPC_STAT, semun);
3346     if (ret == -1)
3347         return get_errno(ret);
3348 
3349     nsems = semid_ds.sem_nsems;
3350 
3351     *host_array = g_try_new(unsigned short, nsems);
3352     if (!*host_array) {
3353         return -TARGET_ENOMEM;
3354     }
3355     array = lock_user(VERIFY_READ, target_addr,
3356                       nsems*sizeof(unsigned short), 1);
3357     if (!array) {
3358         g_free(*host_array);
3359         return -TARGET_EFAULT;
3360     }
3361 
3362     for (i = 0; i < nsems; i++) {
3363         __get_user((*host_array)[i], &array[i]);
3364     }
3365     unlock_user(array, target_addr, 0);
3366 
3367     return 0;
3368 }
3369 
3370 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3371                                                unsigned short **host_array)
3372 {
3373     int nsems;
3374     unsigned short *array;
3375     union semun semun;
3376     struct semid_ds semid_ds;
3377     int i, ret;
3378 
3379     semun.buf = &semid_ds;
3380 
3381     ret = semctl(semid, 0, IPC_STAT, semun);
3382     if (ret == -1)
3383         return get_errno(ret);
3384 
3385     nsems = semid_ds.sem_nsems;
3386 
3387     array = lock_user(VERIFY_WRITE, target_addr,
3388                       nsems*sizeof(unsigned short), 0);
3389     if (!array)
3390         return -TARGET_EFAULT;
3391 
3392     for (i = 0; i < nsems; i++) {
3393         __put_user((*host_array)[i], &array[i]);
3394     }
3395     g_free(*host_array);
3396     unlock_user(array, target_addr, 1);
3397 
3398     return 0;
3399 }
3400 
3401 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3402                                  abi_ulong target_arg)
3403 {
3404     union target_semun target_su = { .buf = target_arg };
3405     union semun arg;
3406     struct semid_ds dsarg;
3407     unsigned short *array = NULL;
3408     struct seminfo seminfo;
3409     abi_long ret = -TARGET_EINVAL;
3410     abi_long err;
3411     cmd &= 0xff;
3412 
3413     switch (cmd) {
3414         case GETVAL:
3415         case SETVAL:
3416             /* In 64 bit cross-endian situations, we will erroneously pick up
3417              * the wrong half of the union for the "val" element.  To rectify
3418              * this, the entire 8-byte structure is byteswapped, followed by
3419              * a swap of the 4 byte val field. In other cases, the data is
3420              * already in proper host byte order. */
3421             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3422                 target_su.buf = tswapal(target_su.buf);
3423                 arg.val = tswap32(target_su.val);
3424             } else {
3425                 arg.val = target_su.val;
3426             }
3427             ret = get_errno(semctl(semid, semnum, cmd, arg));
3428             break;
3429         case GETALL:
3430         case SETALL:
3431             err = target_to_host_semarray(semid, &array, target_su.array);
3432             if (err)
3433                 return err;
3434             arg.array = array;
3435             ret = get_errno(semctl(semid, semnum, cmd, arg));
3436             err = host_to_target_semarray(semid, target_su.array, &array);
3437             if (err)
3438                 return err;
3439             break;
3440         case IPC_STAT:
3441         case IPC_SET:
3442         case SEM_STAT:
3443             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3444             if (err)
3445                 return err;
3446             arg.buf = &dsarg;
3447             ret = get_errno(semctl(semid, semnum, cmd, arg));
3448             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3449             if (err)
3450                 return err;
3451             break;
3452         case IPC_INFO:
3453         case SEM_INFO:
3454             arg.__buf = &seminfo;
3455             ret = get_errno(semctl(semid, semnum, cmd, arg));
3456             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3457             if (err)
3458                 return err;
3459             break;
3460         case IPC_RMID:
3461         case GETPID:
3462         case GETNCNT:
3463         case GETZCNT:
3464             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3465             break;
3466     }
3467 
3468     return ret;
3469 }
3470 
3471 struct target_sembuf {
3472     unsigned short sem_num;
3473     short sem_op;
3474     short sem_flg;
3475 };
3476 
3477 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3478                                              abi_ulong target_addr,
3479                                              unsigned nsops)
3480 {
3481     struct target_sembuf *target_sembuf;
3482     int i;
3483 
3484     target_sembuf = lock_user(VERIFY_READ, target_addr,
3485                               nsops*sizeof(struct target_sembuf), 1);
3486     if (!target_sembuf)
3487         return -TARGET_EFAULT;
3488 
3489     for (i = 0; i < nsops; i++) {
3490         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3491         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3492         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3493     }
3494 
3495     unlock_user(target_sembuf, target_addr, 0);
3496 
3497     return 0;
3498 }
3499 
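/*
 * do_semop() prefers the host's direct semtimedop syscall and falls back
 * to the multiplexed ipc(2) syscall (IPCOP_semtimedop) on hosts that only
 * provide the latter.
 */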
3500 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3501 {
3502     struct sembuf sops[nsops];
3503     abi_long ret;
3504 
3505     if (target_to_host_sembuf(sops, ptr, nsops))
3506         return -TARGET_EFAULT;
3507 
3508     ret = -TARGET_ENOSYS;
3509 #ifdef __NR_semtimedop
3510     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3511 #endif
3512 #ifdef __NR_ipc
3513     if (ret == -TARGET_ENOSYS) {
3514         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3515     }
3516 #endif
3517     return ret;
3518 }
3519 
3520 struct target_msqid_ds
3521 {
3522     struct target_ipc_perm msg_perm;
3523     abi_ulong msg_stime;
3524 #if TARGET_ABI_BITS == 32
3525     abi_ulong __unused1;
3526 #endif
3527     abi_ulong msg_rtime;
3528 #if TARGET_ABI_BITS == 32
3529     abi_ulong __unused2;
3530 #endif
3531     abi_ulong msg_ctime;
3532 #if TARGET_ABI_BITS == 32
3533     abi_ulong __unused3;
3534 #endif
3535     abi_ulong __msg_cbytes;
3536     abi_ulong msg_qnum;
3537     abi_ulong msg_qbytes;
3538     abi_ulong msg_lspid;
3539     abi_ulong msg_lrpid;
3540     abi_ulong __unused4;
3541     abi_ulong __unused5;
3542 };
3543 
3544 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3545                                                abi_ulong target_addr)
3546 {
3547     struct target_msqid_ds *target_md;
3548 
3549     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3550         return -TARGET_EFAULT;
3551     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3552         return -TARGET_EFAULT;
3553     host_md->msg_stime = tswapal(target_md->msg_stime);
3554     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3555     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3556     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3557     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3558     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3559     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3560     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3561     unlock_user_struct(target_md, target_addr, 0);
3562     return 0;
3563 }
3564 
3565 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3566                                                struct msqid_ds *host_md)
3567 {
3568     struct target_msqid_ds *target_md;
3569 
3570     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3571         return -TARGET_EFAULT;
3572     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3573         return -TARGET_EFAULT;
3574     target_md->msg_stime = tswapal(host_md->msg_stime);
3575     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3576     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3577     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3578     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3579     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3580     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3581     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3582     unlock_user_struct(target_md, target_addr, 1);
3583     return 0;
3584 }
3585 
3586 struct target_msginfo {
3587     int msgpool;
3588     int msgmap;
3589     int msgmax;
3590     int msgmnb;
3591     int msgmni;
3592     int msgssz;
3593     int msgtql;
3594     unsigned short int msgseg;
3595 };
3596 
3597 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3598                                               struct msginfo *host_msginfo)
3599 {
3600     struct target_msginfo *target_msginfo;
3601     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3602         return -TARGET_EFAULT;
3603     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3604     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3605     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3606     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3607     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3608     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3609     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3610     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3611     unlock_user_struct(target_msginfo, target_addr, 1);
3612     return 0;
3613 }
3614 
3615 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3616 {
3617     struct msqid_ds dsarg;
3618     struct msginfo msginfo;
3619     abi_long ret = -TARGET_EINVAL;
3620 
3621     cmd &= 0xff;
3622 
3623     switch (cmd) {
3624     case IPC_STAT:
3625     case IPC_SET:
3626     case MSG_STAT:
3627         if (target_to_host_msqid_ds(&dsarg,ptr))
3628             return -TARGET_EFAULT;
3629         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3630         if (host_to_target_msqid_ds(ptr,&dsarg))
3631             return -TARGET_EFAULT;
3632         break;
3633     case IPC_RMID:
3634         ret = get_errno(msgctl(msgid, cmd, NULL));
3635         break;
3636     case IPC_INFO:
3637     case MSG_INFO:
3638         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3639         if (host_to_target_msginfo(ptr, &msginfo))
3640             return -TARGET_EFAULT;
3641         break;
3642     }
3643 
3644     return ret;
3645 }
3646 
3647 struct target_msgbuf {
3648     abi_long mtype;
3649     char mtext[1];
3650 };
3651 
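/*
 * The host's struct msgbuf begins with a 'long mtype' followed by the
 * message text, hence the msgsz + sizeof(long) allocations in do_msgsnd()
 * and do_msgrcv() below.
 */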
3652 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3653                                  ssize_t msgsz, int msgflg)
3654 {
3655     struct target_msgbuf *target_mb;
3656     struct msgbuf *host_mb;
3657     abi_long ret = 0;
3658 
3659     if (msgsz < 0) {
3660         return -TARGET_EINVAL;
3661     }
3662 
3663     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3664         return -TARGET_EFAULT;
3665     host_mb = g_try_malloc(msgsz + sizeof(long));
3666     if (!host_mb) {
3667         unlock_user_struct(target_mb, msgp, 0);
3668         return -TARGET_ENOMEM;
3669     }
3670     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3671     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3672     ret = -TARGET_ENOSYS;
3673 #ifdef __NR_msgsnd
3674     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3675 #endif
3676 #ifdef __NR_ipc
3677     if (ret == -TARGET_ENOSYS) {
3678         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3679                                  host_mb, 0));
3680     }
3681 #endif
3682     g_free(host_mb);
3683     unlock_user_struct(target_mb, msgp, 0);
3684 
3685     return ret;
3686 }
3687 
3688 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3689                                  ssize_t msgsz, abi_long msgtyp,
3690                                  int msgflg)
3691 {
3692     struct target_msgbuf *target_mb;
3693     char *target_mtext;
3694     struct msgbuf *host_mb;
3695     abi_long ret = 0;
3696 
3697     if (msgsz < 0) {
3698         return -TARGET_EINVAL;
3699     }
3700 
3701     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3702         return -TARGET_EFAULT;
3703 
3704     host_mb = g_try_malloc(msgsz + sizeof(long));
3705     if (!host_mb) {
3706         ret = -TARGET_ENOMEM;
3707         goto end;
3708     }
3709     ret = -TARGET_ENOSYS;
3710 #ifdef __NR_msgrcv
3711     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3712 #endif
3713 #ifdef __NR_ipc
3714     if (ret == -TARGET_ENOSYS) {
3715         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3716                         msgflg, host_mb, msgtyp));
3717     }
3718 #endif
3719 
3720     if (ret > 0) {
3721         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3722         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3723         if (!target_mtext) {
3724             ret = -TARGET_EFAULT;
3725             goto end;
3726         }
3727         memcpy(target_mb->mtext, host_mb->mtext, ret);
3728         unlock_user(target_mtext, target_mtext_addr, ret);
3729     }
3730 
3731     target_mb->mtype = tswapal(host_mb->mtype);
3732 
3733 end:
3734     if (target_mb)
3735         unlock_user_struct(target_mb, msgp, 1);
3736     g_free(host_mb);
3737     return ret;
3738 }
3739 
3740 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3741                                                abi_ulong target_addr)
3742 {
3743     struct target_shmid_ds *target_sd;
3744 
3745     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3746         return -TARGET_EFAULT;
3747     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3748         return -TARGET_EFAULT;
3749     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3750     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3751     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3752     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3753     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3754     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3755     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3756     unlock_user_struct(target_sd, target_addr, 0);
3757     return 0;
3758 }
3759 
3760 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3761                                                struct shmid_ds *host_sd)
3762 {
3763     struct target_shmid_ds *target_sd;
3764 
3765     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3766         return -TARGET_EFAULT;
3767     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3768         return -TARGET_EFAULT;
3769     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3770     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3771     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3772     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3773     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3774     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3775     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3776     unlock_user_struct(target_sd, target_addr, 1);
3777     return 0;
3778 }
3779 
3780 struct  target_shminfo {
3781     abi_ulong shmmax;
3782     abi_ulong shmmin;
3783     abi_ulong shmmni;
3784     abi_ulong shmseg;
3785     abi_ulong shmall;
3786 };
3787 
3788 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3789                                               struct shminfo *host_shminfo)
3790 {
3791     struct target_shminfo *target_shminfo;
3792     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3793         return -TARGET_EFAULT;
3794     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3795     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3796     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3797     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3798     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3799     unlock_user_struct(target_shminfo, target_addr, 1);
3800     return 0;
3801 }
3802 
3803 struct target_shm_info {
3804     int used_ids;
3805     abi_ulong shm_tot;
3806     abi_ulong shm_rss;
3807     abi_ulong shm_swp;
3808     abi_ulong swap_attempts;
3809     abi_ulong swap_successes;
3810 };
3811 
3812 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3813                                                struct shm_info *host_shm_info)
3814 {
3815     struct target_shm_info *target_shm_info;
3816     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3817         return -TARGET_EFAULT;
3818     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3819     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3820     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3821     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3822     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3823     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3824     unlock_user_struct(target_shm_info, target_addr, 1);
3825     return 0;
3826 }
3827 
3828 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3829 {
3830     struct shmid_ds dsarg;
3831     struct shminfo shminfo;
3832     struct shm_info shm_info;
3833     abi_long ret = -TARGET_EINVAL;
3834 
3835     cmd &= 0xff;
3836 
3837     switch(cmd) {
3838     case IPC_STAT:
3839     case IPC_SET:
3840     case SHM_STAT:
3841         if (target_to_host_shmid_ds(&dsarg, buf))
3842             return -TARGET_EFAULT;
3843         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3844         if (host_to_target_shmid_ds(buf, &dsarg))
3845             return -TARGET_EFAULT;
3846         break;
3847     case IPC_INFO:
3848         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3849         if (host_to_target_shminfo(buf, &shminfo))
3850             return -TARGET_EFAULT;
3851         break;
3852     case SHM_INFO:
3853         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3854         if (host_to_target_shm_info(buf, &shm_info))
3855             return -TARGET_EFAULT;
3856         break;
3857     case IPC_RMID:
3858     case SHM_LOCK:
3859     case SHM_UNLOCK:
3860         ret = get_errno(shmctl(shmid, cmd, NULL));
3861         break;
3862     }
3863 
3864     return ret;
3865 }
3866 
3867 #ifndef TARGET_FORCE_SHMLBA
3868 /* For most architectures, SHMLBA is the same as the page size;
3869  * some architectures have larger values, in which case they should
3870  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3871  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3872  * and defining its own value for SHMLBA.
3873  *
3874  * The kernel also permits SHMLBA to be set by the architecture to a
3875  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3876  * this means that addresses are rounded to the large size if
3877  * SHM_RND is set but addresses not aligned to that size are not rejected
3878  * as long as they are at least page-aligned. Since the only architecture
3879  * which uses this is ia64 this code doesn't provide for that oddity.
3880  */
3881 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3882 {
3883     return TARGET_PAGE_SIZE;
3884 }
3885 #endif
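/*
 * An architecture that needs a larger alignment would instead define
 * TARGET_FORCE_SHMLBA and provide its own target_shmlba(), along the
 * lines of (illustrative sketch only):
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */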
3886 
3887 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3888                                  int shmid, abi_ulong shmaddr, int shmflg)
3889 {
3890     abi_long raddr;
3891     void *host_raddr;
3892     struct shmid_ds shm_info;
3893     int i, ret;
3894     abi_ulong shmlba;
3895 
3896     /* find out the length of the shared memory segment */
3897     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3898     if (is_error(ret)) {
3899         /* can't get length, bail out */
3900         return ret;
3901     }
3902 
3903     shmlba = target_shmlba(cpu_env);
3904 
3905     if (shmaddr & (shmlba - 1)) {
3906         if (shmflg & SHM_RND) {
3907             shmaddr &= ~(shmlba - 1);
3908         } else {
3909             return -TARGET_EINVAL;
3910         }
3911     }
3912     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3913         return -TARGET_EINVAL;
3914     }
3915 
3916     mmap_lock();
3917 
3918     if (shmaddr)
3919         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3920     else {
3921         abi_ulong mmap_start;
3922 
3923         /* In order to use the host shmat, we need to honor host SHMLBA.  */
3924         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
3925 
3926         if (mmap_start == -1) {
3927             errno = ENOMEM;
3928             host_raddr = (void *)-1;
3929         } else
3930             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3931     }
3932 
3933     if (host_raddr == (void *)-1) {
3934         mmap_unlock();
3935         return get_errno((long)host_raddr);
3936     }
3937     raddr = h2g((unsigned long)host_raddr);
3938 
3939     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3940                    PAGE_VALID | PAGE_READ |
3941                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3942 
3943     for (i = 0; i < N_SHM_REGIONS; i++) {
3944         if (!shm_regions[i].in_use) {
3945             shm_regions[i].in_use = true;
3946             shm_regions[i].start = raddr;
3947             shm_regions[i].size = shm_info.shm_segsz;
3948             break;
3949         }
3950     }
3951 
3952     mmap_unlock();
3953     return raddr;
3954 
3955 }
3956 
3957 static inline abi_long do_shmdt(abi_ulong shmaddr)
3958 {
3959     int i;
3960     abi_long rv;
3961 
3962     mmap_lock();
3963 
3964     for (i = 0; i < N_SHM_REGIONS; ++i) {
3965         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3966             shm_regions[i].in_use = false;
3967             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3968             break;
3969         }
3970     }
3971     rv = get_errno(shmdt(g2h(shmaddr)));
3972 
3973     mmap_unlock();
3974 
3975     return rv;
3976 }
3977 
3978 #ifdef TARGET_NR_ipc
3979 /* ??? This only works with linear mappings.  */
3980 /* do_ipc() must return target values and target errnos. */
3981 static abi_long do_ipc(CPUArchState *cpu_env,
3982                        unsigned int call, abi_long first,
3983                        abi_long second, abi_long third,
3984                        abi_long ptr, abi_long fifth)
3985 {
3986     int version;
3987     abi_long ret = 0;
3988 
3989     version = call >> 16;
3990     call &= 0xffff;
3991 
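    /*
     * The guest encodes the interface version in the top 16 bits of 'call'
     * and the operation in the low 16 bits.  Version 0 of IPCOP_msgrcv
     * passes a pointer to a { msgp, msgtyp } pair rather than the message
     * buffer itself, which is handled specially below.
     */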
3992     switch (call) {
3993     case IPCOP_semop:
3994         ret = do_semop(first, ptr, second);
3995         break;
3996 
3997     case IPCOP_semget:
3998         ret = get_errno(semget(first, second, third));
3999         break;
4000 
4001     case IPCOP_semctl: {
4002         /* The semun argument to semctl is passed by value, so dereference the
4003          * ptr argument. */
4004         abi_ulong atptr;
4005         get_user_ual(atptr, ptr);
4006         ret = do_semctl(first, second, third, atptr);
4007         break;
4008     }
4009 
4010     case IPCOP_msgget:
4011         ret = get_errno(msgget(first, second));
4012         break;
4013 
4014     case IPCOP_msgsnd:
4015         ret = do_msgsnd(first, ptr, second, third);
4016         break;
4017 
4018     case IPCOP_msgctl:
4019         ret = do_msgctl(first, second, ptr);
4020         break;
4021 
4022     case IPCOP_msgrcv:
4023         switch (version) {
4024         case 0:
4025             {
4026                 struct target_ipc_kludge {
4027                     abi_long msgp;
4028                     abi_long msgtyp;
4029                 } *tmp;
4030 
4031                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4032                     ret = -TARGET_EFAULT;
4033                     break;
4034                 }
4035 
4036                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4037 
4038                 unlock_user_struct(tmp, ptr, 0);
4039                 break;
4040             }
4041         default:
4042             ret = do_msgrcv(first, ptr, second, fifth, third);
4043         }
4044         break;
4045 
4046     case IPCOP_shmat:
4047         switch (version) {
4048         default:
4049         {
4050             abi_ulong raddr;
4051             raddr = do_shmat(cpu_env, first, ptr, second);
4052             if (is_error(raddr))
4053                 return get_errno(raddr);
4054             if (put_user_ual(raddr, third))
4055                 return -TARGET_EFAULT;
4056             break;
4057         }
4058         case 1:
4059             ret = -TARGET_EINVAL;
4060             break;
4061         }
4062         break;
4063     case IPCOP_shmdt:
4064         ret = do_shmdt(ptr);
4065         break;
4066
4067     case IPCOP_shmget:
4068         /* IPC_* flag values are the same on all Linux platforms */
4069         ret = get_errno(shmget(first, second, third));
4070         break;
4071
4072     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4073     case IPCOP_shmctl:
4074         ret = do_shmctl(first, second, ptr);
4075         break;
4076     default:
4077         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4078         ret = -TARGET_ENOSYS;
4079         break;
4080     }
4081     return ret;
4082 }
4083 #endif
4084 
4085 /* kernel structure types definitions */
4086 
4087 #define STRUCT(name, ...) STRUCT_ ## name,
4088 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4089 enum {
4090 #include "syscall_types.h"
4091 STRUCT_MAX
4092 };
4093 #undef STRUCT
4094 #undef STRUCT_SPECIAL
4095 
4096 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4097 #define STRUCT_SPECIAL(name)
4098 #include "syscall_types.h"
4099 #undef STRUCT
4100 #undef STRUCT_SPECIAL
4101 
4102 typedef struct IOCTLEntry IOCTLEntry;
4103 
4104 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4105                              int fd, int cmd, abi_long arg);
4106 
4107 struct IOCTLEntry {
4108     int target_cmd;
4109     unsigned int host_cmd;
4110     const char *name;
4111     int access;
4112     do_ioctl_fn *do_ioctl;
4113     const argtype arg_type[5];
4114 };
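/*
 * Each IOCTLEntry maps a target ioctl request to its host counterpart.
 * 'access' (IOC_R/IOC_W/IOC_RW) describes in which direction(s) the
 * argument structure described by arg_type has to be converted, and
 * do_ioctl, when set, is a custom handler for requests that need more
 * than the generic thunk-based conversion.
 */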
4115 
4116 #define IOC_R 0x0001
4117 #define IOC_W 0x0002
4118 #define IOC_RW (IOC_R | IOC_W)
4119 
4120 #define MAX_STRUCT_SIZE 4096
4121 
4122 #ifdef CONFIG_FIEMAP
4123 /* So fiemap access checks don't overflow on 32 bit systems.
4124  * This is very slightly smaller than the limit imposed by
4125  * the underlying kernel.
4126  */
4127 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4128                             / sizeof(struct fiemap_extent))
4129 
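/*
 * FS_IOC_FIEMAP can be used in two ways: with fm_extent_count == 0 the
 * kernel only reports how many extents are mapped (in fm_mapped_extents)
 * and returns no extent records; otherwise it appends up to
 * fm_extent_count struct fiemap_extent records after the header.  The
 * handler below sizes its output copy accordingly.
 */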
4130 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4131                                        int fd, int cmd, abi_long arg)
4132 {
4133     /* The parameter for this ioctl is a struct fiemap followed
4134      * by an array of struct fiemap_extent whose size is set
4135      * in fiemap->fm_extent_count. The array is filled in by the
4136      * ioctl.
4137      */
4138     int target_size_in, target_size_out;
4139     struct fiemap *fm;
4140     const argtype *arg_type = ie->arg_type;
4141     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4142     void *argptr, *p;
4143     abi_long ret;
4144     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4145     uint32_t outbufsz;
4146     int free_fm = 0;
4147 
4148     assert(arg_type[0] == TYPE_PTR);
4149     assert(ie->access == IOC_RW);
4150     arg_type++;
4151     target_size_in = thunk_type_size(arg_type, 0);
4152     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4153     if (!argptr) {
4154         return -TARGET_EFAULT;
4155     }
4156     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4157     unlock_user(argptr, arg, 0);
4158     fm = (struct fiemap *)buf_temp;
4159     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4160         return -TARGET_EINVAL;
4161     }
4162 
4163     outbufsz = sizeof (*fm) +
4164         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4165 
4166     if (outbufsz > MAX_STRUCT_SIZE) {
4167         /* We can't fit all the extents into the fixed size buffer.
4168          * Allocate one that is large enough and use it instead.
4169          */
4170         fm = g_try_malloc(outbufsz);
4171         if (!fm) {
4172             return -TARGET_ENOMEM;
4173         }
4174         memcpy(fm, buf_temp, sizeof(struct fiemap));
4175         free_fm = 1;
4176     }
4177     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4178     if (!is_error(ret)) {
4179         target_size_out = target_size_in;
4180         /* An extent_count of 0 means we were only counting the extents
4181          * so there are no structs to copy
4182          */
4183         if (fm->fm_extent_count != 0) {
4184             target_size_out += fm->fm_mapped_extents * extent_size;
4185         }
4186         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4187         if (!argptr) {
4188             ret = -TARGET_EFAULT;
4189         } else {
4190             /* Convert the struct fiemap */
4191             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4192             if (fm->fm_extent_count != 0) {
4193                 p = argptr + target_size_in;
4194                 /* ...and then all the struct fiemap_extents */
4195                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4196                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4197                                   THUNK_TARGET);
4198                     p += extent_size;
4199                 }
4200             }
4201             unlock_user(argptr, arg, target_size_out);
4202         }
4203     }
4204     if (free_fm) {
4205         g_free(fm);
4206     }
4207     return ret;
4208 }
4209 #endif
4210 
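/*
 * SIOCGIFCONF: the guest supplies ifc_len and ifc_buf sized in units of
 * the target 'struct ifreq', while the host kernel works with host-sized
 * ifreqs, so both the length and every array element are converted on the
 * way in and on the way out.
 */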
4211 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4212                                 int fd, int cmd, abi_long arg)
4213 {
4214     const argtype *arg_type = ie->arg_type;
4215     int target_size;
4216     void *argptr;
4217     int ret;
4218     struct ifconf *host_ifconf;
4219     uint32_t outbufsz;
4220     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4221     int target_ifreq_size;
4222     int nb_ifreq;
4223     int free_buf = 0;
4224     int i;
4225     int target_ifc_len;
4226     abi_long target_ifc_buf;
4227     int host_ifc_len;
4228     char *host_ifc_buf;
4229 
4230     assert(arg_type[0] == TYPE_PTR);
4231     assert(ie->access == IOC_RW);
4232 
4233     arg_type++;
4234     target_size = thunk_type_size(arg_type, 0);
4235 
4236     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4237     if (!argptr)
4238         return -TARGET_EFAULT;
4239     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4240     unlock_user(argptr, arg, 0);
4241 
4242     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4243     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4244     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4245 
4246     if (target_ifc_buf != 0) {
4247         target_ifc_len = host_ifconf->ifc_len;
4248         nb_ifreq = target_ifc_len / target_ifreq_size;
4249         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4250 
4251         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4252         if (outbufsz > MAX_STRUCT_SIZE) {
4253             /*
4254              * We can't fit all the ifreq entries into the fixed-size buffer.
4255              * Allocate one that is large enough and use it instead.
4256              */
4257             host_ifconf = malloc(outbufsz);
4258             if (!host_ifconf) {
4259                 return -TARGET_ENOMEM;
4260             }
4261             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4262             free_buf = 1;
4263         }
4264         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4265 
4266         host_ifconf->ifc_len = host_ifc_len;
4267     } else {
4268         host_ifc_buf = NULL;
4269     }
4270     host_ifconf->ifc_buf = host_ifc_buf;
4271 
4272     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4273     if (!is_error(ret)) {
4274         /* convert host ifc_len to target ifc_len */
4275 
4276         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4277         target_ifc_len = nb_ifreq * target_ifreq_size;
4278         host_ifconf->ifc_len = target_ifc_len;
4279 
4280         /* restore target ifc_buf */
4281 
4282         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4283 
4284         /* copy struct ifconf to target user */
4285 
4286         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4287         if (!argptr)
4288             return -TARGET_EFAULT;
4289         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4290         unlock_user(argptr, arg, target_size);
4291 
4292         if (target_ifc_buf != 0) {
4293             /* copy ifreq[] to target user */
4294             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4295             for (i = 0; i < nb_ifreq ; i++) {
4296                 thunk_convert(argptr + i * target_ifreq_size,
4297                               host_ifc_buf + i * sizeof(struct ifreq),
4298                               ifreq_arg_type, THUNK_TARGET);
4299             }
4300             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4301         }
4302     }
4303 
4304     if (free_buf) {
4305         free(host_ifconf);
4306     }
4307 
4308     return ret;
4309 }
4310 
4311 #if defined(CONFIG_USBFS)
4312 #if HOST_LONG_BITS > 64
4313 #error USBDEVFS thunks do not support >64 bit hosts yet.
4314 #endif
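/*
 * The kernel identifies a reaped URB by the host usbdevfs_urb pointer that
 * was submitted, so each in-flight URB gets a live_urb wrapper pairing the
 * host struct with the guest URB and buffer addresses needed to copy
 * results back, keyed in a hash table by the guest URB address.
 */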
4315 struct live_urb {
4316     uint64_t target_urb_adr;
4317     uint64_t target_buf_adr;
4318     char *target_buf_ptr;
4319     struct usbdevfs_urb host_urb;
4320 };
4321 
4322 static GHashTable *usbdevfs_urb_hashtable(void)
4323 {
4324     static GHashTable *urb_hashtable;
4325 
4326     if (!urb_hashtable) {
4327         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4328     }
4329     return urb_hashtable;
4330 }
4331 
4332 static void urb_hashtable_insert(struct live_urb *urb)
4333 {
4334     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4335     g_hash_table_insert(urb_hashtable, urb, urb);
4336 }
4337 
4338 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4339 {
4340     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4341     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4342 }
4343 
4344 static void urb_hashtable_remove(struct live_urb *urb)
4345 {
4346     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4347     g_hash_table_remove(urb_hashtable, urb);
4348 }
4349 
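/*
 * USBDEVFS_REAPURB hands back the host urb pointer that was submitted;
 * subtracting offsetof(struct live_urb, host_urb) recovers the wrapping
 * live_urb and, with it, the guest addresses the results are written to.
 */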
4350 static abi_long
4351 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4352                           int fd, int cmd, abi_long arg)
4353 {
4354     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4355     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4356     struct live_urb *lurb;
4357     void *argptr;
4358     uint64_t hurb;
4359     int target_size;
4360     uintptr_t target_urb_adr;
4361     abi_long ret;
4362 
4363     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4364 
4365     memset(buf_temp, 0, sizeof(uint64_t));
4366     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4367     if (is_error(ret)) {
4368         return ret;
4369     }
4370 
4371     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4372     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4373     if (!lurb->target_urb_adr) {
4374         return -TARGET_EFAULT;
4375     }
4376     urb_hashtable_remove(lurb);
4377     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4378         lurb->host_urb.buffer_length);
4379     lurb->target_buf_ptr = NULL;
4380 
4381     /* restore the guest buffer pointer */
4382     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4383 
4384     /* update the guest urb struct */
4385     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4386     if (!argptr) {
4387         g_free(lurb);
4388         return -TARGET_EFAULT;
4389     }
4390     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4391     unlock_user(argptr, lurb->target_urb_adr, target_size);
4392 
4393     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4394     /* write back the urb handle */
4395     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4396     if (!argptr) {
4397         g_free(lurb);
4398         return -TARGET_EFAULT;
4399     }
4400 
4401     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4402     target_urb_adr = lurb->target_urb_adr;
4403     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4404     unlock_user(argptr, arg, target_size);
4405 
4406     g_free(lurb);
4407     return ret;
4408 }
4409 
4410 static abi_long
4411 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4412                              uint8_t *buf_temp __attribute__((unused)),
4413                              int fd, int cmd, abi_long arg)
4414 {
4415     struct live_urb *lurb;
4416 
4417     /* map target address back to host URB with metadata. */
4418     lurb = urb_hashtable_lookup(arg);
4419     if (!lurb) {
4420         return -TARGET_EFAULT;
4421     }
4422     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4423 }
4424 
4425 static abi_long
4426 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4427                             int fd, int cmd, abi_long arg)
4428 {
4429     const argtype *arg_type = ie->arg_type;
4430     int target_size;
4431     abi_long ret;
4432     void *argptr;
4433     int rw_dir;
4434     struct live_urb *lurb;
4435 
4436     /*
4437      * Each submitted URB needs to map to a unique ID for the
4438      * kernel, and that unique ID needs to be a pointer to
4439      * host memory.  Hence, we need to malloc for each URB.
4440      * Isochronous transfers have a variable-length struct.
4441      */
4442     arg_type++;
4443     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4444 
4445     /* construct host copy of urb and metadata */
4446     lurb = g_try_malloc0(sizeof(struct live_urb));
4447     if (!lurb) {
4448         return -TARGET_ENOMEM;
4449     }
4450 
4451     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4452     if (!argptr) {
4453         g_free(lurb);
4454         return -TARGET_EFAULT;
4455     }
4456     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4457     unlock_user(argptr, arg, 0);
4458 
4459     lurb->target_urb_adr = arg;
4460     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4461 
4462     /* buffer space used depends on endpoint type so lock the entire buffer */
4463     /* control type urbs should check the buffer contents for true direction */
4464     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4465     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4466         lurb->host_urb.buffer_length, 1);
4467     if (lurb->target_buf_ptr == NULL) {
4468         g_free(lurb);
4469         return -TARGET_EFAULT;
4470     }
4471 
4472     /* update buffer pointer in host copy */
4473     lurb->host_urb.buffer = lurb->target_buf_ptr;
4474 
4475     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4476     if (is_error(ret)) {
4477         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4478         g_free(lurb);
4479     } else {
4480         urb_hashtable_insert(lurb);
4481     }
4482 
4483     return ret;
4484 }
4485 #endif /* CONFIG_USBFS */
4486 
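/*
 * Device-mapper ioctls carry a variable-sized payload after the fixed
 * struct dm_ioctl header, described by its data_start/data_size fields,
 * so the handler below copies everything into a larger allocation and
 * converts the payload per command in both directions.
 */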
4487 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4488                             int cmd, abi_long arg)
4489 {
4490     void *argptr;
4491     struct dm_ioctl *host_dm;
4492     abi_long guest_data;
4493     uint32_t guest_data_size;
4494     int target_size;
4495     const argtype *arg_type = ie->arg_type;
4496     abi_long ret;
4497     void *big_buf = NULL;
4498     char *host_data;
4499 
4500     arg_type++;
4501     target_size = thunk_type_size(arg_type, 0);
4502     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4503     if (!argptr) {
4504         ret = -TARGET_EFAULT;
4505         goto out;
4506     }
4507     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4508     unlock_user(argptr, arg, 0);
4509 
4510     /* buf_temp is too small, so fetch things into a bigger buffer */
4511     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4512     memcpy(big_buf, buf_temp, target_size);
4513     buf_temp = big_buf;
4514     host_dm = big_buf;
4515 
4516     guest_data = arg + host_dm->data_start;
4517     if ((guest_data - arg) < 0) {
4518         ret = -TARGET_EINVAL;
4519         goto out;
4520     }
4521     guest_data_size = host_dm->data_size - host_dm->data_start;
4522     host_data = (char*)host_dm + host_dm->data_start;
4523 
4524     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4525     if (!argptr) {
4526         ret = -TARGET_EFAULT;
4527         goto out;
4528     }
4529 
4530     switch (ie->host_cmd) {
4531     case DM_REMOVE_ALL:
4532     case DM_LIST_DEVICES:
4533     case DM_DEV_CREATE:
4534     case DM_DEV_REMOVE:
4535     case DM_DEV_SUSPEND:
4536     case DM_DEV_STATUS:
4537     case DM_DEV_WAIT:
4538     case DM_TABLE_STATUS:
4539     case DM_TABLE_CLEAR:
4540     case DM_TABLE_DEPS:
4541     case DM_LIST_VERSIONS:
4542         /* no input data */
4543         break;
4544     case DM_DEV_RENAME:
4545     case DM_DEV_SET_GEOMETRY:
4546         /* data contains only strings */
4547         memcpy(host_data, argptr, guest_data_size);
4548         break;
4549     case DM_TARGET_MSG:
4550         memcpy(host_data, argptr, guest_data_size);
4551         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4552         break;
4553     case DM_TABLE_LOAD:
4554     {
4555         void *gspec = argptr;
4556         void *cur_data = host_data;
4557         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4558         int spec_size = thunk_type_size(arg_type, 0);
4559         int i;
4560 
4561         for (i = 0; i < host_dm->target_count; i++) {
4562             struct dm_target_spec *spec = cur_data;
4563             uint32_t next;
4564             int slen;
4565 
4566             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4567             slen = strlen((char*)gspec + spec_size) + 1;
4568             next = spec->next;
4569             spec->next = sizeof(*spec) + slen;
4570             strcpy((char*)&spec[1], gspec + spec_size);
4571             gspec += next;
4572             cur_data += spec->next;
4573         }
4574         break;
4575     }
4576     default:
4577         ret = -TARGET_EINVAL;
4578         unlock_user(argptr, guest_data, 0);
4579         goto out;
4580     }
4581     unlock_user(argptr, guest_data, 0);
4582 
4583     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4584     if (!is_error(ret)) {
4585         guest_data = arg + host_dm->data_start;
4586         guest_data_size = host_dm->data_size - host_dm->data_start;
4587         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4588         switch (ie->host_cmd) {
4589         case DM_REMOVE_ALL:
4590         case DM_DEV_CREATE:
4591         case DM_DEV_REMOVE:
4592         case DM_DEV_RENAME:
4593         case DM_DEV_SUSPEND:
4594         case DM_DEV_STATUS:
4595         case DM_TABLE_LOAD:
4596         case DM_TABLE_CLEAR:
4597         case DM_TARGET_MSG:
4598         case DM_DEV_SET_GEOMETRY:
4599             /* no return data */
4600             break;
4601         case DM_LIST_DEVICES:
4602         {
4603             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4604             uint32_t remaining_data = guest_data_size;
4605             void *cur_data = argptr;
4606             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4607             int nl_size = 12; /* can't use thunk_type_size due to alignment */
4608 
4609             while (1) {
4610                 uint32_t next = nl->next;
4611                 if (next) {
4612                     nl->next = nl_size + (strlen(nl->name) + 1);
4613                 }
4614                 if (remaining_data < nl->next) {
4615                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4616                     break;
4617                 }
4618                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4619                 strcpy(cur_data + nl_size, nl->name);
4620                 cur_data += nl->next;
4621                 remaining_data -= nl->next;
4622                 if (!next) {
4623                     break;
4624                 }
4625                 nl = (void*)nl + next;
4626             }
4627             break;
4628         }
4629         case DM_DEV_WAIT:
4630         case DM_TABLE_STATUS:
4631         {
4632             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4633             void *cur_data = argptr;
4634             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4635             int spec_size = thunk_type_size(arg_type, 0);
4636             int i;
4637 
4638             for (i = 0; i < host_dm->target_count; i++) {
4639                 uint32_t next = spec->next;
4640                 int slen = strlen((char*)&spec[1]) + 1;
4641                 spec->next = (cur_data - argptr) + spec_size + slen;
4642                 if (guest_data_size < spec->next) {
4643                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4644                     break;
4645                 }
4646                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4647                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4648                 cur_data = argptr + spec->next;
4649                 spec = (void*)host_dm + host_dm->data_start + next;
4650             }
4651             break;
4652         }
4653         case DM_TABLE_DEPS:
4654         {
4655             void *hdata = (void*)host_dm + host_dm->data_start;
4656             int count = *(uint32_t*)hdata;
4657             uint64_t *hdev = hdata + 8;
4658             uint64_t *gdev = argptr + 8;
4659             int i;
4660 
4661             *(uint32_t*)argptr = tswap32(count);
4662             for (i = 0; i < count; i++) {
4663                 *gdev = tswap64(*hdev);
4664                 gdev++;
4665                 hdev++;
4666             }
4667             break;
4668         }
4669         case DM_LIST_VERSIONS:
4670         {
4671             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4672             uint32_t remaining_data = guest_data_size;
4673             void *cur_data = argptr;
4674             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4675             int vers_size = thunk_type_size(arg_type, 0);
4676 
4677             while (1) {
4678                 uint32_t next = vers->next;
4679                 if (next) {
4680                     vers->next = vers_size + (strlen(vers->name) + 1);
4681                 }
4682                 if (remaining_data < vers->next) {
4683                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4684                     break;
4685                 }
4686                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4687                 strcpy(cur_data + vers_size, vers->name);
4688                 cur_data += vers->next;
4689                 remaining_data -= vers->next;
4690                 if (!next) {
4691                     break;
4692                 }
4693                 vers = (void*)vers + next;
4694             }
4695             break;
4696         }
4697         default:
4698             unlock_user(argptr, guest_data, 0);
4699             ret = -TARGET_EINVAL;
4700             goto out;
4701         }
4702         unlock_user(argptr, guest_data, guest_data_size);
4703 
4704         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4705         if (!argptr) {
4706             ret = -TARGET_EFAULT;
4707             goto out;
4708         }
4709         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4710         unlock_user(argptr, arg, target_size);
4711     }
4712 out:
4713     g_free(big_buf);
4714     return ret;
4715 }
4716 
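/* BLKPG is a two-level ioctl: the struct blkpg_ioctl_arg argument carries a
 * pointer to a struct blkpg_partition payload.  The helper below converts the
 * outer structure, then fetches and converts the payload separately, and
 * points the host copy's data field at the local blkpg_partition before
 * issuing the real ioctl.
 */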
4717 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4718                                int cmd, abi_long arg)
4719 {
4720     void *argptr;
4721     int target_size;
4722     const argtype *arg_type = ie->arg_type;
4723     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4724     abi_long ret;
4725 
4726     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4727     struct blkpg_partition host_part;
4728 
4729     /* Read and convert blkpg */
4730     arg_type++;
4731     target_size = thunk_type_size(arg_type, 0);
4732     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4733     if (!argptr) {
4734         ret = -TARGET_EFAULT;
4735         goto out;
4736     }
4737     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4738     unlock_user(argptr, arg, 0);
4739 
4740     switch (host_blkpg->op) {
4741     case BLKPG_ADD_PARTITION:
4742     case BLKPG_DEL_PARTITION:
4743         /* payload is struct blkpg_partition */
4744         break;
4745     default:
4746         /* Unknown opcode */
4747         ret = -TARGET_EINVAL;
4748         goto out;
4749     }
4750 
4751     /* Read and convert blkpg->data */
4752     arg = (abi_long)(uintptr_t)host_blkpg->data;
4753     target_size = thunk_type_size(part_arg_type, 0);
4754     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4755     if (!argptr) {
4756         ret = -TARGET_EFAULT;
4757         goto out;
4758     }
4759     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4760     unlock_user(argptr, arg, 0);
4761 
4762     /* Swizzle the data pointer to our local copy and call! */
4763     host_blkpg->data = &host_part;
4764     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4765 
4766 out:
4767     return ret;
4768 }
4769 
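/* Routing-table ioctls (typically SIOCADDRT/SIOCDELRT) take a struct rtentry
 * whose rt_dev field is a pointer to a device-name string in guest memory.
 * The helper below converts the structure field by field so that the rt_dev
 * pointer can be replaced with a locked host pointer to the guest string,
 * and unlocks it again after the host ioctl returns.
 */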
4770 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4771                                 int fd, int cmd, abi_long arg)
4772 {
4773     const argtype *arg_type = ie->arg_type;
4774     const StructEntry *se;
4775     const argtype *field_types;
4776     const int *dst_offsets, *src_offsets;
4777     int target_size;
4778     void *argptr;
4779     abi_ulong *target_rt_dev_ptr = NULL;
4780     unsigned long *host_rt_dev_ptr = NULL;
4781     abi_long ret;
4782     int i;
4783 
4784     assert(ie->access == IOC_W);
4785     assert(*arg_type == TYPE_PTR);
4786     arg_type++;
4787     assert(*arg_type == TYPE_STRUCT);
4788     target_size = thunk_type_size(arg_type, 0);
4789     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4790     if (!argptr) {
4791         return -TARGET_EFAULT;
4792     }
4793     arg_type++;
4794     assert(*arg_type == (int)STRUCT_rtentry);
4795     se = struct_entries + *arg_type++;
4796     assert(se->convert[0] == NULL);
4797     /* convert struct here to be able to catch rt_dev string */
4798     field_types = se->field_types;
4799     dst_offsets = se->field_offsets[THUNK_HOST];
4800     src_offsets = se->field_offsets[THUNK_TARGET];
4801     for (i = 0; i < se->nb_fields; i++) {
4802         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4803             assert(*field_types == TYPE_PTRVOID);
4804             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4805             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4806             if (*target_rt_dev_ptr != 0) {
4807                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4808                                                   tswapal(*target_rt_dev_ptr));
4809                 if (!*host_rt_dev_ptr) {
4810                     unlock_user(argptr, arg, 0);
4811                     return -TARGET_EFAULT;
4812                 }
4813             } else {
4814                 *host_rt_dev_ptr = 0;
4815             }
4816             field_types++;
4817             continue;
4818         }
4819         field_types = thunk_convert(buf_temp + dst_offsets[i],
4820                                     argptr + src_offsets[i],
4821                                     field_types, THUNK_HOST);
4822     }
4823     unlock_user(argptr, arg, 0);
4824 
4825     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4826 
4827     assert(host_rt_dev_ptr != NULL);
4828     assert(target_rt_dev_ptr != NULL);
4829     if (*host_rt_dev_ptr != 0) {
4830         unlock_user((void *)*host_rt_dev_ptr,
4831                     *target_rt_dev_ptr, 0);
4832     }
4833     return ret;
4834 }
4835 
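/* KDSIGACCEPT takes a signal number, which must be translated from the
 * target's signal numbering to the host's before the ioctl is forwarded.
 */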
4836 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4837                                      int fd, int cmd, abi_long arg)
4838 {
4839     int sig = target_to_host_signal(arg);
4840     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4841 }
4842 
4843 #ifdef TIOCGPTPEER
4844 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4845                                      int fd, int cmd, abi_long arg)
4846 {
4847     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4848     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4849 }
4850 #endif
4851 
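/* The ioctl translation table is generated from ioctls.h: IOCTL() entries are
 * converted generically from their argtype description, IOCTL_SPECIAL()
 * entries dispatch to a dedicated do_ioctl_*() helper, and IOCTL_IGNORE()
 * entries have no host command and are rejected with -TARGET_ENOSYS.  The
 * list is terminated by an all-zero entry.
 */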
4852 static IOCTLEntry ioctl_entries[] = {
4853 #define IOCTL(cmd, access, ...) \
4854     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4855 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4856     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4857 #define IOCTL_IGNORE(cmd) \
4858     { TARGET_ ## cmd, 0, #cmd },
4859 #include "ioctls.h"
4860     { 0, 0, },
4861 };
4862 
4863 /* ??? Implement proper locking for ioctls.  */
4864 /* do_ioctl() must return target values and target errnos. */
4865 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4866 {
4867     const IOCTLEntry *ie;
4868     const argtype *arg_type;
4869     abi_long ret;
4870     uint8_t buf_temp[MAX_STRUCT_SIZE];
4871     int target_size;
4872     void *argptr;
4873 
4874     ie = ioctl_entries;
4875     for(;;) {
4876         if (ie->target_cmd == 0) {
4877             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4878             return -TARGET_ENOSYS;
4879         }
4880         if (ie->target_cmd == cmd)
4881             break;
4882         ie++;
4883     }
4884     arg_type = ie->arg_type;
4885     if (ie->do_ioctl) {
4886         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4887     } else if (!ie->host_cmd) {
4888         /* Some architectures define BSD ioctls in their headers
4889            that are not implemented in Linux.  */
4890         return -TARGET_ENOSYS;
4891     }
4892 
4893     switch(arg_type[0]) {
4894     case TYPE_NULL:
4895         /* no argument */
4896         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4897         break;
4898     case TYPE_PTRVOID:
4899     case TYPE_INT:
4900         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4901         break;
4902     case TYPE_PTR:
4903         arg_type++;
4904         target_size = thunk_type_size(arg_type, 0);
4905         switch(ie->access) {
4906         case IOC_R:
4907             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4908             if (!is_error(ret)) {
4909                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4910                 if (!argptr)
4911                     return -TARGET_EFAULT;
4912                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4913                 unlock_user(argptr, arg, target_size);
4914             }
4915             break;
4916         case IOC_W:
4917             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4918             if (!argptr)
4919                 return -TARGET_EFAULT;
4920             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4921             unlock_user(argptr, arg, 0);
4922             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4923             break;
4924         default:
4925         case IOC_RW:
4926             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4927             if (!argptr)
4928                 return -TARGET_EFAULT;
4929             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4930             unlock_user(argptr, arg, 0);
4931             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4932             if (!is_error(ret)) {
4933                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4934                 if (!argptr)
4935                     return -TARGET_EFAULT;
4936                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4937                 unlock_user(argptr, arg, target_size);
4938             }
4939             break;
4940         }
4941         break;
4942     default:
4943         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4944                  (long)cmd, arg_type[0]);
4945         ret = -TARGET_ENOSYS;
4946         break;
4947     }
4948     return ret;
4949 }
4950 
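/* termios flag translation: each bitmask_transtbl entry maps a (mask, bits)
 * pair in the target encoding to the corresponding (mask, bits) pair in the
 * host encoding, so single-bit flags and multi-bit fields such as CBAUD or
 * CSIZE can both be handled by target_to_host_bitmask() and
 * host_to_target_bitmask().  For example, { TARGET_CSIZE, TARGET_CS7, CSIZE,
 * CS7 } converts a 7-bit character-size setting between the two encodings.
 */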
4951 static const bitmask_transtbl iflag_tbl[] = {
4952         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4953         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4954         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4955         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4956         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4957         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4958         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4959         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4960         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4961         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4962         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4963         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4964         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4965         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4966         { 0, 0, 0, 0 }
4967 };
4968 
4969 static const bitmask_transtbl oflag_tbl[] = {
4970 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4971 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4972 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4973 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4974 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4975 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4976 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4977 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4978 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4979 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4980 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4981 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4982 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4983 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4984 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4985 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4986 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4987 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4988 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4989 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4990 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4991 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4992 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4993 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4994 	{ 0, 0, 0, 0 }
4995 };
4996 
4997 static const bitmask_transtbl cflag_tbl[] = {
4998 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4999 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5000 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5001 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5002 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5003 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5004 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5005 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5006 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5007 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5008 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5009 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5010 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5011 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5012 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5013 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5014 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5015 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5016 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5017 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5018 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5019 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5020 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5021 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5022 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5023 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5024 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5025 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5026 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5027 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5028 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5029 	{ 0, 0, 0, 0 }
5030 };
5031 
5032 static const bitmask_transtbl lflag_tbl[] = {
5033 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5034 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5035 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5036 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5037 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5038 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5039 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5040 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5041 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5042 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5043 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5044 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5045 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5046 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5047 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5048 	{ 0, 0, 0, 0 }
5049 };
5050 
5051 static void target_to_host_termios (void *dst, const void *src)
5052 {
5053     struct host_termios *host = dst;
5054     const struct target_termios *target = src;
5055 
5056     host->c_iflag =
5057         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5058     host->c_oflag =
5059         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5060     host->c_cflag =
5061         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5062     host->c_lflag =
5063         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5064     host->c_line = target->c_line;
5065 
5066     memset(host->c_cc, 0, sizeof(host->c_cc));
5067     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5068     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5069     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5070     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5071     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5072     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5073     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5074     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5075     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5076     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5077     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5078     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5079     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5080     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5081     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5082     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5083     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5084 }
5085 
5086 static void host_to_target_termios (void *dst, const void *src)
5087 {
5088     struct target_termios *target = dst;
5089     const struct host_termios *host = src;
5090 
5091     target->c_iflag =
5092         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5093     target->c_oflag =
5094         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5095     target->c_cflag =
5096         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5097     target->c_lflag =
5098         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5099     target->c_line = host->c_line;
5100 
5101     memset(target->c_cc, 0, sizeof(target->c_cc));
5102     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5103     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5104     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5105     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5106     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5107     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5108     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5109     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5110     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5111     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5112     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5113     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5114     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5115     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5116     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5117     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5118     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5119 }
5120 
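/* struct termios is converted by explicit callbacks rather than a generic
 * thunk description; struct_termios_def records the two convert functions
 * above along with the sizes and alignments of the target and host layouts.
 */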
5121 static const StructEntry struct_termios_def = {
5122     .convert = { host_to_target_termios, target_to_host_termios },
5123     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5124     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5125 };
5126 
5127 static bitmask_transtbl mmap_flags_tbl[] = {
5128     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5129     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5130     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5131     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5132       MAP_ANONYMOUS, MAP_ANONYMOUS },
5133     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5134       MAP_GROWSDOWN, MAP_GROWSDOWN },
5135     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5136       MAP_DENYWRITE, MAP_DENYWRITE },
5137     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5138       MAP_EXECUTABLE, MAP_EXECUTABLE },
5139     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5140     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5141       MAP_NORESERVE, MAP_NORESERVE },
5142     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5143     /* MAP_STACK had been ignored by the kernel for quite some time.
5144        Recognize it for the target insofar as we do not want to pass
5145        it through to the host.  */
5146     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5147     { 0, 0, 0, 0 }
5148 };
5149 
5150 #if defined(TARGET_I386)
5151 
5152 /* NOTE: there is really one LDT for all the threads */
5153 static uint8_t *ldt_table;
5154 
5155 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5156 {
5157     int size;
5158     void *p;
5159 
5160     if (!ldt_table)
5161         return 0;
5162     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5163     if (size > bytecount)
5164         size = bytecount;
5165     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5166     if (!p)
5167         return -TARGET_EFAULT;
5168     /* ??? Should this be byteswapped?  */
5169     memcpy(p, ldt_table, size);
5170     unlock_user(p, ptr, size);
5171     return size;
5172 }
5173 
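/* write_ldt() mirrors the kernel's implementation: it decodes the guest's
 * struct target_modify_ldt_ldt_s, builds the two 32-bit descriptor words, and
 * stores them into an LDT table that is lazily allocated as an anonymous
 * mapping in guest memory the first time an entry is written.
 */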
5174 /* XXX: add locking support */
5175 static abi_long write_ldt(CPUX86State *env,
5176                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5177 {
5178     struct target_modify_ldt_ldt_s ldt_info;
5179     struct target_modify_ldt_ldt_s *target_ldt_info;
5180     int seg_32bit, contents, read_exec_only, limit_in_pages;
5181     int seg_not_present, useable, lm;
5182     uint32_t *lp, entry_1, entry_2;
5183 
5184     if (bytecount != sizeof(ldt_info))
5185         return -TARGET_EINVAL;
5186     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5187         return -TARGET_EFAULT;
5188     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5189     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5190     ldt_info.limit = tswap32(target_ldt_info->limit);
5191     ldt_info.flags = tswap32(target_ldt_info->flags);
5192     unlock_user_struct(target_ldt_info, ptr, 0);
5193 
5194     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5195         return -TARGET_EINVAL;
5196     seg_32bit = ldt_info.flags & 1;
5197     contents = (ldt_info.flags >> 1) & 3;
5198     read_exec_only = (ldt_info.flags >> 3) & 1;
5199     limit_in_pages = (ldt_info.flags >> 4) & 1;
5200     seg_not_present = (ldt_info.flags >> 5) & 1;
5201     useable = (ldt_info.flags >> 6) & 1;
5202 #ifdef TARGET_ABI32
5203     lm = 0;
5204 #else
5205     lm = (ldt_info.flags >> 7) & 1;
5206 #endif
5207     if (contents == 3) {
5208         if (oldmode)
5209             return -TARGET_EINVAL;
5210         if (seg_not_present == 0)
5211             return -TARGET_EINVAL;
5212     }
5213     /* allocate the LDT */
5214     if (!ldt_table) {
5215         env->ldt.base = target_mmap(0,
5216                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5217                                     PROT_READ|PROT_WRITE,
5218                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5219         if (env->ldt.base == -1)
5220             return -TARGET_ENOMEM;
5221         memset(g2h(env->ldt.base), 0,
5222                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5223         env->ldt.limit = 0xffff;
5224         ldt_table = g2h(env->ldt.base);
5225     }
5226 
5227     /* NOTE: same code as Linux kernel */
5228     /* Allow LDTs to be cleared by the user. */
5229     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5230         if (oldmode ||
5231             (contents == 0		&&
5232              read_exec_only == 1	&&
5233              seg_32bit == 0		&&
5234              limit_in_pages == 0	&&
5235              seg_not_present == 1	&&
5236              useable == 0 )) {
5237             entry_1 = 0;
5238             entry_2 = 0;
5239             goto install;
5240         }
5241     }
5242 
5243     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5244         (ldt_info.limit & 0x0ffff);
5245     entry_2 = (ldt_info.base_addr & 0xff000000) |
5246         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5247         (ldt_info.limit & 0xf0000) |
5248         ((read_exec_only ^ 1) << 9) |
5249         (contents << 10) |
5250         ((seg_not_present ^ 1) << 15) |
5251         (seg_32bit << 22) |
5252         (limit_in_pages << 23) |
5253         (lm << 21) |
5254         0x7000;
5255     if (!oldmode)
5256         entry_2 |= (useable << 20);
5257 
5258     /* Install the new entry ...  */
5259 install:
5260     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5261     lp[0] = tswap32(entry_1);
5262     lp[1] = tswap32(entry_2);
5263     return 0;
5264 }
5265 
5266 /* specific and weird i386 syscalls */
5267 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5268                               unsigned long bytecount)
5269 {
5270     abi_long ret;
5271 
5272     switch (func) {
5273     case 0:
5274         ret = read_ldt(ptr, bytecount);
5275         break;
5276     case 1:
5277         ret = write_ldt(env, ptr, bytecount, 1);
5278         break;
5279     case 0x11:
5280         ret = write_ldt(env, ptr, bytecount, 0);
5281         break;
5282     default:
5283         ret = -TARGET_ENOSYS;
5284         break;
5285     }
5286     return ret;
5287 }
5288 
5289 #if defined(TARGET_I386) && defined(TARGET_ABI32)
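/* do_set_thread_area()/do_get_thread_area() operate on the GDT's TLS entries
 * rather than the LDT; an entry_number of -1 requests the first free TLS
 * slot, and the chosen index is written back to the guest structure.
 */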
5290 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5291 {
5292     uint64_t *gdt_table = g2h(env->gdt.base);
5293     struct target_modify_ldt_ldt_s ldt_info;
5294     struct target_modify_ldt_ldt_s *target_ldt_info;
5295     int seg_32bit, contents, read_exec_only, limit_in_pages;
5296     int seg_not_present, useable, lm;
5297     uint32_t *lp, entry_1, entry_2;
5298     int i;
5299 
5300     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5301     if (!target_ldt_info)
5302         return -TARGET_EFAULT;
5303     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5304     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5305     ldt_info.limit = tswap32(target_ldt_info->limit);
5306     ldt_info.flags = tswap32(target_ldt_info->flags);
5307     if (ldt_info.entry_number == -1) {
5308         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5309             if (gdt_table[i] == 0) {
5310                 ldt_info.entry_number = i;
5311                 target_ldt_info->entry_number = tswap32(i);
5312                 break;
5313             }
5314         }
5315     }
5316     unlock_user_struct(target_ldt_info, ptr, 1);
5317 
5318     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5319         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5320            return -TARGET_EINVAL;
5321     seg_32bit = ldt_info.flags & 1;
5322     contents = (ldt_info.flags >> 1) & 3;
5323     read_exec_only = (ldt_info.flags >> 3) & 1;
5324     limit_in_pages = (ldt_info.flags >> 4) & 1;
5325     seg_not_present = (ldt_info.flags >> 5) & 1;
5326     useable = (ldt_info.flags >> 6) & 1;
5327 #ifdef TARGET_ABI32
5328     lm = 0;
5329 #else
5330     lm = (ldt_info.flags >> 7) & 1;
5331 #endif
5332 
5333     if (contents == 3) {
5334         if (seg_not_present == 0)
5335             return -TARGET_EINVAL;
5336     }
5337 
5338     /* NOTE: same code as Linux kernel */
5339     /* Allow LDTs to be cleared by the user. */
5340     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5341         if ((contents == 0             &&
5342              read_exec_only == 1       &&
5343              seg_32bit == 0            &&
5344              limit_in_pages == 0       &&
5345              seg_not_present == 1      &&
5346              useable == 0 )) {
5347             entry_1 = 0;
5348             entry_2 = 0;
5349             goto install;
5350         }
5351     }
5352 
5353     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5354         (ldt_info.limit & 0x0ffff);
5355     entry_2 = (ldt_info.base_addr & 0xff000000) |
5356         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5357         (ldt_info.limit & 0xf0000) |
5358         ((read_exec_only ^ 1) << 9) |
5359         (contents << 10) |
5360         ((seg_not_present ^ 1) << 15) |
5361         (seg_32bit << 22) |
5362         (limit_in_pages << 23) |
5363         (useable << 20) |
5364         (lm << 21) |
5365         0x7000;
5366 
5367     /* Install the new entry ...  */
5368 install:
5369     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5370     lp[0] = tswap32(entry_1);
5371     lp[1] = tswap32(entry_2);
5372     return 0;
5373 }
5374 
5375 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5376 {
5377     struct target_modify_ldt_ldt_s *target_ldt_info;
5378     uint64_t *gdt_table = g2h(env->gdt.base);
5379     uint32_t base_addr, limit, flags;
5380     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5381     int seg_not_present, useable, lm;
5382     uint32_t *lp, entry_1, entry_2;
5383 
5384     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5385     if (!target_ldt_info)
5386         return -TARGET_EFAULT;
5387     idx = tswap32(target_ldt_info->entry_number);
5388     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5389         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5390         unlock_user_struct(target_ldt_info, ptr, 1);
5391         return -TARGET_EINVAL;
5392     }
5393     lp = (uint32_t *)(gdt_table + idx);
5394     entry_1 = tswap32(lp[0]);
5395     entry_2 = tswap32(lp[1]);
5396 
5397     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5398     contents = (entry_2 >> 10) & 3;
5399     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5400     seg_32bit = (entry_2 >> 22) & 1;
5401     limit_in_pages = (entry_2 >> 23) & 1;
5402     useable = (entry_2 >> 20) & 1;
5403 #ifdef TARGET_ABI32
5404     lm = 0;
5405 #else
5406     lm = (entry_2 >> 21) & 1;
5407 #endif
5408     flags = (seg_32bit << 0) | (contents << 1) |
5409         (read_exec_only << 3) | (limit_in_pages << 4) |
5410         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5411     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5412     base_addr = (entry_1 >> 16) |
5413         (entry_2 & 0xff000000) |
5414         ((entry_2 & 0xff) << 16);
5415     target_ldt_info->base_addr = tswapal(base_addr);
5416     target_ldt_info->limit = tswap32(limit);
5417     target_ldt_info->flags = tswap32(flags);
5418     unlock_user_struct(target_ldt_info, ptr, 1);
5419     return 0;
5420 }
5421 #endif /* TARGET_I386 && TARGET_ABI32 */
5422 
5423 #ifndef TARGET_ABI32
5424 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5425 {
5426     abi_long ret = 0;
5427     abi_ulong val;
5428     int idx;
5429 
5430     switch(code) {
5431     case TARGET_ARCH_SET_GS:
5432     case TARGET_ARCH_SET_FS:
5433         if (code == TARGET_ARCH_SET_GS)
5434             idx = R_GS;
5435         else
5436             idx = R_FS;
5437         cpu_x86_load_seg(env, idx, 0);
5438         env->segs[idx].base = addr;
5439         break;
5440     case TARGET_ARCH_GET_GS:
5441     case TARGET_ARCH_GET_FS:
5442         if (code == TARGET_ARCH_GET_GS)
5443             idx = R_GS;
5444         else
5445             idx = R_FS;
5446         val = env->segs[idx].base;
5447         if (put_user(val, addr, abi_ulong))
5448             ret = -TARGET_EFAULT;
5449         break;
5450     default:
5451         ret = -TARGET_EINVAL;
5452         break;
5453     }
5454     return ret;
5455 }
5456 #endif
5457 
5458 #endif /* defined(TARGET_I386) */
5459 
5460 #define NEW_STACK_SIZE 0x40000
5461 
5462 
5463 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5464 typedef struct {
5465     CPUArchState *env;
5466     pthread_mutex_t mutex;
5467     pthread_cond_t cond;
5468     pthread_t thread;
5469     uint32_t tid;
5470     abi_ulong child_tidptr;
5471     abi_ulong parent_tidptr;
5472     sigset_t sigmask;
5473 } new_thread_info;
5474 
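/* clone_func() is the start routine for threads created with CLONE_VM: it
 * registers the new thread with RCU and TCG, publishes its TID to the
 * child/parent locations requested by the clone flags, re-enables signals,
 * signals the parent that setup is complete, waits for the parent to finish
 * TLS initialization, and then enters cpu_loop().
 */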
5475 static void *clone_func(void *arg)
5476 {
5477     new_thread_info *info = arg;
5478     CPUArchState *env;
5479     CPUState *cpu;
5480     TaskState *ts;
5481 
5482     rcu_register_thread();
5483     tcg_register_thread();
5484     env = info->env;
5485     cpu = env_cpu(env);
5486     thread_cpu = cpu;
5487     ts = (TaskState *)cpu->opaque;
5488     info->tid = sys_gettid();
5489     task_settid(ts);
5490     if (info->child_tidptr)
5491         put_user_u32(info->tid, info->child_tidptr);
5492     if (info->parent_tidptr)
5493         put_user_u32(info->tid, info->parent_tidptr);
5494     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5495     /* Enable signals.  */
5496     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5497     /* Signal to the parent that we're ready.  */
5498     pthread_mutex_lock(&info->mutex);
5499     pthread_cond_broadcast(&info->cond);
5500     pthread_mutex_unlock(&info->mutex);
5501     /* Wait until the parent has finished initializing the tls state.  */
5502     pthread_mutex_lock(&clone_lock);
5503     pthread_mutex_unlock(&clone_lock);
5504     cpu_loop(env);
5505     /* never exits */
5506     return NULL;
5507 }
5508 
5509 /* do_fork() must return host values and target errnos (unlike most
5510    do_*() functions). */
5511 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5512                    abi_ulong parent_tidptr, target_ulong newtls,
5513                    abi_ulong child_tidptr)
5514 {
5515     CPUState *cpu = env_cpu(env);
5516     int ret;
5517     TaskState *ts;
5518     CPUState *new_cpu;
5519     CPUArchState *new_env;
5520     sigset_t sigmask;
5521 
5522     flags &= ~CLONE_IGNORED_FLAGS;
5523 
5524     /* Emulate vfork() with fork() */
5525     if (flags & CLONE_VFORK)
5526         flags &= ~(CLONE_VFORK | CLONE_VM);
5527 
5528     if (flags & CLONE_VM) {
5529         TaskState *parent_ts = (TaskState *)cpu->opaque;
5530         new_thread_info info;
5531         pthread_attr_t attr;
5532 
5533         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5534             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5535             return -TARGET_EINVAL;
5536         }
5537 
5538         ts = g_new0(TaskState, 1);
5539         init_task_state(ts);
5540 
5541         /* Grab a mutex so that thread setup appears atomic.  */
5542         pthread_mutex_lock(&clone_lock);
5543 
5544         /* we create a new CPU instance. */
5545         new_env = cpu_copy(env);
5546         /* Init regs that differ from the parent.  */
5547         cpu_clone_regs(new_env, newsp);
5548         new_cpu = env_cpu(new_env);
5549         new_cpu->opaque = ts;
5550         ts->bprm = parent_ts->bprm;
5551         ts->info = parent_ts->info;
5552         ts->signal_mask = parent_ts->signal_mask;
5553 
5554         if (flags & CLONE_CHILD_CLEARTID) {
5555             ts->child_tidptr = child_tidptr;
5556         }
5557 
5558         if (flags & CLONE_SETTLS) {
5559             cpu_set_tls (new_env, newtls);
5560         }
5561 
5562         memset(&info, 0, sizeof(info));
5563         pthread_mutex_init(&info.mutex, NULL);
5564         pthread_mutex_lock(&info.mutex);
5565         pthread_cond_init(&info.cond, NULL);
5566         info.env = new_env;
5567         if (flags & CLONE_CHILD_SETTID) {
5568             info.child_tidptr = child_tidptr;
5569         }
5570         if (flags & CLONE_PARENT_SETTID) {
5571             info.parent_tidptr = parent_tidptr;
5572         }
5573 
5574         ret = pthread_attr_init(&attr);
5575         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5576         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5577         /* It is not safe to deliver signals until the child has finished
5578            initializing, so temporarily block all signals.  */
5579         sigfillset(&sigmask);
5580         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5581         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5582 
5583         /* If this is our first additional thread, we need to ensure we
5584          * generate code for parallel execution and flush old translations.
5585          */
5586         if (!parallel_cpus) {
5587             parallel_cpus = true;
5588             tb_flush(cpu);
5589         }
5590 
5591         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5592         /* TODO: Free new CPU state if thread creation failed.  */
5593 
5594         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5595         pthread_attr_destroy(&attr);
5596         if (ret == 0) {
5597             /* Wait for the child to initialize.  */
5598             pthread_cond_wait(&info.cond, &info.mutex);
5599             ret = info.tid;
5600         } else {
5601             ret = -1;
5602         }
5603         pthread_mutex_unlock(&info.mutex);
5604         pthread_cond_destroy(&info.cond);
5605         pthread_mutex_destroy(&info.mutex);
5606         pthread_mutex_unlock(&clone_lock);
5607     } else {
5608         /* if no CLONE_VM, we consider it a fork */
5609         if (flags & CLONE_INVALID_FORK_FLAGS) {
5610             return -TARGET_EINVAL;
5611         }
5612 
5613         /* We can't support custom termination signals */
5614         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5615             return -TARGET_EINVAL;
5616         }
5617 
5618         if (block_signals()) {
5619             return -TARGET_ERESTARTSYS;
5620         }
5621 
5622         fork_start();
5623         ret = fork();
5624         if (ret == 0) {
5625             /* Child Process.  */
5626             cpu_clone_regs(env, newsp);
5627             fork_end(1);
5628             /* There is a race condition here.  The parent process could
5629                theoretically read the TID in the child process before the child
5630                tid is set.  This would require using either ptrace
5631                (not implemented) or having *_tidptr point at a shared memory
5632                mapping.  We can't repeat the spinlock hack used above because
5633                the child process gets its own copy of the lock.  */
5634             if (flags & CLONE_CHILD_SETTID)
5635                 put_user_u32(sys_gettid(), child_tidptr);
5636             if (flags & CLONE_PARENT_SETTID)
5637                 put_user_u32(sys_gettid(), parent_tidptr);
5638             ts = (TaskState *)cpu->opaque;
5639             if (flags & CLONE_SETTLS)
5640                 cpu_set_tls (env, newtls);
5641             if (flags & CLONE_CHILD_CLEARTID)
5642                 ts->child_tidptr = child_tidptr;
5643         } else {
5644             fork_end(0);
5645         }
5646     }
5647     return ret;
5648 }
5649 
5650 /* warning: doesn't handle Linux-specific flags... */
5651 static int target_to_host_fcntl_cmd(int cmd)
5652 {
5653     int ret;
5654 
5655     switch(cmd) {
5656     case TARGET_F_DUPFD:
5657     case TARGET_F_GETFD:
5658     case TARGET_F_SETFD:
5659     case TARGET_F_GETFL:
5660     case TARGET_F_SETFL:
5661         ret = cmd;
5662         break;
5663     case TARGET_F_GETLK:
5664         ret = F_GETLK64;
5665         break;
5666     case TARGET_F_SETLK:
5667         ret = F_SETLK64;
5668         break;
5669     case TARGET_F_SETLKW:
5670         ret = F_SETLKW64;
5671         break;
5672     case TARGET_F_GETOWN:
5673         ret = F_GETOWN;
5674         break;
5675     case TARGET_F_SETOWN:
5676         ret = F_SETOWN;
5677         break;
5678     case TARGET_F_GETSIG:
5679         ret = F_GETSIG;
5680         break;
5681     case TARGET_F_SETSIG:
5682         ret = F_SETSIG;
5683         break;
5684 #if TARGET_ABI_BITS == 32
5685     case TARGET_F_GETLK64:
5686         ret = F_GETLK64;
5687         break;
5688     case TARGET_F_SETLK64:
5689         ret = F_SETLK64;
5690         break;
5691     case TARGET_F_SETLKW64:
5692         ret = F_SETLKW64;
5693         break;
5694 #endif
5695     case TARGET_F_SETLEASE:
5696         ret = F_SETLEASE;
5697         break;
5698     case TARGET_F_GETLEASE:
5699         ret = F_GETLEASE;
5700         break;
5701 #ifdef F_DUPFD_CLOEXEC
5702     case TARGET_F_DUPFD_CLOEXEC:
5703         ret = F_DUPFD_CLOEXEC;
5704         break;
5705 #endif
5706     case TARGET_F_NOTIFY:
5707         ret = F_NOTIFY;
5708         break;
5709 #ifdef F_GETOWN_EX
5710     case TARGET_F_GETOWN_EX:
5711         ret = F_GETOWN_EX;
5712         break;
5713 #endif
5714 #ifdef F_SETOWN_EX
5715     case TARGET_F_SETOWN_EX:
5716         ret = F_SETOWN_EX;
5717         break;
5718 #endif
5719 #ifdef F_SETPIPE_SZ
5720     case TARGET_F_SETPIPE_SZ:
5721         ret = F_SETPIPE_SZ;
5722         break;
5723     case TARGET_F_GETPIPE_SZ:
5724         ret = F_GETPIPE_SZ;
5725         break;
5726 #endif
5727     default:
5728         ret = -TARGET_EINVAL;
5729         break;
5730     }
5731 
5732 #if defined(__powerpc64__)
5733     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
5734      * which are not supported by the kernel. The glibc fcntl call actually
5735      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
5736      * the syscall directly, adjust to what the kernel supports.
5737      */
5738     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5739         ret -= F_GETLK64 - 5;
5740     }
5741 #endif
5742 
5743     return ret;
5744 }
5745 
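/* FLOCK_TRANSTBL is an X-macro: it expands to a switch over the lock type,
 * and TRANSTBL_CONVERT is redefined before each expansion to generate either
 * the target-to-host or the host-to-target case labels.  With the
 * target-to-host definition below it expands roughly to:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     ...
 *     }
 */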
5746 #define FLOCK_TRANSTBL \
5747     switch (type) { \
5748     TRANSTBL_CONVERT(F_RDLCK); \
5749     TRANSTBL_CONVERT(F_WRLCK); \
5750     TRANSTBL_CONVERT(F_UNLCK); \
5751     TRANSTBL_CONVERT(F_EXLCK); \
5752     TRANSTBL_CONVERT(F_SHLCK); \
5753     }
5754 
5755 static int target_to_host_flock(int type)
5756 {
5757 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5758     FLOCK_TRANSTBL
5759 #undef  TRANSTBL_CONVERT
5760     return -TARGET_EINVAL;
5761 }
5762 
5763 static int host_to_target_flock(int type)
5764 {
5765 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5766     FLOCK_TRANSTBL
5767 #undef  TRANSTBL_CONVERT
5768     /* if we don't know how to convert the value coming
5769      * from the host, we copy it to the target field as-is
5770      */
5771     return type;
5772 }
5773 
5774 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5775                                             abi_ulong target_flock_addr)
5776 {
5777     struct target_flock *target_fl;
5778     int l_type;
5779 
5780     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5781         return -TARGET_EFAULT;
5782     }
5783 
5784     __get_user(l_type, &target_fl->l_type);
5785     l_type = target_to_host_flock(l_type);
5786     if (l_type < 0) {
5787         return l_type;
5788     }
5789     fl->l_type = l_type;
5790     __get_user(fl->l_whence, &target_fl->l_whence);
5791     __get_user(fl->l_start, &target_fl->l_start);
5792     __get_user(fl->l_len, &target_fl->l_len);
5793     __get_user(fl->l_pid, &target_fl->l_pid);
5794     unlock_user_struct(target_fl, target_flock_addr, 0);
5795     return 0;
5796 }
5797 
5798 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5799                                           const struct flock64 *fl)
5800 {
5801     struct target_flock *target_fl;
5802     short l_type;
5803 
5804     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5805         return -TARGET_EFAULT;
5806     }
5807 
5808     l_type = host_to_target_flock(fl->l_type);
5809     __put_user(l_type, &target_fl->l_type);
5810     __put_user(fl->l_whence, &target_fl->l_whence);
5811     __put_user(fl->l_start, &target_fl->l_start);
5812     __put_user(fl->l_len, &target_fl->l_len);
5813     __put_user(fl->l_pid, &target_fl->l_pid);
5814     unlock_user_struct(target_fl, target_flock_addr, 1);
5815     return 0;
5816 }
5817 
5818 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5819 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5820 
5821 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5822 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5823                                                    abi_ulong target_flock_addr)
5824 {
5825     struct target_oabi_flock64 *target_fl;
5826     int l_type;
5827 
5828     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5829         return -TARGET_EFAULT;
5830     }
5831 
5832     __get_user(l_type, &target_fl->l_type);
5833     l_type = target_to_host_flock(l_type);
5834     if (l_type < 0) {
5835         return l_type;
5836     }
5837     fl->l_type = l_type;
5838     __get_user(fl->l_whence, &target_fl->l_whence);
5839     __get_user(fl->l_start, &target_fl->l_start);
5840     __get_user(fl->l_len, &target_fl->l_len);
5841     __get_user(fl->l_pid, &target_fl->l_pid);
5842     unlock_user_struct(target_fl, target_flock_addr, 0);
5843     return 0;
5844 }
5845 
5846 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5847                                                  const struct flock64 *fl)
5848 {
5849     struct target_oabi_flock64 *target_fl;
5850     short l_type;
5851 
5852     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5853         return -TARGET_EFAULT;
5854     }
5855 
5856     l_type = host_to_target_flock(fl->l_type);
5857     __put_user(l_type, &target_fl->l_type);
5858     __put_user(fl->l_whence, &target_fl->l_whence);
5859     __put_user(fl->l_start, &target_fl->l_start);
5860     __put_user(fl->l_len, &target_fl->l_len);
5861     __put_user(fl->l_pid, &target_fl->l_pid);
5862     unlock_user_struct(target_fl, target_flock_addr, 1);
5863     return 0;
5864 }
5865 #endif
5866 
5867 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5868                                               abi_ulong target_flock_addr)
5869 {
5870     struct target_flock64 *target_fl;
5871     int l_type;
5872 
5873     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5874         return -TARGET_EFAULT;
5875     }
5876 
5877     __get_user(l_type, &target_fl->l_type);
5878     l_type = target_to_host_flock(l_type);
5879     if (l_type < 0) {
5880         return l_type;
5881     }
5882     fl->l_type = l_type;
5883     __get_user(fl->l_whence, &target_fl->l_whence);
5884     __get_user(fl->l_start, &target_fl->l_start);
5885     __get_user(fl->l_len, &target_fl->l_len);
5886     __get_user(fl->l_pid, &target_fl->l_pid);
5887     unlock_user_struct(target_fl, target_flock_addr, 0);
5888     return 0;
5889 }
5890 
5891 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5892                                             const struct flock64 *fl)
5893 {
5894     struct target_flock64 *target_fl;
5895     short l_type;
5896 
5897     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5898         return -TARGET_EFAULT;
5899     }
5900 
5901     l_type = host_to_target_flock(fl->l_type);
5902     __put_user(l_type, &target_fl->l_type);
5903     __put_user(fl->l_whence, &target_fl->l_whence);
5904     __put_user(fl->l_start, &target_fl->l_start);
5905     __put_user(fl->l_len, &target_fl->l_len);
5906     __put_user(fl->l_pid, &target_fl->l_pid);
5907     unlock_user_struct(target_fl, target_flock_addr, 1);
5908     return 0;
5909 }
5910 
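/* do_fcntl() translates the command with target_to_host_fcntl_cmd(), copies
 * struct flock/flock64 arguments in the appropriate direction around the host
 * fcntl call, and converts the F_GETFL/F_SETFL flag bitmasks and the
 * f_owner_ex structure where required.
 */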
5911 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5912 {
5913     struct flock64 fl64;
5914 #ifdef F_GETOWN_EX
5915     struct f_owner_ex fox;
5916     struct target_f_owner_ex *target_fox;
5917 #endif
5918     abi_long ret;
5919     int host_cmd = target_to_host_fcntl_cmd(cmd);
5920 
5921     if (host_cmd == -TARGET_EINVAL)
5922 	    return host_cmd;
5923 
5924     switch(cmd) {
5925     case TARGET_F_GETLK:
5926         ret = copy_from_user_flock(&fl64, arg);
5927         if (ret) {
5928             return ret;
5929         }
5930         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5931         if (ret == 0) {
5932             ret = copy_to_user_flock(arg, &fl64);
5933         }
5934         break;
5935 
5936     case TARGET_F_SETLK:
5937     case TARGET_F_SETLKW:
5938         ret = copy_from_user_flock(&fl64, arg);
5939         if (ret) {
5940             return ret;
5941         }
5942         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5943         break;
5944 
5945     case TARGET_F_GETLK64:
5946         ret = copy_from_user_flock64(&fl64, arg);
5947         if (ret) {
5948             return ret;
5949         }
5950         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5951         if (ret == 0) {
5952             ret = copy_to_user_flock64(arg, &fl64);
5953         }
5954         break;
5955     case TARGET_F_SETLK64:
5956     case TARGET_F_SETLKW64:
5957         ret = copy_from_user_flock64(&fl64, arg);
5958         if (ret) {
5959             return ret;
5960         }
5961         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5962         break;
5963 
5964     case TARGET_F_GETFL:
5965         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5966         if (ret >= 0) {
5967             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5968         }
5969         break;
5970 
5971     case TARGET_F_SETFL:
5972         ret = get_errno(safe_fcntl(fd, host_cmd,
5973                                    target_to_host_bitmask(arg,
5974                                                           fcntl_flags_tbl)));
5975         break;
5976 
5977 #ifdef F_GETOWN_EX
5978     case TARGET_F_GETOWN_EX:
5979         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5980         if (ret >= 0) {
5981             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5982                 return -TARGET_EFAULT;
5983             target_fox->type = tswap32(fox.type);
5984             target_fox->pid = tswap32(fox.pid);
5985             unlock_user_struct(target_fox, arg, 1);
5986         }
5987         break;
5988 #endif
5989 
5990 #ifdef F_SETOWN_EX
5991     case TARGET_F_SETOWN_EX:
5992         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5993             return -TARGET_EFAULT;
5994         fox.type = tswap32(target_fox->type);
5995         fox.pid = tswap32(target_fox->pid);
5996         unlock_user_struct(target_fox, arg, 0);
5997         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5998         break;
5999 #endif
6000 
6001     case TARGET_F_SETOWN:
6002     case TARGET_F_GETOWN:
6003     case TARGET_F_SETSIG:
6004     case TARGET_F_GETSIG:
6005     case TARGET_F_SETLEASE:
6006     case TARGET_F_GETLEASE:
6007     case TARGET_F_SETPIPE_SZ:
6008     case TARGET_F_GETPIPE_SZ:
6009         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6010         break;
6011 
6012     default:
6013         ret = get_errno(safe_fcntl(fd, cmd, arg));
6014         break;
6015     }
6016     return ret;
6017 }
6018 
6019 #ifdef USE_UID16
6020 
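/* With 16-bit UID/GID syscalls, IDs above 65535 are clamped to 65534 (the
 * traditional overflow ID) on the way down, and the 16-bit value -1 is
 * sign-extended on the way up so that "unchanged" arguments keep their
 * special meaning.
 */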
6021 static inline int high2lowuid(int uid)
6022 {
6023     if (uid > 65535)
6024         return 65534;
6025     else
6026         return uid;
6027 }
6028 
6029 static inline int high2lowgid(int gid)
6030 {
6031     if (gid > 65535)
6032         return 65534;
6033     else
6034         return gid;
6035 }
6036 
6037 static inline int low2highuid(int uid)
6038 {
6039     if ((int16_t)uid == -1)
6040         return -1;
6041     else
6042         return uid;
6043 }
6044 
6045 static inline int low2highgid(int gid)
6046 {
6047     if ((int16_t)gid == -1)
6048         return -1;
6049     else
6050         return gid;
6051 }
6052 static inline int tswapid(int id)
6053 {
6054     return tswap16(id);
6055 }
6056 
6057 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6058 
6059 #else /* !USE_UID16 */
6060 static inline int high2lowuid(int uid)
6061 {
6062     return uid;
6063 }
6064 static inline int high2lowgid(int gid)
6065 {
6066     return gid;
6067 }
6068 static inline int low2highuid(int uid)
6069 {
6070     return uid;
6071 }
6072 static inline int low2highgid(int gid)
6073 {
6074     return gid;
6075 }
6076 static inline int tswapid(int id)
6077 {
6078     return tswap32(id);
6079 }
6080 
6081 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6082 
6083 #endif /* USE_UID16 */
6084 
6085 /* We must do direct syscalls for setting UID/GID, because we want to
6086  * implement the Linux system call semantics of "change only for this thread",
6087  * not the libc/POSIX semantics of "change for all threads in process".
6088  * (See http://ewontfix.com/17/ for more details.)
6089  * We use the 32-bit version of the syscalls if present; if it is not
6090  * then either the host architecture supports 32-bit UIDs natively with
6091  * the standard syscall, or the 16-bit UID is the best we can do.
6092  */
6093 #ifdef __NR_setuid32
6094 #define __NR_sys_setuid __NR_setuid32
6095 #else
6096 #define __NR_sys_setuid __NR_setuid
6097 #endif
6098 #ifdef __NR_setgid32
6099 #define __NR_sys_setgid __NR_setgid32
6100 #else
6101 #define __NR_sys_setgid __NR_setgid
6102 #endif
6103 #ifdef __NR_setresuid32
6104 #define __NR_sys_setresuid __NR_setresuid32
6105 #else
6106 #define __NR_sys_setresuid __NR_setresuid
6107 #endif
6108 #ifdef __NR_setresgid32
6109 #define __NR_sys_setresgid __NR_setresgid32
6110 #else
6111 #define __NR_sys_setresgid __NR_setresgid
6112 #endif
6113 
6114 _syscall1(int, sys_setuid, uid_t, uid)
6115 _syscall1(int, sys_setgid, gid_t, gid)
6116 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6117 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6118 
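/* syscall_init() registers the structure descriptions from syscall_types.h
 * with the thunk layer, builds the reverse errno translation table, and
 * patches ioctl numbers whose size field must be filled in from the argument
 * type at runtime.
 */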
6119 void syscall_init(void)
6120 {
6121     IOCTLEntry *ie;
6122     const argtype *arg_type;
6123     int size;
6124     int i;
6125 
6126     thunk_init(STRUCT_MAX);
6127 
6128 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6129 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6130 #include "syscall_types.h"
6131 #undef STRUCT
6132 #undef STRUCT_SPECIAL
6133 
6134     /* Build target_to_host_errno_table[] from
6135      * host_to_target_errno_table[]. */
6136     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6137         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6138     }
6139 
6140     /* We patch the ioctl size if necessary. We rely on the fact that
6141        no ioctl has all bits set to '1' in the size field. */
6142     ie = ioctl_entries;
6143     while (ie->target_cmd != 0) {
6144         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6145             TARGET_IOC_SIZEMASK) {
6146             arg_type = ie->arg_type;
6147             if (arg_type[0] != TYPE_PTR) {
6148                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6149                         ie->target_cmd);
6150                 exit(1);
6151             }
6152             arg_type++;
6153             size = thunk_type_size(arg_type, 0);
6154             ie->target_cmd = (ie->target_cmd &
6155                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6156                 (size << TARGET_IOC_SIZESHIFT);
6157         }
6158 
6159         /* automatic consistency check if same arch */
6160 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6161     (defined(__x86_64__) && defined(TARGET_X86_64))
6162         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6163             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6164                     ie->name, ie->target_cmd, ie->host_cmd);
6165         }
6166 #endif
6167         ie++;
6168     }
6169 }
6170 
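/* On 32-bit ABIs a 64-bit file offset is passed as two 32-bit register
 * halves; target_offset64() reassembles it, picking the high word according
 * to the target's endianness.
 */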
6171 #if TARGET_ABI_BITS == 32
6172 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6173 {
6174 #ifdef TARGET_WORDS_BIGENDIAN
6175     return ((uint64_t)word0 << 32) | word1;
6176 #else
6177     return ((uint64_t)word1 << 32) | word0;
6178 #endif
6179 }
6180 #else /* TARGET_ABI_BITS == 32 */
6181 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6182 {
6183     return word0;
6184 }
6185 #endif /* TARGET_ABI_BITS != 32 */
6186 
6187 #ifdef TARGET_NR_truncate64
6188 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6189                                          abi_long arg2,
6190                                          abi_long arg3,
6191                                          abi_long arg4)
6192 {
6193     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6194         arg2 = arg3;
6195         arg3 = arg4;
6196     }
6197     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6198 }
6199 #endif
6200 
6201 #ifdef TARGET_NR_ftruncate64
6202 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6203                                           abi_long arg2,
6204                                           abi_long arg3,
6205                                           abi_long arg4)
6206 {
6207     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6208         arg2 = arg3;
6209         arg3 = arg4;
6210     }
6211     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6212 }
6213 #endif
6214 
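/*
 * Copy a timespec between guest memory and a host struct timespec,
 * converting the fields as needed.  Both helpers return 0 on success or
 * -TARGET_EFAULT if the guest address cannot be accessed.
 */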
6215 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6216                                                abi_ulong target_addr)
6217 {
6218     struct target_timespec *target_ts;
6219 
6220     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6221         return -TARGET_EFAULT;
6222     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6223     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6224     unlock_user_struct(target_ts, target_addr, 0);
6225     return 0;
6226 }
6227 
6228 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6229                                                struct timespec *host_ts)
6230 {
6231     struct target_timespec *target_ts;
6232 
6233     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6234         return -TARGET_EFAULT;
6235     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6236     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6237     unlock_user_struct(target_ts, target_addr, 1);
6238     return 0;
6239 }
6240 
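/* The equivalent conversions for struct itimerspec (interval/value pair). */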
6241 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6242                                                  abi_ulong target_addr)
6243 {
6244     struct target_itimerspec *target_itspec;
6245 
6246     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6247         return -TARGET_EFAULT;
6248     }
6249 
6250     host_itspec->it_interval.tv_sec =
6251                             tswapal(target_itspec->it_interval.tv_sec);
6252     host_itspec->it_interval.tv_nsec =
6253                             tswapal(target_itspec->it_interval.tv_nsec);
6254     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6255     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6256 
6257     unlock_user_struct(target_itspec, target_addr, 1);
6258     return 0;
6259 }
6260 
6261 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6262                                                struct itimerspec *host_its)
6263 {
6264     struct target_itimerspec *target_itspec;
6265 
6266     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6267         return -TARGET_EFAULT;
6268     }
6269 
6270     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6271     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6272 
6273     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6274     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6275 
6276     unlock_user_struct(target_itspec, target_addr, 0);
6277     return 0;
6278 }
6279 
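/*
 * Field-by-field conversion of struct timex (the adjtimex() argument),
 * honouring the guest layout defined by struct target_timex.
 */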
6280 static inline abi_long target_to_host_timex(struct timex *host_tx,
6281                                             abi_long target_addr)
6282 {
6283     struct target_timex *target_tx;
6284 
6285     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6286         return -TARGET_EFAULT;
6287     }
6288 
6289     __get_user(host_tx->modes, &target_tx->modes);
6290     __get_user(host_tx->offset, &target_tx->offset);
6291     __get_user(host_tx->freq, &target_tx->freq);
6292     __get_user(host_tx->maxerror, &target_tx->maxerror);
6293     __get_user(host_tx->esterror, &target_tx->esterror);
6294     __get_user(host_tx->status, &target_tx->status);
6295     __get_user(host_tx->constant, &target_tx->constant);
6296     __get_user(host_tx->precision, &target_tx->precision);
6297     __get_user(host_tx->tolerance, &target_tx->tolerance);
6298     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6299     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6300     __get_user(host_tx->tick, &target_tx->tick);
6301     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6302     __get_user(host_tx->jitter, &target_tx->jitter);
6303     __get_user(host_tx->shift, &target_tx->shift);
6304     __get_user(host_tx->stabil, &target_tx->stabil);
6305     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6306     __get_user(host_tx->calcnt, &target_tx->calcnt);
6307     __get_user(host_tx->errcnt, &target_tx->errcnt);
6308     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6309     __get_user(host_tx->tai, &target_tx->tai);
6310 
6311     unlock_user_struct(target_tx, target_addr, 0);
6312     return 0;
6313 }
6314 
6315 static inline abi_long host_to_target_timex(abi_long target_addr,
6316                                             struct timex *host_tx)
6317 {
6318     struct target_timex *target_tx;
6319 
6320     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6321         return -TARGET_EFAULT;
6322     }
6323 
6324     __put_user(host_tx->modes, &target_tx->modes);
6325     __put_user(host_tx->offset, &target_tx->offset);
6326     __put_user(host_tx->freq, &target_tx->freq);
6327     __put_user(host_tx->maxerror, &target_tx->maxerror);
6328     __put_user(host_tx->esterror, &target_tx->esterror);
6329     __put_user(host_tx->status, &target_tx->status);
6330     __put_user(host_tx->constant, &target_tx->constant);
6331     __put_user(host_tx->precision, &target_tx->precision);
6332     __put_user(host_tx->tolerance, &target_tx->tolerance);
6333     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6334     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6335     __put_user(host_tx->tick, &target_tx->tick);
6336     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6337     __put_user(host_tx->jitter, &target_tx->jitter);
6338     __put_user(host_tx->shift, &target_tx->shift);
6339     __put_user(host_tx->stabil, &target_tx->stabil);
6340     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6341     __put_user(host_tx->calcnt, &target_tx->calcnt);
6342     __put_user(host_tx->errcnt, &target_tx->errcnt);
6343     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6344     __put_user(host_tx->tai, &target_tx->tai);
6345 
6346     unlock_user_struct(target_tx, target_addr, 1);
6347     return 0;
6348 }
6349 
6350 
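/* Convert a guest struct sigevent into its host representation. */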
6351 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6352                                                abi_ulong target_addr)
6353 {
6354     struct target_sigevent *target_sevp;
6355 
6356     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6357         return -TARGET_EFAULT;
6358     }
6359 
6360     /* This union is awkward on 64 bit systems because it has a 32 bit
6361      * integer and a pointer in it; we follow the conversion approach
6362      * used for handling sigval types in signal.c so the guest should get
6363      * the correct value back even if we did a 64 bit byteswap and it's
6364      * using the 32 bit integer.
6365      */
6366     host_sevp->sigev_value.sival_ptr =
6367         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6368     host_sevp->sigev_signo =
6369         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6370     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6371     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6372 
6373     unlock_user_struct(target_sevp, target_addr, 1);
6374     return 0;
6375 }
6376 
6377 #if defined(TARGET_NR_mlockall)
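/* Translate the guest's mlockall() flag bits into the host's MCL_* values. */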
6378 static inline int target_to_host_mlockall_arg(int arg)
6379 {
6380     int result = 0;
6381 
6382     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6383         result |= MCL_CURRENT;
6384     }
6385     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6386         result |= MCL_FUTURE;
6387     }
6388     return result;
6389 }
6390 #endif
6391 
6392 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6393      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6394      defined(TARGET_NR_newfstatat))
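/*
 * Fill the guest's stat64 (or stat) structure at target_addr from a host
 * struct stat.  ARM EABI guests use their own target_eabi_stat64 layout.
 */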
6395 static inline abi_long host_to_target_stat64(void *cpu_env,
6396                                              abi_ulong target_addr,
6397                                              struct stat *host_st)
6398 {
6399 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6400     if (((CPUARMState *)cpu_env)->eabi) {
6401         struct target_eabi_stat64 *target_st;
6402 
6403         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6404             return -TARGET_EFAULT;
6405         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6406         __put_user(host_st->st_dev, &target_st->st_dev);
6407         __put_user(host_st->st_ino, &target_st->st_ino);
6408 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6409         __put_user(host_st->st_ino, &target_st->__st_ino);
6410 #endif
6411         __put_user(host_st->st_mode, &target_st->st_mode);
6412         __put_user(host_st->st_nlink, &target_st->st_nlink);
6413         __put_user(host_st->st_uid, &target_st->st_uid);
6414         __put_user(host_st->st_gid, &target_st->st_gid);
6415         __put_user(host_st->st_rdev, &target_st->st_rdev);
6416         __put_user(host_st->st_size, &target_st->st_size);
6417         __put_user(host_st->st_blksize, &target_st->st_blksize);
6418         __put_user(host_st->st_blocks, &target_st->st_blocks);
6419         __put_user(host_st->st_atime, &target_st->target_st_atime);
6420         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6421         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6422 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6423         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6424         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6425         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6426 #endif
6427         unlock_user_struct(target_st, target_addr, 1);
6428     } else
6429 #endif
6430     {
6431 #if defined(TARGET_HAS_STRUCT_STAT64)
6432         struct target_stat64 *target_st;
6433 #else
6434         struct target_stat *target_st;
6435 #endif
6436 
6437         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6438             return -TARGET_EFAULT;
6439         memset(target_st, 0, sizeof(*target_st));
6440         __put_user(host_st->st_dev, &target_st->st_dev);
6441         __put_user(host_st->st_ino, &target_st->st_ino);
6442 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6443         __put_user(host_st->st_ino, &target_st->__st_ino);
6444 #endif
6445         __put_user(host_st->st_mode, &target_st->st_mode);
6446         __put_user(host_st->st_nlink, &target_st->st_nlink);
6447         __put_user(host_st->st_uid, &target_st->st_uid);
6448         __put_user(host_st->st_gid, &target_st->st_gid);
6449         __put_user(host_st->st_rdev, &target_st->st_rdev);
6450         /* XXX: better use of kernel struct */
6451         __put_user(host_st->st_size, &target_st->st_size);
6452         __put_user(host_st->st_blksize, &target_st->st_blksize);
6453         __put_user(host_st->st_blocks, &target_st->st_blocks);
6454         __put_user(host_st->st_atime, &target_st->target_st_atime);
6455         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6456         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6457 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6458         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6459         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6460         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6461 #endif
6462         unlock_user_struct(target_st, target_addr, 1);
6463     }
6464 
6465     return 0;
6466 }
6467 #endif
6468 
6469 /* ??? Using host futex calls even when target atomic operations
6470    are not really atomic probably breaks things.  However, implementing
6471    futexes locally would make futexes shared between multiple processes
6472    tricky; they're probably useless anyway, because guest atomic
6473    operations won't work either.  */
6474 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6475                     target_ulong uaddr2, int val3)
6476 {
6477     struct timespec ts, *pts;
6478     int base_op;
6479 
6480     /* ??? We assume FUTEX_* constants are the same on both host
6481        and target.  */
6482 #ifdef FUTEX_CMD_MASK
6483     base_op = op & FUTEX_CMD_MASK;
6484 #else
6485     base_op = op;
6486 #endif
6487     switch (base_op) {
6488     case FUTEX_WAIT:
6489     case FUTEX_WAIT_BITSET:
6490         if (timeout) {
6491             pts = &ts;
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
6493         } else {
6494             pts = NULL;
6495         }
6496         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6497                          pts, NULL, val3));
6498     case FUTEX_WAKE:
6499         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6500     case FUTEX_FD:
6501         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6502     case FUTEX_REQUEUE:
6503     case FUTEX_CMP_REQUEUE:
6504     case FUTEX_WAKE_OP:
6505         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6506            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6507            But the prototype takes a `struct timespec *'; insert casts
6508            to satisfy the compiler.  We do not need to tswap TIMEOUT
6509            since it's not compared to guest memory.  */
6510         pts = (struct timespec *)(uintptr_t) timeout;
6511         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6512                                     g2h(uaddr2),
6513                                     (base_op == FUTEX_CMP_REQUEUE
6514                                      ? tswap32(val3)
6515                                      : val3)));
6516     default:
6517         return -TARGET_ENOSYS;
6518     }
6519 }
6520 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
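/*
 * name_to_handle_at(2): the handle is copied back to the guest as opaque
 * data; only handle_bytes and handle_type are byteswapped.
 */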
6521 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6522                                      abi_long handle, abi_long mount_id,
6523                                      abi_long flags)
6524 {
6525     struct file_handle *target_fh;
6526     struct file_handle *fh;
6527     int mid = 0;
6528     abi_long ret;
6529     char *name;
6530     unsigned int size, total_size;
6531 
6532     if (get_user_s32(size, handle)) {
6533         return -TARGET_EFAULT;
6534     }
6535 
6536     name = lock_user_string(pathname);
6537     if (!name) {
6538         return -TARGET_EFAULT;
6539     }
6540 
6541     total_size = sizeof(struct file_handle) + size;
6542     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6543     if (!target_fh) {
6544         unlock_user(name, pathname, 0);
6545         return -TARGET_EFAULT;
6546     }
6547 
6548     fh = g_malloc0(total_size);
6549     fh->handle_bytes = size;
6550 
6551     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6552     unlock_user(name, pathname, 0);
6553 
6554     /* man name_to_handle_at(2):
6555      * Other than the use of the handle_bytes field, the caller should treat
6556      * the file_handle structure as an opaque data type
6557      */
6558 
6559     memcpy(target_fh, fh, total_size);
6560     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6561     target_fh->handle_type = tswap32(fh->handle_type);
6562     g_free(fh);
6563     unlock_user(target_fh, handle, total_size);
6564 
6565     if (put_user_s32(mid, mount_id)) {
6566         return -TARGET_EFAULT;
6567     }
6568 
6569     return ret;
6570 
6571 }
6572 #endif
6573 
6574 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
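/* open_by_handle_at(2): rebuild the host file_handle from the guest's copy. */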
6575 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6576                                      abi_long flags)
6577 {
6578     struct file_handle *target_fh;
6579     struct file_handle *fh;
6580     unsigned int size, total_size;
6581     abi_long ret;
6582 
6583     if (get_user_s32(size, handle)) {
6584         return -TARGET_EFAULT;
6585     }
6586 
6587     total_size = sizeof(struct file_handle) + size;
6588     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6589     if (!target_fh) {
6590         return -TARGET_EFAULT;
6591     }
6592 
6593     fh = g_memdup(target_fh, total_size);
6594     fh->handle_bytes = size;
6595     fh->handle_type = tswap32(target_fh->handle_type);
6596 
6597     ret = get_errno(open_by_handle_at(mount_fd, fh,
6598                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6599 
6600     g_free(fh);
6601 
6602     unlock_user(target_fh, handle, total_size);
6603 
6604     return ret;
6605 }
6606 #endif
6607 
6608 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6609 
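/*
 * Common helper for signalfd()/signalfd4(): convert the guest signal mask
 * and flags, create the host signalfd, and register an fd translator so
 * that data read from it can be converted for the guest.
 */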
6610 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6611 {
6612     int host_flags;
6613     target_sigset_t *target_mask;
6614     sigset_t host_mask;
6615     abi_long ret;
6616 
6617     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6618         return -TARGET_EINVAL;
6619     }
6620     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6621         return -TARGET_EFAULT;
6622     }
6623 
6624     target_to_host_sigset(&host_mask, target_mask);
6625 
6626     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6627 
6628     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6629     if (ret >= 0) {
6630         fd_trans_register(ret, &target_signalfd_trans);
6631     }
6632 
6633     unlock_user_struct(target_mask, mask, 0);
6634 
6635     return ret;
6636 }
6637 #endif
6638 
6639 /* Map host to target signal numbers for the wait family of syscalls.
6640    Assume all other status bits are the same.  */
6641 int host_to_target_waitstatus(int status)
6642 {
6643     if (WIFSIGNALED(status)) {
6644         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6645     }
6646     if (WIFSTOPPED(status)) {
6647         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6648                | (status & 0xff);
6649     }
6650     return status;
6651 }
6652 
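/* Synthesize /proc/self/cmdline from the guest's saved argv (NUL-separated). */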
6653 static int open_self_cmdline(void *cpu_env, int fd)
6654 {
6655     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6656     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6657     int i;
6658 
6659     for (i = 0; i < bprm->argc; i++) {
6660         size_t len = strlen(bprm->argv[i]) + 1;
6661 
6662         if (write(fd, bprm->argv[i], len) != len) {
6663             return -1;
6664         }
6665     }
6666 
6667     return 0;
6668 }
6669 
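/*
 * Synthesize /proc/self/maps: walk the host's own maps, keep only the
 * ranges that correspond to valid guest addresses, and print them
 * translated into guest addresses (tagging the guest stack).
 */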
6670 static int open_self_maps(void *cpu_env, int fd)
6671 {
6672     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6673     TaskState *ts = cpu->opaque;
6674     FILE *fp;
6675     char *line = NULL;
6676     size_t len = 0;
6677     ssize_t read;
6678 
6679     fp = fopen("/proc/self/maps", "r");
6680     if (fp == NULL) {
6681         return -1;
6682     }
6683 
6684     while ((read = getline(&line, &len, fp)) != -1) {
6685         int fields, dev_maj, dev_min, inode;
6686         uint64_t min, max, offset;
6687         char flag_r, flag_w, flag_x, flag_p;
6688         char path[512] = "";
6689         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6690                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6691                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6692 
6693         if ((fields < 10) || (fields > 11)) {
6694             continue;
6695         }
6696         if (h2g_valid(min)) {
6697             int flags = page_get_flags(h2g(min));
6698             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6699             if (page_check_range(h2g(min), max - min, flags) == -1) {
6700                 continue;
6701             }
6702             if (h2g(min) == ts->info->stack_limit) {
6703                 pstrcpy(path, sizeof(path), "      [stack]");
6704             }
6705             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6706                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6707                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6708                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6709                     path[0] ? "         " : "", path);
6710         }
6711     }
6712 
6713     free(line);
6714     fclose(fp);
6715 
6716     return 0;
6717 }
6718 
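/*
 * Synthesize a minimal /proc/self/stat: only the pid, the command name
 * and the start-of-stack field are filled in; every other field reads
 * as zero.
 */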
6719 static int open_self_stat(void *cpu_env, int fd)
6720 {
6721     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6722     TaskState *ts = cpu->opaque;
6723     abi_ulong start_stack = ts->info->start_stack;
6724     int i;
6725 
6726     for (i = 0; i < 44; i++) {
6727       char buf[128];
6728       int len;
6729       uint64_t val = 0;
6730 
6731       if (i == 0) {
6732         /* pid */
6733         val = getpid();
6734         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6735       } else if (i == 1) {
6736         /* app name */
6737         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6738       } else if (i == 27) {
6739         /* stack bottom */
6740         val = start_stack;
6741         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6742       } else {
6743         /* every other field is reported as zero */
6744         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6745       }
6746 
6747       len = strlen(buf);
6748       if (write(fd, buf, len) != len) {
6749           return -1;
6750       }
6751     }
6752 
6753     return 0;
6754 }
6755 
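/* Expose the guest's saved auxiliary vector as /proc/self/auxv. */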
6756 static int open_self_auxv(void *cpu_env, int fd)
6757 {
6758     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6759     TaskState *ts = cpu->opaque;
6760     abi_ulong auxv = ts->info->saved_auxv;
6761     abi_ulong len = ts->info->auxv_len;
6762     char *ptr;
6763 
6764     /*
6765      * The auxiliary vector is stored on the target process stack.
6766      * Read in the whole auxv vector and copy it to the file.
6767      */
6768     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6769     if (ptr != NULL) {
6770         while (len > 0) {
6771             ssize_t r;
6772             r = write(fd, ptr, len);
6773             if (r <= 0) {
6774                 break;
6775             }
6776             len -= r;
6777             ptr += r;
6778         }
6779         lseek(fd, 0, SEEK_SET);
6780         unlock_user(ptr, auxv, len);
6781     }
6782 
6783     return 0;
6784 }
6785 
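/*
 * Return non-zero if filename names the given entry under /proc/self/ or
 * under /proc/<pid>/ for our own pid.
 */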
6786 static int is_proc_myself(const char *filename, const char *entry)
6787 {
6788     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6789         filename += strlen("/proc/");
6790         if (!strncmp(filename, "self/", strlen("self/"))) {
6791             filename += strlen("self/");
6792         } else if (*filename >= '1' && *filename <= '9') {
6793             char myself[80];
6794             snprintf(myself, sizeof(myself), "%d/", getpid());
6795             if (!strncmp(filename, myself, strlen(myself))) {
6796                 filename += strlen(myself);
6797             } else {
6798                 return 0;
6799             }
6800         } else {
6801             return 0;
6802         }
6803         if (!strcmp(filename, entry)) {
6804             return 1;
6805         }
6806     }
6807     return 0;
6808 }
6809 
6810 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6811     defined(TARGET_SPARC) || defined(TARGET_M68K)
6812 static int is_proc(const char *filename, const char *entry)
6813 {
6814     return strcmp(filename, entry) == 0;
6815 }
6816 #endif
6817 
6818 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
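/*
 * When host and guest endianness differ, /proc/net/route must be rewritten
 * with the destination, gateway and mask fields byteswapped for the guest.
 */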
6819 static int open_net_route(void *cpu_env, int fd)
6820 {
6821     FILE *fp;
6822     char *line = NULL;
6823     size_t len = 0;
6824     ssize_t read;
6825 
6826     fp = fopen("/proc/net/route", "r");
6827     if (fp == NULL) {
6828         return -1;
6829     }
6830 
6831     /* read header */
6832 
6833     read = getline(&line, &len, fp);
6834     dprintf(fd, "%s", line);
6835 
6836     /* read routes */
6837 
6838     while ((read = getline(&line, &len, fp)) != -1) {
6839         char iface[16];
6840         uint32_t dest, gw, mask;
6841         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6842         int fields;
6843 
6844         fields = sscanf(line,
6845                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6846                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6847                         &mask, &mtu, &window, &irtt);
6848         if (fields != 11) {
6849             continue;
6850         }
6851         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6852                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6853                 metric, tswap32(mask), mtu, window, irtt);
6854     }
6855 
6856     free(line);
6857     fclose(fp);
6858 
6859     return 0;
6860 }
6861 #endif
6862 
6863 #if defined(TARGET_SPARC)
6864 static int open_cpuinfo(void *cpu_env, int fd)
6865 {
6866     dprintf(fd, "type\t\t: sun4u\n");
6867     return 0;
6868 }
6869 #endif
6870 
6871 #if defined(TARGET_M68K)
6872 static int open_hardware(void *cpu_env, int fd)
6873 {
6874     dprintf(fd, "Model:\t\tqemu-m68k\n");
6875     return 0;
6876 }
6877 #endif
6878 
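/*
 * openat() implementation that intercepts certain /proc paths (maps, stat,
 * auxv, cmdline and a few arch-specific files) and serves synthesized
 * contents from an unlinked temporary file instead of the host's versions.
 */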
6879 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6880 {
6881     struct fake_open {
6882         const char *filename;
6883         int (*fill)(void *cpu_env, int fd);
6884         int (*cmp)(const char *s1, const char *s2);
6885     };
6886     const struct fake_open *fake_open;
6887     static const struct fake_open fakes[] = {
6888         { "maps", open_self_maps, is_proc_myself },
6889         { "stat", open_self_stat, is_proc_myself },
6890         { "auxv", open_self_auxv, is_proc_myself },
6891         { "cmdline", open_self_cmdline, is_proc_myself },
6892 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6893         { "/proc/net/route", open_net_route, is_proc },
6894 #endif
6895 #if defined(TARGET_SPARC)
6896         { "/proc/cpuinfo", open_cpuinfo, is_proc },
6897 #endif
6898 #if defined(TARGET_M68K)
6899         { "/proc/hardware", open_hardware, is_proc },
6900 #endif
6901         { NULL, NULL, NULL }
6902     };
6903 
6904     if (is_proc_myself(pathname, "exe")) {
6905         int execfd = qemu_getauxval(AT_EXECFD);
6906         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6907     }
6908 
6909     for (fake_open = fakes; fake_open->filename; fake_open++) {
6910         if (fake_open->cmp(pathname, fake_open->filename)) {
6911             break;
6912         }
6913     }
6914 
6915     if (fake_open->filename) {
6916         const char *tmpdir;
6917         char filename[PATH_MAX];
6918         int fd, r;
6919 
6920         /* create a temporary file to hold the synthesized /proc contents */
6921         tmpdir = getenv("TMPDIR");
6922         if (!tmpdir)
6923             tmpdir = "/tmp";
6924         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6925         fd = mkstemp(filename);
6926         if (fd < 0) {
6927             return fd;
6928         }
6929         unlink(filename);
6930 
6931         if ((r = fake_open->fill(cpu_env, fd))) {
6932             int e = errno;
6933             close(fd);
6934             errno = e;
6935             return r;
6936         }
6937         lseek(fd, 0, SEEK_SET);
6938 
6939         return fd;
6940     }
6941 
6942     return safe_openat(dirfd, path(pathname), flags, mode);
6943 }
6944 
6945 #define TIMER_MAGIC 0x0caf0000
6946 #define TIMER_MAGIC_MASK 0xffff0000
6947 
6948 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6949 static target_timer_t get_timer_id(abi_long arg)
6950 {
6951     target_timer_t timerid = arg;
6952 
6953     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6954         return -TARGET_EINVAL;
6955     }
6956 
6957     timerid &= 0xffff;
6958 
6959     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6960         return -TARGET_EINVAL;
6961     }
6962 
6963     return timerid;
6964 }
6965 
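/*
 * Convert a CPU mask between guest and host representations bit by bit,
 * coping with the differing widths of abi_ulong and the host's
 * unsigned long; host_to_target_cpu_mask() below is the inverse.
 */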
6966 static int target_to_host_cpu_mask(unsigned long *host_mask,
6967                                    size_t host_size,
6968                                    abi_ulong target_addr,
6969                                    size_t target_size)
6970 {
6971     unsigned target_bits = sizeof(abi_ulong) * 8;
6972     unsigned host_bits = sizeof(*host_mask) * 8;
6973     abi_ulong *target_mask;
6974     unsigned i, j;
6975 
6976     assert(host_size >= target_size);
6977 
6978     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6979     if (!target_mask) {
6980         return -TARGET_EFAULT;
6981     }
6982     memset(host_mask, 0, host_size);
6983 
6984     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6985         unsigned bit = i * target_bits;
6986         abi_ulong val;
6987 
6988         __get_user(val, &target_mask[i]);
6989         for (j = 0; j < target_bits; j++, bit++) {
6990             if (val & (1UL << j)) {
6991                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6992             }
6993         }
6994     }
6995 
6996     unlock_user(target_mask, target_addr, 0);
6997     return 0;
6998 }
6999 
7000 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7001                                    size_t host_size,
7002                                    abi_ulong target_addr,
7003                                    size_t target_size)
7004 {
7005     unsigned target_bits = sizeof(abi_ulong) * 8;
7006     unsigned host_bits = sizeof(*host_mask) * 8;
7007     abi_ulong *target_mask;
7008     unsigned i, j;
7009 
7010     assert(host_size >= target_size);
7011 
7012     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7013     if (!target_mask) {
7014         return -TARGET_EFAULT;
7015     }
7016 
7017     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7018         unsigned bit = i * target_bits;
7019         abi_ulong val = 0;
7020 
7021         for (j = 0; j < target_bits; j++, bit++) {
7022             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7023                 val |= 1UL << j;
7024             }
7025         }
7026         __put_user(val, &target_mask[i]);
7027     }
7028 
7029     unlock_user(target_mask, target_addr, target_size);
7030     return 0;
7031 }
7032 
7033 /* This is an internal helper for do_syscall so that it is easier
7034  * to have a single return point, so that actions, such as logging
7035  * of syscall results, can be performed.
7036  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7037  */
7038 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7039                             abi_long arg2, abi_long arg3, abi_long arg4,
7040                             abi_long arg5, abi_long arg6, abi_long arg7,
7041                             abi_long arg8)
7042 {
7043     CPUState *cpu = env_cpu(cpu_env);
7044     abi_long ret;
7045 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7046     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7047     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7048     struct stat st;
7049 #endif
7050 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7051     || defined(TARGET_NR_fstatfs)
7052     struct statfs stfs;
7053 #endif
7054     void *p;
7055 
7056     switch(num) {
7057     case TARGET_NR_exit:
7058         /* In old applications this may be used to implement _exit(2).
7059            However in threaded applications it is used for thread termination,
7060            and _exit_group is used for application termination.
7061            Do thread termination if we have more than one thread.  */
7062 
7063         if (block_signals()) {
7064             return -TARGET_ERESTARTSYS;
7065         }
7066 
7067         cpu_list_lock();
7068 
7069         if (CPU_NEXT(first_cpu)) {
7070             TaskState *ts;
7071 
7072             /* Remove the CPU from the list.  */
7073             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7074 
7075             cpu_list_unlock();
7076 
7077             ts = cpu->opaque;
7078             if (ts->child_tidptr) {
7079                 put_user_u32(0, ts->child_tidptr);
7080                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7081                           NULL, NULL, 0);
7082             }
7083             thread_cpu = NULL;
7084             object_unref(OBJECT(cpu));
7085             g_free(ts);
7086             rcu_unregister_thread();
7087             pthread_exit(NULL);
7088         }
7089 
7090         cpu_list_unlock();
7091         preexit_cleanup(cpu_env, arg1);
7092         _exit(arg1);
7093         return 0; /* avoid warning */
7094     case TARGET_NR_read:
7095         if (arg2 == 0 && arg3 == 0) {
7096             return get_errno(safe_read(arg1, 0, 0));
7097         } else {
7098             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7099                 return -TARGET_EFAULT;
7100             ret = get_errno(safe_read(arg1, p, arg3));
7101             if (ret >= 0 &&
7102                 fd_trans_host_to_target_data(arg1)) {
7103                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7104             }
7105             unlock_user(p, arg2, ret);
7106         }
7107         return ret;
7108     case TARGET_NR_write:
7109         if (arg2 == 0 && arg3 == 0) {
7110             return get_errno(safe_write(arg1, 0, 0));
7111         }
7112         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7113             return -TARGET_EFAULT;
7114         if (fd_trans_target_to_host_data(arg1)) {
7115             void *copy = g_malloc(arg3);
7116             memcpy(copy, p, arg3);
7117             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7118             if (ret >= 0) {
7119                 ret = get_errno(safe_write(arg1, copy, ret));
7120             }
7121             g_free(copy);
7122         } else {
7123             ret = get_errno(safe_write(arg1, p, arg3));
7124         }
7125         unlock_user(p, arg2, 0);
7126         return ret;
7127 
7128 #ifdef TARGET_NR_open
7129     case TARGET_NR_open:
7130         if (!(p = lock_user_string(arg1)))
7131             return -TARGET_EFAULT;
7132         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7133                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7134                                   arg3));
7135         fd_trans_unregister(ret);
7136         unlock_user(p, arg1, 0);
7137         return ret;
7138 #endif
7139     case TARGET_NR_openat:
7140         if (!(p = lock_user_string(arg2)))
7141             return -TARGET_EFAULT;
7142         ret = get_errno(do_openat(cpu_env, arg1, p,
7143                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7144                                   arg4));
7145         fd_trans_unregister(ret);
7146         unlock_user(p, arg2, 0);
7147         return ret;
7148 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7149     case TARGET_NR_name_to_handle_at:
7150         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7151         return ret;
7152 #endif
7153 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7154     case TARGET_NR_open_by_handle_at:
7155         ret = do_open_by_handle_at(arg1, arg2, arg3);
7156         fd_trans_unregister(ret);
7157         return ret;
7158 #endif
7159     case TARGET_NR_close:
7160         fd_trans_unregister(arg1);
7161         return get_errno(close(arg1));
7162 
7163     case TARGET_NR_brk:
7164         return do_brk(arg1);
7165 #ifdef TARGET_NR_fork
7166     case TARGET_NR_fork:
7167         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7168 #endif
7169 #ifdef TARGET_NR_waitpid
7170     case TARGET_NR_waitpid:
7171         {
7172             int status;
7173             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7174             if (!is_error(ret) && arg2 && ret
7175                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7176                 return -TARGET_EFAULT;
7177         }
7178         return ret;
7179 #endif
7180 #ifdef TARGET_NR_waitid
7181     case TARGET_NR_waitid:
7182         {
7183             siginfo_t info;
7184             info.si_pid = 0;
7185             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7186             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7187                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7188                     return -TARGET_EFAULT;
7189                 host_to_target_siginfo(p, &info);
7190                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7191             }
7192         }
7193         return ret;
7194 #endif
7195 #ifdef TARGET_NR_creat /* not on alpha */
7196     case TARGET_NR_creat:
7197         if (!(p = lock_user_string(arg1)))
7198             return -TARGET_EFAULT;
7199         ret = get_errno(creat(p, arg2));
7200         fd_trans_unregister(ret);
7201         unlock_user(p, arg1, 0);
7202         return ret;
7203 #endif
7204 #ifdef TARGET_NR_link
7205     case TARGET_NR_link:
7206         {
7207             void * p2;
7208             p = lock_user_string(arg1);
7209             p2 = lock_user_string(arg2);
7210             if (!p || !p2)
7211                 ret = -TARGET_EFAULT;
7212             else
7213                 ret = get_errno(link(p, p2));
7214             unlock_user(p2, arg2, 0);
7215             unlock_user(p, arg1, 0);
7216         }
7217         return ret;
7218 #endif
7219 #if defined(TARGET_NR_linkat)
7220     case TARGET_NR_linkat:
7221         {
7222             void * p2 = NULL;
7223             if (!arg2 || !arg4)
7224                 return -TARGET_EFAULT;
7225             p  = lock_user_string(arg2);
7226             p2 = lock_user_string(arg4);
7227             if (!p || !p2)
7228                 ret = -TARGET_EFAULT;
7229             else
7230                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7231             unlock_user(p, arg2, 0);
7232             unlock_user(p2, arg4, 0);
7233         }
7234         return ret;
7235 #endif
7236 #ifdef TARGET_NR_unlink
7237     case TARGET_NR_unlink:
7238         if (!(p = lock_user_string(arg1)))
7239             return -TARGET_EFAULT;
7240         ret = get_errno(unlink(p));
7241         unlock_user(p, arg1, 0);
7242         return ret;
7243 #endif
7244 #if defined(TARGET_NR_unlinkat)
7245     case TARGET_NR_unlinkat:
7246         if (!(p = lock_user_string(arg2)))
7247             return -TARGET_EFAULT;
7248         ret = get_errno(unlinkat(arg1, p, arg3));
7249         unlock_user(p, arg2, 0);
7250         return ret;
7251 #endif
7252     case TARGET_NR_execve:
7253         {
7254             char **argp, **envp;
7255             int argc, envc;
7256             abi_ulong gp;
7257             abi_ulong guest_argp;
7258             abi_ulong guest_envp;
7259             abi_ulong addr;
7260             char **q;
7261             int total_size = 0;
7262 
7263             argc = 0;
7264             guest_argp = arg2;
7265             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7266                 if (get_user_ual(addr, gp))
7267                     return -TARGET_EFAULT;
7268                 if (!addr)
7269                     break;
7270                 argc++;
7271             }
7272             envc = 0;
7273             guest_envp = arg3;
7274             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7275                 if (get_user_ual(addr, gp))
7276                     return -TARGET_EFAULT;
7277                 if (!addr)
7278                     break;
7279                 envc++;
7280             }
7281 
7282             argp = g_new0(char *, argc + 1);
7283             envp = g_new0(char *, envc + 1);
7284 
7285             for (gp = guest_argp, q = argp; gp;
7286                   gp += sizeof(abi_ulong), q++) {
7287                 if (get_user_ual(addr, gp))
7288                     goto execve_efault;
7289                 if (!addr)
7290                     break;
7291                 if (!(*q = lock_user_string(addr)))
7292                     goto execve_efault;
7293                 total_size += strlen(*q) + 1;
7294             }
7295             *q = NULL;
7296 
7297             for (gp = guest_envp, q = envp; gp;
7298                   gp += sizeof(abi_ulong), q++) {
7299                 if (get_user_ual(addr, gp))
7300                     goto execve_efault;
7301                 if (!addr)
7302                     break;
7303                 if (!(*q = lock_user_string(addr)))
7304                     goto execve_efault;
7305                 total_size += strlen(*q) + 1;
7306             }
7307             *q = NULL;
7308 
7309             if (!(p = lock_user_string(arg1)))
7310                 goto execve_efault;
7311             /* Although execve() is not an interruptible syscall it is
7312              * a special case where we must use the safe_syscall wrapper:
7313              * if we allow a signal to happen before we make the host
7314              * syscall then we will 'lose' it, because at the point of
7315              * execve the process leaves QEMU's control. So we use the
7316              * safe syscall wrapper to ensure that we either take the
7317              * signal as a guest signal, or else it does not happen
7318              * before the execve completes and makes it the other
7319              * program's problem.
7320              */
7321             ret = get_errno(safe_execve(p, argp, envp));
7322             unlock_user(p, arg1, 0);
7323 
7324             goto execve_end;
7325 
7326         execve_efault:
7327             ret = -TARGET_EFAULT;
7328 
7329         execve_end:
7330             for (gp = guest_argp, q = argp; *q;
7331                   gp += sizeof(abi_ulong), q++) {
7332                 if (get_user_ual(addr, gp)
7333                     || !addr)
7334                     break;
7335                 unlock_user(*q, addr, 0);
7336             }
7337             for (gp = guest_envp, q = envp; *q;
7338                   gp += sizeof(abi_ulong), q++) {
7339                 if (get_user_ual(addr, gp)
7340                     || !addr)
7341                     break;
7342                 unlock_user(*q, addr, 0);
7343             }
7344 
7345             g_free(argp);
7346             g_free(envp);
7347         }
7348         return ret;
7349     case TARGET_NR_chdir:
7350         if (!(p = lock_user_string(arg1)))
7351             return -TARGET_EFAULT;
7352         ret = get_errno(chdir(p));
7353         unlock_user(p, arg1, 0);
7354         return ret;
7355 #ifdef TARGET_NR_time
7356     case TARGET_NR_time:
7357         {
7358             time_t host_time;
7359             ret = get_errno(time(&host_time));
7360             if (!is_error(ret)
7361                 && arg1
7362                 && put_user_sal(host_time, arg1))
7363                 return -TARGET_EFAULT;
7364         }
7365         return ret;
7366 #endif
7367 #ifdef TARGET_NR_mknod
7368     case TARGET_NR_mknod:
7369         if (!(p = lock_user_string(arg1)))
7370             return -TARGET_EFAULT;
7371         ret = get_errno(mknod(p, arg2, arg3));
7372         unlock_user(p, arg1, 0);
7373         return ret;
7374 #endif
7375 #if defined(TARGET_NR_mknodat)
7376     case TARGET_NR_mknodat:
7377         if (!(p = lock_user_string(arg2)))
7378             return -TARGET_EFAULT;
7379         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7380         unlock_user(p, arg2, 0);
7381         return ret;
7382 #endif
7383 #ifdef TARGET_NR_chmod
7384     case TARGET_NR_chmod:
7385         if (!(p = lock_user_string(arg1)))
7386             return -TARGET_EFAULT;
7387         ret = get_errno(chmod(p, arg2));
7388         unlock_user(p, arg1, 0);
7389         return ret;
7390 #endif
7391 #ifdef TARGET_NR_lseek
7392     case TARGET_NR_lseek:
7393         return get_errno(lseek(arg1, arg2, arg3));
7394 #endif
7395 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7396     /* Alpha specific */
7397     case TARGET_NR_getxpid:
7398         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7399         return get_errno(getpid());
7400 #endif
7401 #ifdef TARGET_NR_getpid
7402     case TARGET_NR_getpid:
7403         return get_errno(getpid());
7404 #endif
7405     case TARGET_NR_mount:
7406         {
7407             /* need to look at the data field */
7408             void *p2, *p3;
7409 
7410             if (arg1) {
7411                 p = lock_user_string(arg1);
7412                 if (!p) {
7413                     return -TARGET_EFAULT;
7414                 }
7415             } else {
7416                 p = NULL;
7417             }
7418 
7419             p2 = lock_user_string(arg2);
7420             if (!p2) {
7421                 if (arg1) {
7422                     unlock_user(p, arg1, 0);
7423                 }
7424                 return -TARGET_EFAULT;
7425             }
7426 
7427             if (arg3) {
7428                 p3 = lock_user_string(arg3);
7429                 if (!p3) {
7430                     if (arg1) {
7431                         unlock_user(p, arg1, 0);
7432                     }
7433                     unlock_user(p2, arg2, 0);
7434                     return -TARGET_EFAULT;
7435                 }
7436             } else {
7437                 p3 = NULL;
7438             }
7439 
7440             /* FIXME - arg5 should be locked, but it isn't clear how to
7441              * do that since it's not guaranteed to be a NULL-terminated
7442              * string.
7443              */
7444             if (!arg5) {
7445                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7446             } else {
7447                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7448             }
7449             ret = get_errno(ret);
7450 
7451             if (arg1) {
7452                 unlock_user(p, arg1, 0);
7453             }
7454             unlock_user(p2, arg2, 0);
7455             if (arg3) {
7456                 unlock_user(p3, arg3, 0);
7457             }
7458         }
7459         return ret;
7460 #ifdef TARGET_NR_umount
7461     case TARGET_NR_umount:
7462         if (!(p = lock_user_string(arg1)))
7463             return -TARGET_EFAULT;
7464         ret = get_errno(umount(p));
7465         unlock_user(p, arg1, 0);
7466         return ret;
7467 #endif
7468 #ifdef TARGET_NR_stime /* not on alpha */
7469     case TARGET_NR_stime:
7470         {
7471             time_t host_time;
7472             if (get_user_sal(host_time, arg1))
7473                 return -TARGET_EFAULT;
7474             return get_errno(stime(&host_time));
7475         }
7476 #endif
7477 #ifdef TARGET_NR_alarm /* not on alpha */
7478     case TARGET_NR_alarm:
7479         return alarm(arg1);
7480 #endif
7481 #ifdef TARGET_NR_pause /* not on alpha */
7482     case TARGET_NR_pause:
7483         if (!block_signals()) {
7484             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7485         }
7486         return -TARGET_EINTR;
7487 #endif
7488 #ifdef TARGET_NR_utime
7489     case TARGET_NR_utime:
7490         {
7491             struct utimbuf tbuf, *host_tbuf;
7492             struct target_utimbuf *target_tbuf;
7493             if (arg2) {
7494                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7495                     return -TARGET_EFAULT;
7496                 tbuf.actime = tswapal(target_tbuf->actime);
7497                 tbuf.modtime = tswapal(target_tbuf->modtime);
7498                 unlock_user_struct(target_tbuf, arg2, 0);
7499                 host_tbuf = &tbuf;
7500             } else {
7501                 host_tbuf = NULL;
7502             }
7503             if (!(p = lock_user_string(arg1)))
7504                 return -TARGET_EFAULT;
7505             ret = get_errno(utime(p, host_tbuf));
7506             unlock_user(p, arg1, 0);
7507         }
7508         return ret;
7509 #endif
7510 #ifdef TARGET_NR_utimes
7511     case TARGET_NR_utimes:
7512         {
7513             struct timeval *tvp, tv[2];
7514             if (arg2) {
7515                 if (copy_from_user_timeval(&tv[0], arg2)
7516                     || copy_from_user_timeval(&tv[1],
7517                                               arg2 + sizeof(struct target_timeval)))
7518                     return -TARGET_EFAULT;
7519                 tvp = tv;
7520             } else {
7521                 tvp = NULL;
7522             }
7523             if (!(p = lock_user_string(arg1)))
7524                 return -TARGET_EFAULT;
7525             ret = get_errno(utimes(p, tvp));
7526             unlock_user(p, arg1, 0);
7527         }
7528         return ret;
7529 #endif
7530 #if defined(TARGET_NR_futimesat)
7531     case TARGET_NR_futimesat:
7532         {
7533             struct timeval *tvp, tv[2];
7534             if (arg3) {
7535                 if (copy_from_user_timeval(&tv[0], arg3)
7536                     || copy_from_user_timeval(&tv[1],
7537                                               arg3 + sizeof(struct target_timeval)))
7538                     return -TARGET_EFAULT;
7539                 tvp = tv;
7540             } else {
7541                 tvp = NULL;
7542             }
7543             if (!(p = lock_user_string(arg2))) {
7544                 return -TARGET_EFAULT;
7545             }
7546             ret = get_errno(futimesat(arg1, path(p), tvp));
7547             unlock_user(p, arg2, 0);
7548         }
7549         return ret;
7550 #endif
7551 #ifdef TARGET_NR_access
7552     case TARGET_NR_access:
7553         if (!(p = lock_user_string(arg1))) {
7554             return -TARGET_EFAULT;
7555         }
7556         ret = get_errno(access(path(p), arg2));
7557         unlock_user(p, arg1, 0);
7558         return ret;
7559 #endif
7560 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7561     case TARGET_NR_faccessat:
7562         if (!(p = lock_user_string(arg2))) {
7563             return -TARGET_EFAULT;
7564         }
7565         ret = get_errno(faccessat(arg1, p, arg3, 0));
7566         unlock_user(p, arg2, 0);
7567         return ret;
7568 #endif
7569 #ifdef TARGET_NR_nice /* not on alpha */
7570     case TARGET_NR_nice:
7571         return get_errno(nice(arg1));
7572 #endif
7573     case TARGET_NR_sync:
7574         sync();
7575         return 0;
7576 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7577     case TARGET_NR_syncfs:
7578         return get_errno(syncfs(arg1));
7579 #endif
7580     case TARGET_NR_kill:
7581         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7582 #ifdef TARGET_NR_rename
7583     case TARGET_NR_rename:
7584         {
7585             void *p2;
7586             p = lock_user_string(arg1);
7587             p2 = lock_user_string(arg2);
7588             if (!p || !p2)
7589                 ret = -TARGET_EFAULT;
7590             else
7591                 ret = get_errno(rename(p, p2));
7592             unlock_user(p2, arg2, 0);
7593             unlock_user(p, arg1, 0);
7594         }
7595         return ret;
7596 #endif
7597 #if defined(TARGET_NR_renameat)
7598     case TARGET_NR_renameat:
7599         {
7600             void *p2;
7601             p  = lock_user_string(arg2);
7602             p2 = lock_user_string(arg4);
7603             if (!p || !p2)
7604                 ret = -TARGET_EFAULT;
7605             else
7606                 ret = get_errno(renameat(arg1, p, arg3, p2));
7607             unlock_user(p2, arg4, 0);
7608             unlock_user(p, arg2, 0);
7609         }
7610         return ret;
7611 #endif
7612 #if defined(TARGET_NR_renameat2)
7613     case TARGET_NR_renameat2:
7614         {
7615             void *p2;
7616             p  = lock_user_string(arg2);
7617             p2 = lock_user_string(arg4);
7618             if (!p || !p2) {
7619                 ret = -TARGET_EFAULT;
7620             } else {
7621                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7622             }
7623             unlock_user(p2, arg4, 0);
7624             unlock_user(p, arg2, 0);
7625         }
7626         return ret;
7627 #endif
7628 #ifdef TARGET_NR_mkdir
7629     case TARGET_NR_mkdir:
7630         if (!(p = lock_user_string(arg1)))
7631             return -TARGET_EFAULT;
7632         ret = get_errno(mkdir(p, arg2));
7633         unlock_user(p, arg1, 0);
7634         return ret;
7635 #endif
7636 #if defined(TARGET_NR_mkdirat)
7637     case TARGET_NR_mkdirat:
7638         if (!(p = lock_user_string(arg2)))
7639             return -TARGET_EFAULT;
7640         ret = get_errno(mkdirat(arg1, p, arg3));
7641         unlock_user(p, arg2, 0);
7642         return ret;
7643 #endif
7644 #ifdef TARGET_NR_rmdir
7645     case TARGET_NR_rmdir:
7646         if (!(p = lock_user_string(arg1)))
7647             return -TARGET_EFAULT;
7648         ret = get_errno(rmdir(p));
7649         unlock_user(p, arg1, 0);
7650         return ret;
7651 #endif
7652     case TARGET_NR_dup:
7653         ret = get_errno(dup(arg1));
7654         if (ret >= 0) {
7655             fd_trans_dup(arg1, ret);
7656         }
7657         return ret;
7658 #ifdef TARGET_NR_pipe
7659     case TARGET_NR_pipe:
7660         return do_pipe(cpu_env, arg1, 0, 0);
7661 #endif
7662 #ifdef TARGET_NR_pipe2
7663     case TARGET_NR_pipe2:
7664         return do_pipe(cpu_env, arg1,
7665                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7666 #endif
7667     case TARGET_NR_times:
7668         {
7669             struct target_tms *tmsp;
7670             struct tms tms;
7671             ret = get_errno(times(&tms));
7672             if (arg1) {
7673                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7674                 if (!tmsp)
7675                     return -TARGET_EFAULT;
7676                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7677                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7678                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7679                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7680             }
7681             if (!is_error(ret))
7682                 ret = host_to_target_clock_t(ret);
7683         }
7684         return ret;
7685     case TARGET_NR_acct:
7686         if (arg1 == 0) {
7687             ret = get_errno(acct(NULL));
7688         } else {
7689             if (!(p = lock_user_string(arg1))) {
7690                 return -TARGET_EFAULT;
7691             }
7692             ret = get_errno(acct(path(p)));
7693             unlock_user(p, arg1, 0);
7694         }
7695         return ret;
7696 #ifdef TARGET_NR_umount2
7697     case TARGET_NR_umount2:
7698         if (!(p = lock_user_string(arg1)))
7699             return -TARGET_EFAULT;
7700         ret = get_errno(umount2(p, arg2));
7701         unlock_user(p, arg1, 0);
7702         return ret;
7703 #endif
7704     case TARGET_NR_ioctl:
7705         return do_ioctl(arg1, arg2, arg3);
7706 #ifdef TARGET_NR_fcntl
7707     case TARGET_NR_fcntl:
7708         return do_fcntl(arg1, arg2, arg3);
7709 #endif
7710     case TARGET_NR_setpgid:
7711         return get_errno(setpgid(arg1, arg2));
7712     case TARGET_NR_umask:
7713         return get_errno(umask(arg1));
7714     case TARGET_NR_chroot:
7715         if (!(p = lock_user_string(arg1)))
7716             return -TARGET_EFAULT;
7717         ret = get_errno(chroot(p));
7718         unlock_user(p, arg1, 0);
7719         return ret;
7720 #ifdef TARGET_NR_dup2
7721     case TARGET_NR_dup2:
7722         ret = get_errno(dup2(arg1, arg2));
7723         if (ret >= 0) {
7724             fd_trans_dup(arg1, arg2);
7725         }
7726         return ret;
7727 #endif
7728 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7729     case TARGET_NR_dup3:
7730     {
7731         int host_flags;
7732 
7733         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7734             return -TARGET_EINVAL;
7735         }
7736         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7737         ret = get_errno(dup3(arg1, arg2, host_flags));
7738         if (ret >= 0) {
7739             fd_trans_dup(arg1, arg2);
7740         }
7741         return ret;
7742     }
7743 #endif
7744 #ifdef TARGET_NR_getppid /* not on alpha */
7745     case TARGET_NR_getppid:
7746         return get_errno(getppid());
7747 #endif
7748 #ifdef TARGET_NR_getpgrp
7749     case TARGET_NR_getpgrp:
7750         return get_errno(getpgrp());
7751 #endif
7752     case TARGET_NR_setsid:
7753         return get_errno(setsid());
7754 #ifdef TARGET_NR_sigaction
7755     case TARGET_NR_sigaction:
7756         {
7757 #if defined(TARGET_ALPHA)
7758             struct target_sigaction act, oact, *pact = 0;
7759             struct target_old_sigaction *old_act;
7760             if (arg2) {
7761                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7762                     return -TARGET_EFAULT;
7763                 act._sa_handler = old_act->_sa_handler;
7764                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7765                 act.sa_flags = old_act->sa_flags;
7766                 act.sa_restorer = 0;
7767                 unlock_user_struct(old_act, arg2, 0);
7768                 pact = &act;
7769             }
7770             ret = get_errno(do_sigaction(arg1, pact, &oact));
7771             if (!is_error(ret) && arg3) {
7772                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7773                     return -TARGET_EFAULT;
7774                 old_act->_sa_handler = oact._sa_handler;
7775                 old_act->sa_mask = oact.sa_mask.sig[0];
7776                 old_act->sa_flags = oact.sa_flags;
7777                 unlock_user_struct(old_act, arg3, 1);
7778             }
7779 #elif defined(TARGET_MIPS)
7780             struct target_sigaction act, oact, *pact, *old_act;
7781 
7782             if (arg2) {
7783                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7784                     return -TARGET_EFAULT;
7785                 act._sa_handler = old_act->_sa_handler;
7786                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7787                 act.sa_flags = old_act->sa_flags;
7788                 unlock_user_struct(old_act, arg2, 0);
7789                 pact = &act;
7790             } else {
7791                 pact = NULL;
7792             }
7793 
7794             ret = get_errno(do_sigaction(arg1, pact, &oact));
7795 
7796             if (!is_error(ret) && arg3) {
7797                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7798                     return -TARGET_EFAULT;
7799                 old_act->_sa_handler = oact._sa_handler;
7800                 old_act->sa_flags = oact.sa_flags;
7801                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7802                 old_act->sa_mask.sig[1] = 0;
7803                 old_act->sa_mask.sig[2] = 0;
7804                 old_act->sa_mask.sig[3] = 0;
7805                 unlock_user_struct(old_act, arg3, 1);
7806             }
7807 #else
7808             struct target_old_sigaction *old_act;
7809             struct target_sigaction act, oact, *pact;
7810             if (arg2) {
7811                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7812                     return -TARGET_EFAULT;
7813                 act._sa_handler = old_act->_sa_handler;
7814                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7815                 act.sa_flags = old_act->sa_flags;
7816                 act.sa_restorer = old_act->sa_restorer;
7817 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7818                 act.ka_restorer = 0;
7819 #endif
7820                 unlock_user_struct(old_act, arg2, 0);
7821                 pact = &act;
7822             } else {
7823                 pact = NULL;
7824             }
7825             ret = get_errno(do_sigaction(arg1, pact, &oact));
7826             if (!is_error(ret) && arg3) {
7827                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7828                     return -TARGET_EFAULT;
7829                 old_act->_sa_handler = oact._sa_handler;
7830                 old_act->sa_mask = oact.sa_mask.sig[0];
7831                 old_act->sa_flags = oact.sa_flags;
7832                 old_act->sa_restorer = oact.sa_restorer;
7833                 unlock_user_struct(old_act, arg3, 1);
7834             }
7835 #endif
7836         }
7837         return ret;
7838 #endif
7839     case TARGET_NR_rt_sigaction:
7840         {
7841 #if defined(TARGET_ALPHA)
7842             /* For Alpha and SPARC this is a 5 argument syscall, with
7843              * a 'restorer' parameter which must be copied into the
7844              * sa_restorer field of the sigaction struct.
7845              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7846              * and arg5 is the sigsetsize.
7847              * Alpha also has a separate rt_sigaction struct that it uses
7848              * here; SPARC uses the usual sigaction struct.
7849              */
7850             struct target_rt_sigaction *rt_act;
7851             struct target_sigaction act, oact, *pact = 0;
7852 
7853             if (arg4 != sizeof(target_sigset_t)) {
7854                 return -TARGET_EINVAL;
7855             }
7856             if (arg2) {
7857                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7858                     return -TARGET_EFAULT;
7859                 act._sa_handler = rt_act->_sa_handler;
7860                 act.sa_mask = rt_act->sa_mask;
7861                 act.sa_flags = rt_act->sa_flags;
7862                 act.sa_restorer = arg5;
7863                 unlock_user_struct(rt_act, arg2, 0);
7864                 pact = &act;
7865             }
7866             ret = get_errno(do_sigaction(arg1, pact, &oact));
7867             if (!is_error(ret) && arg3) {
7868                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7869                     return -TARGET_EFAULT;
7870                 rt_act->_sa_handler = oact._sa_handler;
7871                 rt_act->sa_mask = oact.sa_mask;
7872                 rt_act->sa_flags = oact.sa_flags;
7873                 unlock_user_struct(rt_act, arg3, 1);
7874             }
7875 #else
7876 #ifdef TARGET_SPARC
7877             target_ulong restorer = arg4;
7878             target_ulong sigsetsize = arg5;
7879 #else
7880             target_ulong sigsetsize = arg4;
7881 #endif
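                 /* do_sigaction() works directly on the target-layout sigaction
                  * structs, so the guest buffers only need to be locked and
                  * passed through here. */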
7882             struct target_sigaction *act;
7883             struct target_sigaction *oact;
7884 
7885             if (sigsetsize != sizeof(target_sigset_t)) {
7886                 return -TARGET_EINVAL;
7887             }
7888             if (arg2) {
7889                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7890                     return -TARGET_EFAULT;
7891                 }
7892 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7893                 act->ka_restorer = restorer;
7894 #endif
7895             } else {
7896                 act = NULL;
7897             }
7898             if (arg3) {
7899                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7900                     ret = -TARGET_EFAULT;
7901                     goto rt_sigaction_fail;
7902                 }
7903             } else
7904                 oact = NULL;
7905             ret = get_errno(do_sigaction(arg1, act, oact));
7906         rt_sigaction_fail:
7907             if (act)
7908                 unlock_user_struct(act, arg2, 0);
7909             if (oact)
7910                 unlock_user_struct(oact, arg3, 1);
7911 #endif
7912         }
7913         return ret;
7914 #ifdef TARGET_NR_sgetmask /* not on alpha */
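         /* sgetmask/ssetmask operate on the old-style single-word signal mask. */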
7915     case TARGET_NR_sgetmask:
7916         {
7917             sigset_t cur_set;
7918             abi_ulong target_set;
7919             ret = do_sigprocmask(0, NULL, &cur_set);
7920             if (!ret) {
7921                 host_to_target_old_sigset(&target_set, &cur_set);
7922                 ret = target_set;
7923             }
7924         }
7925         return ret;
7926 #endif
7927 #ifdef TARGET_NR_ssetmask /* not on alpha */
7928     case TARGET_NR_ssetmask:
7929         {
7930             sigset_t set, oset;
7931             abi_ulong target_set = arg1;
7932             target_to_host_old_sigset(&set, &target_set);
7933             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7934             if (!ret) {
7935                 host_to_target_old_sigset(&target_set, &oset);
7936                 ret = target_set;
7937             }
7938         }
7939         return ret;
7940 #endif
7941 #ifdef TARGET_NR_sigprocmask
7942     case TARGET_NR_sigprocmask:
7943         {
7944 #if defined(TARGET_ALPHA)
7945             sigset_t set, oldset;
7946             abi_ulong mask;
7947             int how;
7948 
7949             switch (arg1) {
7950             case TARGET_SIG_BLOCK:
7951                 how = SIG_BLOCK;
7952                 break;
7953             case TARGET_SIG_UNBLOCK:
7954                 how = SIG_UNBLOCK;
7955                 break;
7956             case TARGET_SIG_SETMASK:
7957                 how = SIG_SETMASK;
7958                 break;
7959             default:
7960                 return -TARGET_EINVAL;
7961             }
7962             mask = arg2;
7963             target_to_host_old_sigset(&set, &mask);
7964 
7965             ret = do_sigprocmask(how, &set, &oldset);
7966             if (!is_error(ret)) {
7967                 host_to_target_old_sigset(&mask, &oldset);
7968                 ret = mask;
7969                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7970             }
7971 #else
7972             sigset_t set, oldset, *set_ptr;
7973             int how;
7974 
7975             if (arg2) {
7976                 switch (arg1) {
7977                 case TARGET_SIG_BLOCK:
7978                     how = SIG_BLOCK;
7979                     break;
7980                 case TARGET_SIG_UNBLOCK:
7981                     how = SIG_UNBLOCK;
7982                     break;
7983                 case TARGET_SIG_SETMASK:
7984                     how = SIG_SETMASK;
7985                     break;
7986                 default:
7987                     return -TARGET_EINVAL;
7988                 }
7989                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7990                     return -TARGET_EFAULT;
7991                 target_to_host_old_sigset(&set, p);
7992                 unlock_user(p, arg2, 0);
7993                 set_ptr = &set;
7994             } else {
7995                 how = 0;
7996                 set_ptr = NULL;
7997             }
7998             ret = do_sigprocmask(how, set_ptr, &oldset);
7999             if (!is_error(ret) && arg3) {
8000                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8001                     return -TARGET_EFAULT;
8002                 host_to_target_old_sigset(p, &oldset);
8003                 unlock_user(p, arg3, sizeof(target_sigset_t));
8004             }
8005 #endif
8006         }
8007         return ret;
8008 #endif
8009     case TARGET_NR_rt_sigprocmask:
8010         {
8011             int how = arg1;
8012             sigset_t set, oldset, *set_ptr;
8013 
8014             if (arg4 != sizeof(target_sigset_t)) {
8015                 return -TARGET_EINVAL;
8016             }
8017 
8018             if (arg2) {
8019                 switch(how) {
8020                 case TARGET_SIG_BLOCK:
8021                     how = SIG_BLOCK;
8022                     break;
8023                 case TARGET_SIG_UNBLOCK:
8024                     how = SIG_UNBLOCK;
8025                     break;
8026                 case TARGET_SIG_SETMASK:
8027                     how = SIG_SETMASK;
8028                     break;
8029                 default:
8030                     return -TARGET_EINVAL;
8031                 }
8032                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8033                     return -TARGET_EFAULT;
8034                 target_to_host_sigset(&set, p);
8035                 unlock_user(p, arg2, 0);
8036                 set_ptr = &set;
8037             } else {
8038                 how = 0;
8039                 set_ptr = NULL;
8040             }
8041             ret = do_sigprocmask(how, set_ptr, &oldset);
8042             if (!is_error(ret) && arg3) {
8043                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8044                     return -TARGET_EFAULT;
8045                 host_to_target_sigset(p, &oldset);
8046                 unlock_user(p, arg3, sizeof(target_sigset_t));
8047             }
8048         }
8049         return ret;
8050 #ifdef TARGET_NR_sigpending
8051     case TARGET_NR_sigpending:
8052         {
8053             sigset_t set;
8054             ret = get_errno(sigpending(&set));
8055             if (!is_error(ret)) {
8056                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8057                     return -TARGET_EFAULT;
8058                 host_to_target_old_sigset(p, &set);
8059                 unlock_user(p, arg1, sizeof(target_sigset_t));
8060             }
8061         }
8062         return ret;
8063 #endif
8064     case TARGET_NR_rt_sigpending:
8065         {
8066             sigset_t set;
8067 
8068             /* Yes, this check is >, not != like most. We follow the kernel's
8069              * logic and it does it like this because it implements
8070              * NR_sigpending through the same code path, and in that case
8071              * the old_sigset_t is smaller in size.
8072              */
8073             if (arg2 > sizeof(target_sigset_t)) {
8074                 return -TARGET_EINVAL;
8075             }
8076 
8077             ret = get_errno(sigpending(&set));
8078             if (!is_error(ret)) {
8079                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8080                     return -TARGET_EFAULT;
8081                 host_to_target_sigset(p, &set);
8082                 unlock_user(p, arg1, sizeof(target_sigset_t));
8083             }
8084         }
8085         return ret;
8086 #ifdef TARGET_NR_sigsuspend
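         /* The requested mask is stashed in the TaskState; in_sigsuspend tells
          * the signal handling code that a sigsuspend mask is in effect. */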
8087     case TARGET_NR_sigsuspend:
8088         {
8089             TaskState *ts = cpu->opaque;
8090 #if defined(TARGET_ALPHA)
8091             abi_ulong mask = arg1;
8092             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8093 #else
8094             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8095                 return -TARGET_EFAULT;
8096             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8097             unlock_user(p, arg1, 0);
8098 #endif
8099             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8100                                                SIGSET_T_SIZE));
8101             if (ret != -TARGET_ERESTARTSYS) {
8102                 ts->in_sigsuspend = 1;
8103             }
8104         }
8105         return ret;
8106 #endif
8107     case TARGET_NR_rt_sigsuspend:
8108         {
8109             TaskState *ts = cpu->opaque;
8110 
8111             if (arg2 != sizeof(target_sigset_t)) {
8112                 return -TARGET_EINVAL;
8113             }
8114             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8115                 return -TARGET_EFAULT;
8116             target_to_host_sigset(&ts->sigsuspend_mask, p);
8117             unlock_user(p, arg1, 0);
8118             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8119                                                SIGSET_T_SIZE));
8120             if (ret != -TARGET_ERESTARTSYS) {
8121                 ts->in_sigsuspend = 1;
8122             }
8123         }
8124         return ret;
8125     case TARGET_NR_rt_sigtimedwait:
8126         {
8127             sigset_t set;
8128             struct timespec uts, *puts;
8129             siginfo_t uinfo;
8130 
8131             if (arg4 != sizeof(target_sigset_t)) {
8132                 return -TARGET_EINVAL;
8133             }
8134 
8135             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8136                 return -TARGET_EFAULT;
8137             target_to_host_sigset(&set, p);
8138             unlock_user(p, arg1, 0);
8139             if (arg3) {
8140                 puts = &uts;
8141                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8142             } else {
8143                 puts = NULL;
8144             }
8145             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8146                                                  SIGSET_T_SIZE));
8147             if (!is_error(ret)) {
8148                 if (arg2) {
8149                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8150                                   0);
8151                     if (!p) {
8152                         return -TARGET_EFAULT;
8153                     }
8154                     host_to_target_siginfo(p, &uinfo);
8155                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8156                 }
8157                 ret = host_to_target_signal(ret);
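                     /* ret is a host signal number here; map it back to the
                      * target numbering. */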
8158             }
8159         }
8160         return ret;
8161     case TARGET_NR_rt_sigqueueinfo:
8162         {
8163             siginfo_t uinfo;
8164 
8165             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8166             if (!p) {
8167                 return -TARGET_EFAULT;
8168             }
8169             target_to_host_siginfo(&uinfo, p);
8170             unlock_user(p, arg3, 0);
8171             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8172         }
8173         return ret;
8174     case TARGET_NR_rt_tgsigqueueinfo:
8175         {
8176             siginfo_t uinfo;
8177 
8178             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8179             if (!p) {
8180                 return -TARGET_EFAULT;
8181             }
8182             target_to_host_siginfo(&uinfo, p);
8183             unlock_user(p, arg4, 0);
8184             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8185         }
8186         return ret;
8187 #ifdef TARGET_NR_sigreturn
8188     case TARGET_NR_sigreturn:
8189         if (block_signals()) {
8190             return -TARGET_ERESTARTSYS;
8191         }
8192         return do_sigreturn(cpu_env);
8193 #endif
8194     case TARGET_NR_rt_sigreturn:
8195         if (block_signals()) {
8196             return -TARGET_ERESTARTSYS;
8197         }
8198         return do_rt_sigreturn(cpu_env);
8199     case TARGET_NR_sethostname:
8200         if (!(p = lock_user_string(arg1)))
8201             return -TARGET_EFAULT;
8202         ret = get_errno(sethostname(p, arg2));
8203         unlock_user(p, arg1, 0);
8204         return ret;
8205 #ifdef TARGET_NR_setrlimit
8206     case TARGET_NR_setrlimit:
8207         {
8208             int resource = target_to_host_resource(arg1);
8209             struct target_rlimit *target_rlim;
8210             struct rlimit rlim;
8211             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8212                 return -TARGET_EFAULT;
8213             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8214             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8215             unlock_user_struct(target_rlim, arg2, 0);
8216             /*
8217              * If we just passed through resource limit settings for memory then
8218              * they would also apply to QEMU's own allocations, and QEMU will
8219              * crash or hang or die if its allocations fail. Ideally we would
8220              * track the guest allocations in QEMU and apply the limits ourselves.
8221              * For now, just tell the guest the call succeeded but don't actually
8222              * limit anything.
8223              */
8224             if (resource != RLIMIT_AS &&
8225                 resource != RLIMIT_DATA &&
8226                 resource != RLIMIT_STACK) {
8227                 return get_errno(setrlimit(resource, &rlim));
8228             } else {
8229                 return 0;
8230             }
8231         }
8232 #endif
8233 #ifdef TARGET_NR_getrlimit
8234     case TARGET_NR_getrlimit:
8235         {
8236             int resource = target_to_host_resource(arg1);
8237             struct target_rlimit *target_rlim;
8238             struct rlimit rlim;
8239 
8240             ret = get_errno(getrlimit(resource, &rlim));
8241             if (!is_error(ret)) {
8242                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8243                     return -TARGET_EFAULT;
8244                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8245                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8246                 unlock_user_struct(target_rlim, arg2, 1);
8247             }
8248         }
8249         return ret;
8250 #endif
8251     case TARGET_NR_getrusage:
8252         {
8253             struct rusage rusage;
8254             ret = get_errno(getrusage(arg1, &rusage));
8255             if (!is_error(ret)) {
8256                 ret = host_to_target_rusage(arg2, &rusage);
8257             }
8258         }
8259         return ret;
8260     case TARGET_NR_gettimeofday:
8261         {
8262             struct timeval tv;
8263             ret = get_errno(gettimeofday(&tv, NULL));
8264             if (!is_error(ret)) {
8265                 if (copy_to_user_timeval(arg1, &tv))
8266                     return -TARGET_EFAULT;
8267             }
8268         }
8269         return ret;
8270     case TARGET_NR_settimeofday:
8271         {
8272             struct timeval tv, *ptv = NULL;
8273             struct timezone tz, *ptz = NULL;
8274 
8275             if (arg1) {
8276                 if (copy_from_user_timeval(&tv, arg1)) {
8277                     return -TARGET_EFAULT;
8278                 }
8279                 ptv = &tv;
8280             }
8281 
8282             if (arg2) {
8283                 if (copy_from_user_timezone(&tz, arg2)) {
8284                     return -TARGET_EFAULT;
8285                 }
8286                 ptz = &tz;
8287             }
8288 
8289             return get_errno(settimeofday(ptv, ptz));
8290         }
8291 #if defined(TARGET_NR_select)
8292     case TARGET_NR_select:
8293 #if defined(TARGET_WANT_NI_OLD_SELECT)
8294         /* some architectures used to have old_select here
8295          * but now return ENOSYS for it.
8296          */
8297         ret = -TARGET_ENOSYS;
8298 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8299         ret = do_old_select(arg1);
8300 #else
8301         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8302 #endif
8303         return ret;
8304 #endif
8305 #ifdef TARGET_NR_pselect6
8306     case TARGET_NR_pselect6:
8307         {
8308             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8309             fd_set rfds, wfds, efds;
8310             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8311             struct timespec ts, *ts_ptr;
8312 
8313             /*
8314              * The 6th arg is actually two args smashed together,
8315              * so we cannot use the C library.
8316              */
8317             sigset_t set;
8318             struct {
8319                 sigset_t *set;
8320                 size_t size;
8321             } sig, *sig_ptr;
8322 
8323             abi_ulong arg_sigset, arg_sigsize, *arg7;
8324             target_sigset_t *target_sigset;
8325 
8326             n = arg1;
8327             rfd_addr = arg2;
8328             wfd_addr = arg3;
8329             efd_addr = arg4;
8330             ts_addr = arg5;
8331 
8332             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8333             if (ret) {
8334                 return ret;
8335             }
8336             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8337             if (ret) {
8338                 return ret;
8339             }
8340             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8341             if (ret) {
8342                 return ret;
8343             }
8344 
8345             /*
8346              * This takes a timespec, and not a timeval, so we cannot
8347              * use the do_select() helper ...
8348              */
8349             if (ts_addr) {
8350                 if (target_to_host_timespec(&ts, ts_addr)) {
8351                     return -TARGET_EFAULT;
8352                 }
8353                 ts_ptr = &ts;
8354             } else {
8355                 ts_ptr = NULL;
8356             }
8357 
8358             /* Extract the two packed args for the sigset */
8359             if (arg6) {
8360                 sig_ptr = &sig;
8361                 sig.size = SIGSET_T_SIZE;
8362 
8363                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8364                 if (!arg7) {
8365                     return -TARGET_EFAULT;
8366                 }
8367                 arg_sigset = tswapal(arg7[0]);
8368                 arg_sigsize = tswapal(arg7[1]);
8369                 unlock_user(arg7, arg6, 0);
8370 
8371                 if (arg_sigset) {
8372                     sig.set = &set;
8373                     if (arg_sigsize != sizeof(*target_sigset)) {
8374                         /* Like the kernel, we enforce correct size sigsets */
8375                         return -TARGET_EINVAL;
8376                     }
8377                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8378                                               sizeof(*target_sigset), 1);
8379                     if (!target_sigset) {
8380                         return -TARGET_EFAULT;
8381                     }
8382                     target_to_host_sigset(&set, target_sigset);
8383                     unlock_user(target_sigset, arg_sigset, 0);
8384                 } else {
8385                     sig.set = NULL;
8386                 }
8387             } else {
8388                 sig_ptr = NULL;
8389             }
8390 
8391             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8392                                           ts_ptr, sig_ptr));
8393 
8394             if (!is_error(ret)) {
8395                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8396                     return -TARGET_EFAULT;
8397                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8398                     return -TARGET_EFAULT;
8399                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8400                     return -TARGET_EFAULT;
8401 
8402                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8403                     return -TARGET_EFAULT;
8404             }
8405         }
8406         return ret;
8407 #endif
8408 #ifdef TARGET_NR_symlink
8409     case TARGET_NR_symlink:
8410         {
8411             void *p2;
8412             p = lock_user_string(arg1);
8413             p2 = lock_user_string(arg2);
8414             if (!p || !p2)
8415                 ret = -TARGET_EFAULT;
8416             else
8417                 ret = get_errno(symlink(p, p2));
8418             unlock_user(p2, arg2, 0);
8419             unlock_user(p, arg1, 0);
8420         }
8421         return ret;
8422 #endif
8423 #if defined(TARGET_NR_symlinkat)
8424     case TARGET_NR_symlinkat:
8425         {
8426             void *p2;
8427             p  = lock_user_string(arg1);
8428             p2 = lock_user_string(arg3);
8429             if (!p || !p2)
8430                 ret = -TARGET_EFAULT;
8431             else
8432                 ret = get_errno(symlinkat(p, arg2, p2));
8433             unlock_user(p2, arg3, 0);
8434             unlock_user(p, arg1, 0);
8435         }
8436         return ret;
8437 #endif
8438 #ifdef TARGET_NR_readlink
8439     case TARGET_NR_readlink:
8440         {
8441             void *p2;
8442             p = lock_user_string(arg1);
8443             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8444             if (!p || !p2) {
8445                 ret = -TARGET_EFAULT;
8446             } else if (!arg3) {
8447                 /* Short circuit this for the magic exe check. */
8448                 ret = -TARGET_EINVAL;
8449             } else if (is_proc_myself((const char *)p, "exe")) {
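                     /* Report the emulated binary's path, not QEMU's own. */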
8450                 char real[PATH_MAX], *temp;
8451                 temp = realpath(exec_path, real);
8452                 /* Return value is # of bytes that we wrote to the buffer. */
8453                 if (temp == NULL) {
8454                     ret = get_errno(-1);
8455                 } else {
8456                     /* Don't worry about sign mismatch as earlier mapping
8457                      * logic would have thrown a bad address error. */
8458                     ret = MIN(strlen(real), arg3);
8459                     /* We cannot NUL terminate the string. */
8460                     memcpy(p2, real, ret);
8461                 }
8462             } else {
8463                 ret = get_errno(readlink(path(p), p2, arg3));
8464             }
8465             unlock_user(p2, arg2, ret);
8466             unlock_user(p, arg1, 0);
8467         }
8468         return ret;
8469 #endif
8470 #if defined(TARGET_NR_readlinkat)
8471     case TARGET_NR_readlinkat:
8472         {
8473             void *p2;
8474             p  = lock_user_string(arg2);
8475             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8476             if (!p || !p2) {
8477                 ret = -TARGET_EFAULT;
8478             } else if (is_proc_myself((const char *)p, "exe")) {
8479                 char real[PATH_MAX], *temp;
8480                 temp = realpath(exec_path, real);
8481                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8482                 snprintf((char *)p2, arg4, "%s", real);
8483             } else {
8484                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8485             }
8486             unlock_user(p2, arg3, ret);
8487             unlock_user(p, arg2, 0);
8488         }
8489         return ret;
8490 #endif
8491 #ifdef TARGET_NR_swapon
8492     case TARGET_NR_swapon:
8493         if (!(p = lock_user_string(arg1)))
8494             return -TARGET_EFAULT;
8495         ret = get_errno(swapon(p, arg2));
8496         unlock_user(p, arg1, 0);
8497         return ret;
8498 #endif
8499     case TARGET_NR_reboot:
8500         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8501            /* arg4 (the command string) is only valid for RESTART2;
8502               it must be ignored in all other cases */
8502            p = lock_user_string(arg4);
8503            if (!p) {
8504                return -TARGET_EFAULT;
8505            }
8506            ret = get_errno(reboot(arg1, arg2, arg3, p));
8507            unlock_user(p, arg4, 0);
8508         } else {
8509            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8510         }
8511         return ret;
8512 #ifdef TARGET_NR_mmap
8513     case TARGET_NR_mmap:
8514 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8515     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8516     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8517     || defined(TARGET_S390X)
8518         {
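                 /* Old-style mmap: arg1 points to an array holding the six
                  * real arguments in guest memory. */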
8519             abi_ulong *v;
8520             abi_ulong v1, v2, v3, v4, v5, v6;
8521             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8522                 return -TARGET_EFAULT;
8523             v1 = tswapal(v[0]);
8524             v2 = tswapal(v[1]);
8525             v3 = tswapal(v[2]);
8526             v4 = tswapal(v[3]);
8527             v5 = tswapal(v[4]);
8528             v6 = tswapal(v[5]);
8529             unlock_user(v, arg1, 0);
8530             ret = get_errno(target_mmap(v1, v2, v3,
8531                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8532                                         v5, v6));
8533         }
8534 #else
8535         ret = get_errno(target_mmap(arg1, arg2, arg3,
8536                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8537                                     arg5,
8538                                     arg6));
8539 #endif
8540         return ret;
8541 #endif
8542 #ifdef TARGET_NR_mmap2
8543     case TARGET_NR_mmap2:
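             /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT) bytes. */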
8544 #ifndef MMAP_SHIFT
8545 #define MMAP_SHIFT 12
8546 #endif
8547         ret = target_mmap(arg1, arg2, arg3,
8548                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8549                           arg5, arg6 << MMAP_SHIFT);
8550         return get_errno(ret);
8551 #endif
8552     case TARGET_NR_munmap:
8553         return get_errno(target_munmap(arg1, arg2));
8554     case TARGET_NR_mprotect:
8555         {
8556             TaskState *ts = cpu->opaque;
8557             /* Special hack to detect libc making the stack executable.  */
8558             if ((arg3 & PROT_GROWSDOWN)
8559                 && arg1 >= ts->info->stack_limit
8560                 && arg1 <= ts->info->start_stack) {
8561                 arg3 &= ~PROT_GROWSDOWN;
8562                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8563                 arg1 = ts->info->stack_limit;
8564             }
8565         }
8566         return get_errno(target_mprotect(arg1, arg2, arg3));
8567 #ifdef TARGET_NR_mremap
8568     case TARGET_NR_mremap:
8569         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8570 #endif
8571         /* ??? msync/mlock/munlock are broken for softmmu.  */
8572 #ifdef TARGET_NR_msync
8573     case TARGET_NR_msync:
8574         return get_errno(msync(g2h(arg1), arg2, arg3));
8575 #endif
8576 #ifdef TARGET_NR_mlock
8577     case TARGET_NR_mlock:
8578         return get_errno(mlock(g2h(arg1), arg2));
8579 #endif
8580 #ifdef TARGET_NR_munlock
8581     case TARGET_NR_munlock:
8582         return get_errno(munlock(g2h(arg1), arg2));
8583 #endif
8584 #ifdef TARGET_NR_mlockall
8585     case TARGET_NR_mlockall:
8586         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8587 #endif
8588 #ifdef TARGET_NR_munlockall
8589     case TARGET_NR_munlockall:
8590         return get_errno(munlockall());
8591 #endif
8592 #ifdef TARGET_NR_truncate
8593     case TARGET_NR_truncate:
8594         if (!(p = lock_user_string(arg1)))
8595             return -TARGET_EFAULT;
8596         ret = get_errno(truncate(p, arg2));
8597         unlock_user(p, arg1, 0);
8598         return ret;
8599 #endif
8600 #ifdef TARGET_NR_ftruncate
8601     case TARGET_NR_ftruncate:
8602         return get_errno(ftruncate(arg1, arg2));
8603 #endif
8604     case TARGET_NR_fchmod:
8605         return get_errno(fchmod(arg1, arg2));
8606 #if defined(TARGET_NR_fchmodat)
8607     case TARGET_NR_fchmodat:
8608         if (!(p = lock_user_string(arg2)))
8609             return -TARGET_EFAULT;
8610         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8611         unlock_user(p, arg2, 0);
8612         return ret;
8613 #endif
8614     case TARGET_NR_getpriority:
8615         /* Note that negative values are valid for getpriority, so we must
8616            differentiate based on errno settings.  */
8617         errno = 0;
8618         ret = getpriority(arg1, arg2);
8619         if (ret == -1 && errno != 0) {
8620             return -host_to_target_errno(errno);
8621         }
8622 #ifdef TARGET_ALPHA
8623         /* Return value is the unbiased priority.  Signal no error.  */
8624         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8625 #else
8626         /* Return value is a biased priority to avoid negative numbers.  */
8627         ret = 20 - ret;
8628 #endif
8629         return ret;
8630     case TARGET_NR_setpriority:
8631         return get_errno(setpriority(arg1, arg2, arg3));
8632 #ifdef TARGET_NR_statfs
8633     case TARGET_NR_statfs:
8634         if (!(p = lock_user_string(arg1))) {
8635             return -TARGET_EFAULT;
8636         }
8637         ret = get_errno(statfs(path(p), &stfs));
8638         unlock_user(p, arg1, 0);
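     /* fstatfs (below) jumps here to share the host-to-target statfs conversion. */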
8639     convert_statfs:
8640         if (!is_error(ret)) {
8641             struct target_statfs *target_stfs;
8642 
8643             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8644                 return -TARGET_EFAULT;
8645             __put_user(stfs.f_type, &target_stfs->f_type);
8646             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8647             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8648             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8649             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8650             __put_user(stfs.f_files, &target_stfs->f_files);
8651             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8652             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8653             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8654             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8655             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8656 #ifdef _STATFS_F_FLAGS
8657             __put_user(stfs.f_flags, &target_stfs->f_flags);
8658 #else
8659             __put_user(0, &target_stfs->f_flags);
8660 #endif
8661             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8662             unlock_user_struct(target_stfs, arg2, 1);
8663         }
8664         return ret;
8665 #endif
8666 #ifdef TARGET_NR_fstatfs
8667     case TARGET_NR_fstatfs:
8668         ret = get_errno(fstatfs(arg1, &stfs));
8669         goto convert_statfs;
8670 #endif
8671 #ifdef TARGET_NR_statfs64
8672     case TARGET_NR_statfs64:
8673         if (!(p = lock_user_string(arg1))) {
8674             return -TARGET_EFAULT;
8675         }
8676         ret = get_errno(statfs(path(p), &stfs));
8677         unlock_user(p, arg1, 0);
8678     convert_statfs64:
8679         if (!is_error(ret)) {
8680             struct target_statfs64 *target_stfs;
8681 
8682             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8683                 return -TARGET_EFAULT;
8684             __put_user(stfs.f_type, &target_stfs->f_type);
8685             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8686             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8687             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8688             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8689             __put_user(stfs.f_files, &target_stfs->f_files);
8690             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8691             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8692             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8693             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8694             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8695             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8696             unlock_user_struct(target_stfs, arg3, 1);
8697         }
8698         return ret;
8699     case TARGET_NR_fstatfs64:
8700         ret = get_errno(fstatfs(arg1, &stfs));
8701         goto convert_statfs64;
8702 #endif
8703 #ifdef TARGET_NR_socketcall
8704     case TARGET_NR_socketcall:
8705         return do_socketcall(arg1, arg2);
8706 #endif
8707 #ifdef TARGET_NR_accept
8708     case TARGET_NR_accept:
8709         return do_accept4(arg1, arg2, arg3, 0);
8710 #endif
8711 #ifdef TARGET_NR_accept4
8712     case TARGET_NR_accept4:
8713         return do_accept4(arg1, arg2, arg3, arg4);
8714 #endif
8715 #ifdef TARGET_NR_bind
8716     case TARGET_NR_bind:
8717         return do_bind(arg1, arg2, arg3);
8718 #endif
8719 #ifdef TARGET_NR_connect
8720     case TARGET_NR_connect:
8721         return do_connect(arg1, arg2, arg3);
8722 #endif
8723 #ifdef TARGET_NR_getpeername
8724     case TARGET_NR_getpeername:
8725         return do_getpeername(arg1, arg2, arg3);
8726 #endif
8727 #ifdef TARGET_NR_getsockname
8728     case TARGET_NR_getsockname:
8729         return do_getsockname(arg1, arg2, arg3);
8730 #endif
8731 #ifdef TARGET_NR_getsockopt
8732     case TARGET_NR_getsockopt:
8733         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8734 #endif
8735 #ifdef TARGET_NR_listen
8736     case TARGET_NR_listen:
8737         return get_errno(listen(arg1, arg2));
8738 #endif
8739 #ifdef TARGET_NR_recv
8740     case TARGET_NR_recv:
8741         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8742 #endif
8743 #ifdef TARGET_NR_recvfrom
8744     case TARGET_NR_recvfrom:
8745         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8746 #endif
8747 #ifdef TARGET_NR_recvmsg
8748     case TARGET_NR_recvmsg:
8749         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8750 #endif
8751 #ifdef TARGET_NR_send
8752     case TARGET_NR_send:
8753         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8754 #endif
8755 #ifdef TARGET_NR_sendmsg
8756     case TARGET_NR_sendmsg:
8757         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8758 #endif
8759 #ifdef TARGET_NR_sendmmsg
8760     case TARGET_NR_sendmmsg:
8761         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8762     case TARGET_NR_recvmmsg:
8763         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8764 #endif
8765 #ifdef TARGET_NR_sendto
8766     case TARGET_NR_sendto:
8767         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8768 #endif
8769 #ifdef TARGET_NR_shutdown
8770     case TARGET_NR_shutdown:
8771         return get_errno(shutdown(arg1, arg2));
8772 #endif
8773 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8774     case TARGET_NR_getrandom:
8775         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8776         if (!p) {
8777             return -TARGET_EFAULT;
8778         }
8779         ret = get_errno(getrandom(p, arg2, arg3));
8780         unlock_user(p, arg1, ret);
8781         return ret;
8782 #endif
8783 #ifdef TARGET_NR_socket
8784     case TARGET_NR_socket:
8785         return do_socket(arg1, arg2, arg3);
8786 #endif
8787 #ifdef TARGET_NR_socketpair
8788     case TARGET_NR_socketpair:
8789         return do_socketpair(arg1, arg2, arg3, arg4);
8790 #endif
8791 #ifdef TARGET_NR_setsockopt
8792     case TARGET_NR_setsockopt:
8793         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8794 #endif
8795 #if defined(TARGET_NR_syslog)
8796     case TARGET_NR_syslog:
8797         {
8798             int len = arg2;
8799 
8800             switch (arg1) {
8801             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8802             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8803             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8804             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8805             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8806             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8807             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8808             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8809                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8810             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8811             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8812             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8813                 {
8814                     if (len < 0) {
8815                         return -TARGET_EINVAL;
8816                     }
8817                     if (len == 0) {
8818                         return 0;
8819                     }
8820                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8821                     if (!p) {
8822                         return -TARGET_EFAULT;
8823                     }
8824                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8825                     unlock_user(p, arg2, arg3);
8826                 }
8827                 return ret;
8828             default:
8829                 return -TARGET_EINVAL;
8830             }
8831         }
8832         break;
8833 #endif
8834     case TARGET_NR_setitimer:
8835         {
8836             struct itimerval value, ovalue, *pvalue;
8837 
8838             if (arg2) {
8839                 pvalue = &value;
8840                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8841                     || copy_from_user_timeval(&pvalue->it_value,
8842                                               arg2 + sizeof(struct target_timeval)))
8843                     return -TARGET_EFAULT;
8844             } else {
8845                 pvalue = NULL;
8846             }
8847             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8848             if (!is_error(ret) && arg3) {
8849                 if (copy_to_user_timeval(arg3,
8850                                          &ovalue.it_interval)
8851                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8852                                             &ovalue.it_value))
8853                     return -TARGET_EFAULT;
8854             }
8855         }
8856         return ret;
8857     case TARGET_NR_getitimer:
8858         {
8859             struct itimerval value;
8860 
8861             ret = get_errno(getitimer(arg1, &value));
8862             if (!is_error(ret) && arg2) {
8863                 if (copy_to_user_timeval(arg2,
8864                                          &value.it_interval)
8865                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8866                                             &value.it_value))
8867                     return -TARGET_EFAULT;
8868             }
8869         }
8870         return ret;
8871 #ifdef TARGET_NR_stat
8872     case TARGET_NR_stat:
8873         if (!(p = lock_user_string(arg1))) {
8874             return -TARGET_EFAULT;
8875         }
8876         ret = get_errno(stat(path(p), &st));
8877         unlock_user(p, arg1, 0);
8878         goto do_stat;
8879 #endif
8880 #ifdef TARGET_NR_lstat
8881     case TARGET_NR_lstat:
8882         if (!(p = lock_user_string(arg1))) {
8883             return -TARGET_EFAULT;
8884         }
8885         ret = get_errno(lstat(path(p), &st));
8886         unlock_user(p, arg1, 0);
8887         goto do_stat;
8888 #endif
8889 #ifdef TARGET_NR_fstat
8890     case TARGET_NR_fstat:
8891         {
8892             ret = get_errno(fstat(arg1, &st));
8893 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8894         do_stat:
8895 #endif
8896             if (!is_error(ret)) {
8897                 struct target_stat *target_st;
8898 
8899                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8900                     return -TARGET_EFAULT;
8901                 memset(target_st, 0, sizeof(*target_st));
8902                 __put_user(st.st_dev, &target_st->st_dev);
8903                 __put_user(st.st_ino, &target_st->st_ino);
8904                 __put_user(st.st_mode, &target_st->st_mode);
8905                 __put_user(st.st_uid, &target_st->st_uid);
8906                 __put_user(st.st_gid, &target_st->st_gid);
8907                 __put_user(st.st_nlink, &target_st->st_nlink);
8908                 __put_user(st.st_rdev, &target_st->st_rdev);
8909                 __put_user(st.st_size, &target_st->st_size);
8910                 __put_user(st.st_blksize, &target_st->st_blksize);
8911                 __put_user(st.st_blocks, &target_st->st_blocks);
8912                 __put_user(st.st_atime, &target_st->target_st_atime);
8913                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8914                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8915 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8916     defined(TARGET_STAT_HAVE_NSEC)
8917                 __put_user(st.st_atim.tv_nsec,
8918                            &target_st->target_st_atime_nsec);
8919                 __put_user(st.st_mtim.tv_nsec,
8920                            &target_st->target_st_mtime_nsec);
8921                 __put_user(st.st_ctim.tv_nsec,
8922                            &target_st->target_st_ctime_nsec);
8923 #endif
8924                 unlock_user_struct(target_st, arg2, 1);
8925             }
8926         }
8927         return ret;
8928 #endif
8929     case TARGET_NR_vhangup:
8930         return get_errno(vhangup());
8931 #ifdef TARGET_NR_syscall
8932     case TARGET_NR_syscall:
8933         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8934                           arg6, arg7, arg8, 0);
8935 #endif
8936     case TARGET_NR_wait4:
8937         {
8938             int status;
8939             abi_long status_ptr = arg2;
8940             struct rusage rusage, *rusage_ptr;
8941             abi_ulong target_rusage = arg4;
8942             abi_long rusage_err;
8943             if (target_rusage)
8944                 rusage_ptr = &rusage;
8945             else
8946                 rusage_ptr = NULL;
8947             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8948             if (!is_error(ret)) {
8949                 if (status_ptr && ret) {
8950                     status = host_to_target_waitstatus(status);
8951                     if (put_user_s32(status, status_ptr))
8952                         return -TARGET_EFAULT;
8953                 }
8954                 if (target_rusage) {
8955                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8956                     if (rusage_err) {
8957                         ret = rusage_err;
8958                     }
8959                 }
8960             }
8961         }
8962         return ret;
8963 #ifdef TARGET_NR_swapoff
8964     case TARGET_NR_swapoff:
8965         if (!(p = lock_user_string(arg1)))
8966             return -TARGET_EFAULT;
8967         ret = get_errno(swapoff(p));
8968         unlock_user(p, arg1, 0);
8969         return ret;
8970 #endif
8971     case TARGET_NR_sysinfo:
8972         {
8973             struct target_sysinfo *target_value;
8974             struct sysinfo value;
8975             ret = get_errno(sysinfo(&value));
8976             if (!is_error(ret) && arg1)
8977             {
8978                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8979                     return -TARGET_EFAULT;
8980                 __put_user(value.uptime, &target_value->uptime);
8981                 __put_user(value.loads[0], &target_value->loads[0]);
8982                 __put_user(value.loads[1], &target_value->loads[1]);
8983                 __put_user(value.loads[2], &target_value->loads[2]);
8984                 __put_user(value.totalram, &target_value->totalram);
8985                 __put_user(value.freeram, &target_value->freeram);
8986                 __put_user(value.sharedram, &target_value->sharedram);
8987                 __put_user(value.bufferram, &target_value->bufferram);
8988                 __put_user(value.totalswap, &target_value->totalswap);
8989                 __put_user(value.freeswap, &target_value->freeswap);
8990                 __put_user(value.procs, &target_value->procs);
8991                 __put_user(value.totalhigh, &target_value->totalhigh);
8992                 __put_user(value.freehigh, &target_value->freehigh);
8993                 __put_user(value.mem_unit, &target_value->mem_unit);
8994                 unlock_user_struct(target_value, arg1, 1);
8995             }
8996         }
8997         return ret;
8998 #ifdef TARGET_NR_ipc
8999     case TARGET_NR_ipc:
9000         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9001 #endif
9002 #ifdef TARGET_NR_semget
9003     case TARGET_NR_semget:
9004         return get_errno(semget(arg1, arg2, arg3));
9005 #endif
9006 #ifdef TARGET_NR_semop
9007     case TARGET_NR_semop:
9008         return do_semop(arg1, arg2, arg3);
9009 #endif
9010 #ifdef TARGET_NR_semctl
9011     case TARGET_NR_semctl:
9012         return do_semctl(arg1, arg2, arg3, arg4);
9013 #endif
9014 #ifdef TARGET_NR_msgctl
9015     case TARGET_NR_msgctl:
9016         return do_msgctl(arg1, arg2, arg3);
9017 #endif
9018 #ifdef TARGET_NR_msgget
9019     case TARGET_NR_msgget:
9020         return get_errno(msgget(arg1, arg2));
9021 #endif
9022 #ifdef TARGET_NR_msgrcv
9023     case TARGET_NR_msgrcv:
9024         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9025 #endif
9026 #ifdef TARGET_NR_msgsnd
9027     case TARGET_NR_msgsnd:
9028         return do_msgsnd(arg1, arg2, arg3, arg4);
9029 #endif
9030 #ifdef TARGET_NR_shmget
9031     case TARGET_NR_shmget:
9032         return get_errno(shmget(arg1, arg2, arg3));
9033 #endif
9034 #ifdef TARGET_NR_shmctl
9035     case TARGET_NR_shmctl:
9036         return do_shmctl(arg1, arg2, arg3);
9037 #endif
9038 #ifdef TARGET_NR_shmat
9039     case TARGET_NR_shmat:
9040         return do_shmat(cpu_env, arg1, arg2, arg3);
9041 #endif
9042 #ifdef TARGET_NR_shmdt
9043     case TARGET_NR_shmdt:
9044         return do_shmdt(arg1);
9045 #endif
9046     case TARGET_NR_fsync:
9047         return get_errno(fsync(arg1));
9048     case TARGET_NR_clone:
9049         /* Linux manages to have three different orderings for its
9050          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9051          * match the kernel's CONFIG_CLONE_* settings.
9052          * Microblaze is further special in that it uses a sixth
9053          * implicit argument to clone for the TLS pointer.
9054          */
9055 #if defined(TARGET_MICROBLAZE)
9056         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9057 #elif defined(TARGET_CLONE_BACKWARDS)
9058         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9059 #elif defined(TARGET_CLONE_BACKWARDS2)
9060         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9061 #else
9062         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9063 #endif
9064         return ret;
9065 #ifdef __NR_exit_group
9066         /* new thread calls */
9067     case TARGET_NR_exit_group:
9068         preexit_cleanup(cpu_env, arg1);
9069         return get_errno(exit_group(arg1));
9070 #endif
9071     case TARGET_NR_setdomainname:
9072         if (!(p = lock_user_string(arg1)))
9073             return -TARGET_EFAULT;
9074         ret = get_errno(setdomainname(p, arg2));
9075         unlock_user(p, arg1, 0);
9076         return ret;
9077     case TARGET_NR_uname:
9078         /* no need to transcode because we use the linux syscall */
9079         {
9080             struct new_utsname * buf;
9081 
9082             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9083                 return -TARGET_EFAULT;
9084             ret = get_errno(sys_uname(buf));
9085             if (!is_error(ret)) {
9086                 /* Overwrite the native machine name with whatever is being
9087                    emulated. */
9088                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9089                           sizeof(buf->machine));
9090                 /* Allow the user to override the reported release.  */
9091                 if (qemu_uname_release && *qemu_uname_release) {
9092                     g_strlcpy(buf->release, qemu_uname_release,
9093                               sizeof(buf->release));
9094                 }
9095             }
9096             unlock_user_struct(buf, arg1, 1);
9097         }
9098         return ret;
9099 #ifdef TARGET_I386
9100     case TARGET_NR_modify_ldt:
9101         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9102 #if !defined(TARGET_X86_64)
9103     case TARGET_NR_vm86:
9104         return do_vm86(cpu_env, arg1, arg2);
9105 #endif
9106 #endif
9107     case TARGET_NR_adjtimex:
9108         {
9109             struct timex host_buf;
9110 
9111             if (target_to_host_timex(&host_buf, arg1) != 0) {
9112                 return -TARGET_EFAULT;
9113             }
9114             ret = get_errno(adjtimex(&host_buf));
9115             if (!is_error(ret)) {
9116                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9117                     return -TARGET_EFAULT;
9118                 }
9119             }
9120         }
9121         return ret;
9122 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9123     case TARGET_NR_clock_adjtime:
9124         {
9125             struct timex htx, *phtx = &htx;
9126 
9127             if (target_to_host_timex(phtx, arg2) != 0) {
9128                 return -TARGET_EFAULT;
9129             }
9130             ret = get_errno(clock_adjtime(arg1, phtx));
9131             if (!is_error(ret) && phtx) {
9132                 if (host_to_target_timex(arg2, phtx) != 0) {
9133                     return -TARGET_EFAULT;
9134                 }
9135             }
9136         }
9137         return ret;
9138 #endif
9139     case TARGET_NR_getpgid:
9140         return get_errno(getpgid(arg1));
9141     case TARGET_NR_fchdir:
9142         return get_errno(fchdir(arg1));
9143     case TARGET_NR_personality:
9144         return get_errno(personality(arg1));
9145 #ifdef TARGET_NR__llseek /* Not on alpha */
9146     case TARGET_NR__llseek:
9147         {
9148             int64_t res;
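            /* On hosts without a separate llseek syscall (typically 64-bit
             * hosts), reconstruct the 64-bit offset from the two 32-bit
             * halves and use plain lseek() instead.
             */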
9149 #if !defined(__NR_llseek)
9150             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9151             if (res == -1) {
9152                 ret = get_errno(res);
9153             } else {
9154                 ret = 0;
9155             }
9156 #else
9157             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9158 #endif
9159             if ((ret == 0) && put_user_s64(res, arg4)) {
9160                 return -TARGET_EFAULT;
9161             }
9162         }
9163         return ret;
9164 #endif
9165 #ifdef TARGET_NR_getdents
9166     case TARGET_NR_getdents:
9167 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9168 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9169         {
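            /*
             * 32-bit target on a 64-bit host: the host's struct linux_dirent
             * uses long-sized d_ino/d_off fields, so each record returned by
             * the host must be repacked into the smaller target_dirent
             * layout, with d_reclen adjusted to match.
             */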
9170             struct target_dirent *target_dirp;
9171             struct linux_dirent *dirp;
9172             abi_long count = arg3;
9173 
9174             dirp = g_try_malloc(count);
9175             if (!dirp) {
9176                 return -TARGET_ENOMEM;
9177             }
9178 
9179             ret = get_errno(sys_getdents(arg1, dirp, count));
9180             if (!is_error(ret)) {
9181                 struct linux_dirent *de;
9182                 struct target_dirent *tde;
9183                 int len = ret;
9184                 int reclen, treclen;
9185                 int count1, tnamelen;
9186
9187                 count1 = 0;
9188                 de = dirp;
9189                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9190                     return -TARGET_EFAULT;
9191                 tde = target_dirp;
9192                 while (len > 0) {
9193                     reclen = de->d_reclen;
9194                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9195                     assert(tnamelen >= 0);
9196                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9197                     assert(count1 + treclen <= count);
9198                     tde->d_reclen = tswap16(treclen);
9199                     tde->d_ino = tswapal(de->d_ino);
9200                     tde->d_off = tswapal(de->d_off);
9201                     memcpy(tde->d_name, de->d_name, tnamelen);
9202                     de = (struct linux_dirent *)((char *)de + reclen);
9203                     len -= reclen;
9204                     tde = (struct target_dirent *)((char *)tde + treclen);
9205                     count1 += treclen;
9206                 }
9207                 ret = count1;
9208                 unlock_user(target_dirp, arg2, ret);
9209             }
9210             g_free(dirp);
9211         }
9212 #else
9213         {
9214             struct linux_dirent *dirp;
9215             abi_long count = arg3;
9216 
9217             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9218                 return -TARGET_EFAULT;
9219             ret = get_errno(sys_getdents(arg1, dirp, count));
9220             if (!is_error(ret)) {
9221                 struct linux_dirent *de;
9222                 int len = ret;
9223                 int reclen;
9224                 de = dirp;
9225                 while (len > 0) {
9226                     reclen = de->d_reclen;
9227                     if (reclen > len)
9228                         break;
9229                     de->d_reclen = tswap16(reclen);
9230                     tswapls(&de->d_ino);
9231                     tswapls(&de->d_off);
9232                     de = (struct linux_dirent *)((char *)de + reclen);
9233                     len -= reclen;
9234                 }
9235             }
9236             unlock_user(dirp, arg2, ret);
9237         }
9238 #endif
9239 #else
9240         /* Implement getdents in terms of getdents64 */
9241         {
9242             struct linux_dirent64 *dirp;
9243             abi_long count = arg3;
9244 
9245             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9246             if (!dirp) {
9247                 return -TARGET_EFAULT;
9248             }
9249             ret = get_errno(sys_getdents64(arg1, dirp, count));
9250             if (!is_error(ret)) {
9251                 /* Convert the dirent64 structs to target dirent.  We do this
9252                  * in-place, since we can guarantee that a target_dirent is no
9253                  * larger than a dirent64; however this means we have to be
9254                  * careful to read everything before writing in the new format.
9255                  */
9256                 struct linux_dirent64 *de;
9257                 struct target_dirent *tde;
9258                 int len = ret;
9259                 int tlen = 0;
9260 
9261                 de = dirp;
9262                 tde = (struct target_dirent *)dirp;
9263                 while (len > 0) {
9264                     int namelen, treclen;
9265                     int reclen = de->d_reclen;
9266                     uint64_t ino = de->d_ino;
9267                     int64_t off = de->d_off;
9268                     uint8_t type = de->d_type;
9269 
9270                     namelen = strlen(de->d_name);
9271                     treclen = offsetof(struct target_dirent, d_name)
9272                         + namelen + 2;
9273                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
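                    /* The "+ 2" above covers the trailing NUL of d_name plus
                     * the d_type byte stored in the last byte of the record
                     * (see below).
                     */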
9274 
9275                     memmove(tde->d_name, de->d_name, namelen + 1);
9276                     tde->d_ino = tswapal(ino);
9277                     tde->d_off = tswapal(off);
9278                     tde->d_reclen = tswap16(treclen);
9279                     /* The target_dirent d_type field is stored in what was
9280                      * formerly a padding byte at the end of the structure.
9281                      */
9282                     *(((char *)tde) + treclen - 1) = type;
9283 
9284                     de = (struct linux_dirent64 *)((char *)de + reclen);
9285                     tde = (struct target_dirent *)((char *)tde + treclen);
9286                     len -= reclen;
9287                     tlen += treclen;
9288                 }
9289                 ret = tlen;
9290             }
9291             unlock_user(dirp, arg2, ret);
9292         }
9293 #endif
9294         return ret;
9295 #endif /* TARGET_NR_getdents */
9296 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9297     case TARGET_NR_getdents64:
9298         {
9299             struct linux_dirent64 *dirp;
9300             abi_long count = arg3;
9301             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9302                 return -TARGET_EFAULT;
9303             ret = get_errno(sys_getdents64(arg1, dirp, count));
9304             if (!is_error(ret)) {
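                /* This assumes host and target struct linux_dirent64 share
                 * the same layout, so only the multi-byte fields need
                 * byte-swapping in place for cross-endian targets.
                 */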
9305                 struct linux_dirent64 *de;
9306                 int len = ret;
9307                 int reclen;
9308                 de = dirp;
9309                 while (len > 0) {
9310                     reclen = de->d_reclen;
9311                     if (reclen > len)
9312                         break;
9313                     de->d_reclen = tswap16(reclen);
9314                     tswap64s((uint64_t *)&de->d_ino);
9315                     tswap64s((uint64_t *)&de->d_off);
9316                     de = (struct linux_dirent64 *)((char *)de + reclen);
9317                     len -= reclen;
9318                 }
9319             }
9320             unlock_user(dirp, arg2, ret);
9321         }
9322         return ret;
9323 #endif /* TARGET_NR_getdents64 */
9324 #if defined(TARGET_NR__newselect)
9325     case TARGET_NR__newselect:
9326         return do_select(arg1, arg2, arg3, arg4, arg5);
9327 #endif
9328 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9329 # ifdef TARGET_NR_poll
9330     case TARGET_NR_poll:
9331 # endif
9332 # ifdef TARGET_NR_ppoll
9333     case TARGET_NR_ppoll:
9334 # endif
9335         {
9336             struct target_pollfd *target_pfd;
9337             unsigned int nfds = arg2;
9338             struct pollfd *pfd;
9339             unsigned int i;
9340 
9341             pfd = NULL;
9342             target_pfd = NULL;
9343             if (nfds) {
9344                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9345                     return -TARGET_EINVAL;
9346                 }
9347 
9348                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9349                                        sizeof(struct target_pollfd) * nfds, 1);
9350                 if (!target_pfd) {
9351                     return -TARGET_EFAULT;
9352                 }
9353 
9354                 pfd = alloca(sizeof(struct pollfd) * nfds);
9355                 for (i = 0; i < nfds; i++) {
9356                     pfd[i].fd = tswap32(target_pfd[i].fd);
9357                     pfd[i].events = tswap16(target_pfd[i].events);
9358                 }
9359             }
9360 
9361             switch (num) {
9362 # ifdef TARGET_NR_ppoll
9363             case TARGET_NR_ppoll:
9364             {
9365                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9366                 target_sigset_t *target_set;
9367                 sigset_t _set, *set = &_set;
9368 
9369                 if (arg3) {
9370                     if (target_to_host_timespec(timeout_ts, arg3)) {
9371                         unlock_user(target_pfd, arg1, 0);
9372                         return -TARGET_EFAULT;
9373                     }
9374                 } else {
9375                     timeout_ts = NULL;
9376                 }
9377 
9378                 if (arg4) {
9379                     if (arg5 != sizeof(target_sigset_t)) {
9380                         unlock_user(target_pfd, arg1, 0);
9381                         return -TARGET_EINVAL;
9382                     }
9383 
9384                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9385                     if (!target_set) {
9386                         unlock_user(target_pfd, arg1, 0);
9387                         return -TARGET_EFAULT;
9388                     }
9389                     target_to_host_sigset(set, target_set);
9390                 } else {
9391                     set = NULL;
9392                 }
9393 
9394                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9395                                            set, SIGSET_T_SIZE));
9396 
9397                 if (!is_error(ret) && arg3) {
9398                     host_to_target_timespec(arg3, timeout_ts);
9399                 }
9400                 if (arg4) {
9401                     unlock_user(target_set, arg4, 0);
9402                 }
9403                 break;
9404             }
9405 # endif
9406 # ifdef TARGET_NR_poll
9407             case TARGET_NR_poll:
9408             {
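                /* Plain poll() is emulated via ppoll(): convert the
                 * millisecond timeout to a timespec and pass no signal mask.
                 */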
9409                 struct timespec ts, *pts;
9410 
9411                 if (arg3 >= 0) {
9412                     /* Convert ms to secs, ns */
9413                     ts.tv_sec = arg3 / 1000;
9414                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9415                     pts = &ts;
9416                 } else {
9417                     /* A negative poll() timeout means "infinite" */
9418                     pts = NULL;
9419                 }
9420                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9421                 break;
9422             }
9423 # endif
9424             default:
9425                 g_assert_not_reached();
9426             }
9427 
9428             if (!is_error(ret)) {
9429                 for (i = 0; i < nfds; i++) {
9430                     target_pfd[i].revents = tswap16(pfd[i].revents);
9431                 }
9432             }
9433             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9434         }
9435         return ret;
9436 #endif
9437     case TARGET_NR_flock:
9438         /* NOTE: the flock constants seem to be the same on every
9439            Linux platform. */
9440         return get_errno(safe_flock(arg1, arg2));
9441     case TARGET_NR_readv:
9442         {
9443             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9444             if (vec != NULL) {
9445                 ret = get_errno(safe_readv(arg1, vec, arg3));
9446                 unlock_iovec(vec, arg2, arg3, 1);
9447             } else {
9448                 ret = -host_to_target_errno(errno);
9449             }
9450         }
9451         return ret;
9452     case TARGET_NR_writev:
9453         {
9454             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9455             if (vec != NULL) {
9456                 ret = get_errno(safe_writev(arg1, vec, arg3));
9457                 unlock_iovec(vec, arg2, arg3, 0);
9458             } else {
9459                 ret = -host_to_target_errno(errno);
9460             }
9461         }
9462         return ret;
9463 #if defined(TARGET_NR_preadv)
9464     case TARGET_NR_preadv:
9465         {
9466             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9467             if (vec != NULL) {
9468                 unsigned long low, high;
9469 
9470                 target_to_host_low_high(arg4, arg5, &low, &high);
9471                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9472                 unlock_iovec(vec, arg2, arg3, 1);
9473             } else {
9474                 ret = -host_to_target_errno(errno);
9475             }
9476         }
9477         return ret;
9478 #endif
9479 #if defined(TARGET_NR_pwritev)
9480     case TARGET_NR_pwritev:
9481         {
9482             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9483             if (vec != NULL) {
9484                 unsigned long low, high;
9485 
9486                 target_to_host_low_high(arg4, arg5, &low, &high);
9487                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9488                 unlock_iovec(vec, arg2, arg3, 0);
9489             } else {
9490                 ret = -host_to_target_errno(errno);
9491             }
9492         }
9493         return ret;
9494 #endif
9495     case TARGET_NR_getsid:
9496         return get_errno(getsid(arg1));
9497 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9498     case TARGET_NR_fdatasync:
9499         return get_errno(fdatasync(arg1));
9500 #endif
9501 #ifdef TARGET_NR__sysctl
9502     case TARGET_NR__sysctl:
9503         /* We don't implement this, but ENOTDIR is always a safe
9504            return value. */
9505         return -TARGET_ENOTDIR;
9506 #endif
9507     case TARGET_NR_sched_getaffinity:
9508         {
9509             unsigned int mask_size;
9510             unsigned long *mask;
9511 
9512             /*
9513              * sched_getaffinity needs multiples of ulong, so we need to
9514              * take care of mismatches between target and host ulong sizes.
9515              */
9516             if (arg2 & (sizeof(abi_ulong) - 1)) {
9517                 return -TARGET_EINVAL;
9518             }
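            /* Round the size up to a whole number of host unsigned longs,
             * since the host kernel rejects sizes that are not a multiple of
             * sizeof(unsigned long).
             */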
9519             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9520 
9521             mask = alloca(mask_size);
9522             memset(mask, 0, mask_size);
9523             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9524 
9525             if (!is_error(ret)) {
9526                 if (ret > arg2) {
9527                     /* More data was returned than will fit in the caller's buffer.
9528                      * This only happens if sizeof(abi_long) < sizeof(long)
9529                      * and the caller passed us a buffer holding an odd number
9530                      * of abi_longs. If the host kernel is actually using the
9531                      * extra 4 bytes then fail EINVAL; otherwise we can just
9532                      * ignore them and only copy the interesting part.
9533                      */
9534                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9535                     if (numcpus > arg2 * 8) {
9536                         return -TARGET_EINVAL;
9537                     }
9538                     ret = arg2;
9539                 }
9540 
9541                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9542                     return -TARGET_EFAULT;
9543                 }
9544             }
9545         }
9546         return ret;
9547     case TARGET_NR_sched_setaffinity:
9548         {
9549             unsigned int mask_size;
9550             unsigned long *mask;
9551 
9552             /*
9553              * sched_setaffinity needs multiples of ulong, so we need to
9554              * take care of mismatches between target and host ulong sizes.
9555              */
9556             if (arg2 & (sizeof(abi_ulong) - 1)) {
9557                 return -TARGET_EINVAL;
9558             }
9559             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9560             mask = alloca(mask_size);
9561 
9562             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9563             if (ret) {
9564                 return ret;
9565             }
9566 
9567             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9568         }
9569     case TARGET_NR_getcpu:
9570         {
9571             unsigned cpu, node;
9572             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9573                                        arg2 ? &node : NULL,
9574                                        NULL));
9575             if (is_error(ret)) {
9576                 return ret;
9577             }
9578             if (arg1 && put_user_u32(cpu, arg1)) {
9579                 return -TARGET_EFAULT;
9580             }
9581             if (arg2 && put_user_u32(node, arg2)) {
9582                 return -TARGET_EFAULT;
9583             }
9584         }
9585         return ret;
9586     case TARGET_NR_sched_setparam:
9587         {
9588             struct sched_param *target_schp;
9589             struct sched_param schp;
9590 
9591             if (arg2 == 0) {
9592                 return -TARGET_EINVAL;
9593             }
9594             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9595                 return -TARGET_EFAULT;
9596             schp.sched_priority = tswap32(target_schp->sched_priority);
9597             unlock_user_struct(target_schp, arg2, 0);
9598             return get_errno(sched_setparam(arg1, &schp));
9599         }
9600     case TARGET_NR_sched_getparam:
9601         {
9602             struct sched_param *target_schp;
9603             struct sched_param schp;
9604 
9605             if (arg2 == 0) {
9606                 return -TARGET_EINVAL;
9607             }
9608             ret = get_errno(sched_getparam(arg1, &schp));
9609             if (!is_error(ret)) {
9610                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9611                     return -TARGET_EFAULT;
9612                 target_schp->sched_priority = tswap32(schp.sched_priority);
9613                 unlock_user_struct(target_schp, arg2, 1);
9614             }
9615         }
9616         return ret;
9617     case TARGET_NR_sched_setscheduler:
9618         {
9619             struct sched_param *target_schp;
9620             struct sched_param schp;
9621             if (arg3 == 0) {
9622                 return -TARGET_EINVAL;
9623             }
9624             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9625                 return -TARGET_EFAULT;
9626             schp.sched_priority = tswap32(target_schp->sched_priority);
9627             unlock_user_struct(target_schp, arg3, 0);
9628             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9629         }
9630     case TARGET_NR_sched_getscheduler:
9631         return get_errno(sched_getscheduler(arg1));
9632     case TARGET_NR_sched_yield:
9633         return get_errno(sched_yield());
9634     case TARGET_NR_sched_get_priority_max:
9635         return get_errno(sched_get_priority_max(arg1));
9636     case TARGET_NR_sched_get_priority_min:
9637         return get_errno(sched_get_priority_min(arg1));
9638     case TARGET_NR_sched_rr_get_interval:
9639         {
9640             struct timespec ts;
9641             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9642             if (!is_error(ret)) {
9643                 ret = host_to_target_timespec(arg2, &ts);
9644             }
9645         }
9646         return ret;
9647     case TARGET_NR_nanosleep:
9648         {
9649             struct timespec req, rem;
9650             target_to_host_timespec(&req, arg1);
9651             ret = get_errno(safe_nanosleep(&req, &rem));
9652             if (is_error(ret) && arg2) {
9653                 host_to_target_timespec(arg2, &rem);
9654             }
9655         }
9656         return ret;
9657     case TARGET_NR_prctl:
9658         switch (arg1) {
9659         case PR_GET_PDEATHSIG:
9660         {
9661             int deathsig;
9662             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9663             if (!is_error(ret) && arg2
9664                 && put_user_ual(deathsig, arg2)) {
9665                 return -TARGET_EFAULT;
9666             }
9667             return ret;
9668         }
9669 #ifdef PR_GET_NAME
9670         case PR_GET_NAME:
9671         {
9672             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9673             if (!name) {
9674                 return -TARGET_EFAULT;
9675             }
9676             ret = get_errno(prctl(arg1, (unsigned long)name,
9677                                   arg3, arg4, arg5));
9678             unlock_user(name, arg2, 16);
9679             return ret;
9680         }
9681         case PR_SET_NAME:
9682         {
9683             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9684             if (!name) {
9685                 return -TARGET_EFAULT;
9686             }
9687             ret = get_errno(prctl(arg1, (unsigned long)name,
9688                                   arg3, arg4, arg5));
9689             unlock_user(name, arg2, 0);
9690             return ret;
9691         }
9692 #endif
9693 #ifdef TARGET_MIPS
9694         case TARGET_PR_GET_FP_MODE:
9695         {
9696             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9697             ret = 0;
9698             if (env->CP0_Status & (1 << CP0St_FR)) {
9699                 ret |= TARGET_PR_FP_MODE_FR;
9700             }
9701             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9702                 ret |= TARGET_PR_FP_MODE_FRE;
9703             }
9704             return ret;
9705         }
9706         case TARGET_PR_SET_FP_MODE:
9707         {
9708             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9709             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9710             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9711             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9712             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9713 
9714             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9715                                             TARGET_PR_FP_MODE_FRE;
9716 
9717             /* If nothing to change, return right away, successfully.  */
9718             if (old_fr == new_fr && old_fre == new_fre) {
9719                 return 0;
9720             }
9721             /* Check the value is valid */
9722             if (arg2 & ~known_bits) {
9723                 return -TARGET_EOPNOTSUPP;
9724             }
9725             /* Setting FRE without FR is not supported.  */
9726             if (new_fre && !new_fr) {
9727                 return -TARGET_EOPNOTSUPP;
9728             }
9729             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9730                 /* FR1 is not supported */
9731                 return -TARGET_EOPNOTSUPP;
9732             }
9733             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9734                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9735                 /* cannot set FR=0 */
9736                 return -TARGET_EOPNOTSUPP;
9737             }
9738             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9739                 /* Cannot set FRE=1 */
9740                 return -TARGET_EOPNOTSUPP;
9741             }
9742 
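            /* When flipping the FR mode, repack the FPU registers: with FR=0
             * a 64-bit value is split across an even/odd register pair, while
             * with FR=1 it lives entirely in the even register, so the loop
             * below moves the high halves accordingly.
             */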
9743             int i;
9744             fpr_t *fpr = env->active_fpu.fpr;
9745             for (i = 0; i < 32 ; i += 2) {
9746                 if (!old_fr && new_fr) {
9747                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9748                 } else if (old_fr && !new_fr) {
9749                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9750                 }
9751             }
9752 
9753             if (new_fr) {
9754                 env->CP0_Status |= (1 << CP0St_FR);
9755                 env->hflags |= MIPS_HFLAG_F64;
9756             } else {
9757                 env->CP0_Status &= ~(1 << CP0St_FR);
9758                 env->hflags &= ~MIPS_HFLAG_F64;
9759             }
9760             if (new_fre) {
9761                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9762                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9763                     env->hflags |= MIPS_HFLAG_FRE;
9764                 }
9765             } else {
9766                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9767                 env->hflags &= ~MIPS_HFLAG_FRE;
9768             }
9769 
9770             return 0;
9771         }
9772 #endif /* MIPS */
9773 #ifdef TARGET_AARCH64
9774         case TARGET_PR_SVE_SET_VL:
9775             /*
9776              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9777              * PR_SVE_VL_INHERIT.  Note the kernel definition
9778              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9779              * even though the current architectural maximum is VQ=16.
9780              */
9781             ret = -TARGET_EINVAL;
9782             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9783                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9784                 CPUARMState *env = cpu_env;
9785                 ARMCPU *cpu = env_archcpu(env);
9786                 uint32_t vq, old_vq;
9787 
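                /* ZCR_EL1.LEN holds the vector length in quadwords minus one;
                 * clamp the requested length to the CPU's maximum and squash
                 * high-numbered state only when narrowing.
                 */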
9788                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9789                 vq = MAX(arg2 / 16, 1);
9790                 vq = MIN(vq, cpu->sve_max_vq);
9791 
9792                 if (vq < old_vq) {
9793                     aarch64_sve_narrow_vq(env, vq);
9794                 }
9795                 env->vfp.zcr_el[1] = vq - 1;
9796                 ret = vq * 16;
9797             }
9798             return ret;
9799         case TARGET_PR_SVE_GET_VL:
9800             ret = -TARGET_EINVAL;
9801             {
9802                 ARMCPU *cpu = env_archcpu(cpu_env);
9803                 if (cpu_isar_feature(aa64_sve, cpu)) {
9804                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9805                 }
9806             }
9807             return ret;
9808         case TARGET_PR_PAC_RESET_KEYS:
9809             {
9810                 CPUARMState *env = cpu_env;
9811                 ARMCPU *cpu = env_archcpu(env);
9812 
9813                 if (arg3 || arg4 || arg5) {
9814                     return -TARGET_EINVAL;
9815                 }
9816                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9817                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9818                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9819                                TARGET_PR_PAC_APGAKEY);
9820                     int ret = 0;
9821                     Error *err = NULL;
9822 
9823                     if (arg2 == 0) {
9824                         arg2 = all;
9825                     } else if (arg2 & ~all) {
9826                         return -TARGET_EINVAL;
9827                     }
9828                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9829                         ret |= qemu_guest_getrandom(&env->keys.apia,
9830                                                     sizeof(ARMPACKey), &err);
9831                     }
9832                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9833                         ret |= qemu_guest_getrandom(&env->keys.apib,
9834                                                     sizeof(ARMPACKey), &err);
9835                     }
9836                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9837                         ret |= qemu_guest_getrandom(&env->keys.apda,
9838                                                     sizeof(ARMPACKey), &err);
9839                     }
9840                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9841                         ret |= qemu_guest_getrandom(&env->keys.apdb,
9842                                                     sizeof(ARMPACKey), &err);
9843                     }
9844                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9845                         ret |= qemu_guest_getrandom(&env->keys.apga,
9846                                                     sizeof(ARMPACKey), &err);
9847                     }
9848                     if (ret != 0) {
9849                         /*
9850                          * Some unknown failure in the crypto.  The best
9851                          * we can do is log it and fail the syscall.
9852                          * The real syscall cannot fail this way.
9853                          */
9854                         qemu_log_mask(LOG_UNIMP,
9855                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
9856                                       error_get_pretty(err));
9857                         error_free(err);
9858                         return -TARGET_EIO;
9859                     }
9860                     return 0;
9861                 }
9862             }
9863             return -TARGET_EINVAL;
9864 #endif /* AARCH64 */
9865         case PR_GET_SECCOMP:
9866         case PR_SET_SECCOMP:
9867             /* Disable seccomp to prevent the target from disabling syscalls
9868              * that we need. */
9869             return -TARGET_EINVAL;
9870         default:
9871             /* Most prctl options have no pointer arguments */
9872             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9873         }
9874         break;
9875 #ifdef TARGET_NR_arch_prctl
9876     case TARGET_NR_arch_prctl:
9877 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9878         return do_arch_prctl(cpu_env, arg1, arg2);
9879 #else
9880 #error unreachable
9881 #endif
9882 #endif
9883 #ifdef TARGET_NR_pread64
9884     case TARGET_NR_pread64:
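        /* On ABIs where 64-bit syscall arguments must live in aligned
         * register pairs, an implicit padding argument shifts the offset
         * halves up by one register.
         */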
9885         if (regpairs_aligned(cpu_env, num)) {
9886             arg4 = arg5;
9887             arg5 = arg6;
9888         }
9889         if (arg2 == 0 && arg3 == 0) {
9890             /* Special-case NULL buffer and zero length, which should succeed */
9891             p = 0;
9892         } else {
9893             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9894             if (!p) {
9895                 return -TARGET_EFAULT;
9896             }
9897         }
9898         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9899         unlock_user(p, arg2, ret);
9900         return ret;
9901     case TARGET_NR_pwrite64:
9902         if (regpairs_aligned(cpu_env, num)) {
9903             arg4 = arg5;
9904             arg5 = arg6;
9905         }
9906         if (arg2 == 0 && arg3 == 0) {
9907             /* Special-case NULL buffer and zero length, which should succeed */
9908             p = 0;
9909         } else {
9910             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9911             if (!p) {
9912                 return -TARGET_EFAULT;
9913             }
9914         }
9915         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9916         unlock_user(p, arg2, 0);
9917         return ret;
9918 #endif
9919     case TARGET_NR_getcwd:
9920         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9921             return -TARGET_EFAULT;
9922         ret = get_errno(sys_getcwd1(p, arg2));
9923         unlock_user(p, arg1, ret);
9924         return ret;
9925     case TARGET_NR_capget:
9926     case TARGET_NR_capset:
9927     {
9928         struct target_user_cap_header *target_header;
9929         struct target_user_cap_data *target_data = NULL;
9930         struct __user_cap_header_struct header;
9931         struct __user_cap_data_struct data[2];
9932         struct __user_cap_data_struct *dataptr = NULL;
9933         int i, target_datalen;
9934         int data_items = 1;
9935 
9936         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9937             return -TARGET_EFAULT;
9938         }
9939         header.version = tswap32(target_header->version);
9940         header.pid = tswap32(target_header->pid);
9941 
9942         if (header.version != _LINUX_CAPABILITY_VERSION) {
9943             /* Versions 2 and up take a pointer to two user_data structs */
9944             data_items = 2;
9945         }
9946 
9947         target_datalen = sizeof(*target_data) * data_items;
9948 
9949         if (arg2) {
9950             if (num == TARGET_NR_capget) {
9951                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9952             } else {
9953                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9954             }
9955             if (!target_data) {
9956                 unlock_user_struct(target_header, arg1, 0);
9957                 return -TARGET_EFAULT;
9958             }
9959 
9960             if (num == TARGET_NR_capset) {
9961                 for (i = 0; i < data_items; i++) {
9962                     data[i].effective = tswap32(target_data[i].effective);
9963                     data[i].permitted = tswap32(target_data[i].permitted);
9964                     data[i].inheritable = tswap32(target_data[i].inheritable);
9965                 }
9966             }
9967 
9968             dataptr = data;
9969         }
9970 
9971         if (num == TARGET_NR_capget) {
9972             ret = get_errno(capget(&header, dataptr));
9973         } else {
9974             ret = get_errno(capset(&header, dataptr));
9975         }
9976 
9977         /* The kernel always updates version for both capget and capset */
9978         target_header->version = tswap32(header.version);
9979         unlock_user_struct(target_header, arg1, 1);
9980 
9981         if (arg2) {
9982             if (num == TARGET_NR_capget) {
9983                 for (i = 0; i < data_items; i++) {
9984                     target_data[i].effective = tswap32(data[i].effective);
9985                     target_data[i].permitted = tswap32(data[i].permitted);
9986                     target_data[i].inheritable = tswap32(data[i].inheritable);
9987                 }
9988                 unlock_user(target_data, arg2, target_datalen);
9989             } else {
9990                 unlock_user(target_data, arg2, 0);
9991             }
9992         }
9993         return ret;
9994     }
9995     case TARGET_NR_sigaltstack:
9996         return do_sigaltstack(arg1, arg2,
9997                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9998 
9999 #ifdef CONFIG_SENDFILE
10000 #ifdef TARGET_NR_sendfile
10001     case TARGET_NR_sendfile:
10002     {
10003         off_t *offp = NULL;
10004         off_t off;
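        /* A null offset pointer makes sendfile() use and update the current
         * file offset of in_fd, matching the guest passing NULL.
         */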
10005         if (arg3) {
10006             ret = get_user_sal(off, arg3);
10007             if (is_error(ret)) {
10008                 return ret;
10009             }
10010             offp = &off;
10011         }
10012         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10013         if (!is_error(ret) && arg3) {
10014             abi_long ret2 = put_user_sal(off, arg3);
10015             if (is_error(ret2)) {
10016                 ret = ret2;
10017             }
10018         }
10019         return ret;
10020     }
10021 #endif
10022 #ifdef TARGET_NR_sendfile64
10023     case TARGET_NR_sendfile64:
10024     {
10025         off_t *offp = NULL;
10026         off_t off;
10027         if (arg3) {
10028             ret = get_user_s64(off, arg3);
10029             if (is_error(ret)) {
10030                 return ret;
10031             }
10032             offp = &off;
10033         }
10034         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10035         if (!is_error(ret) && arg3) {
10036             abi_long ret2 = put_user_s64(off, arg3);
10037             if (is_error(ret2)) {
10038                 ret = ret2;
10039             }
10040         }
10041         return ret;
10042     }
10043 #endif
10044 #endif
10045 #ifdef TARGET_NR_vfork
10046     case TARGET_NR_vfork:
10047         return get_errno(do_fork(cpu_env,
10048                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10049                          0, 0, 0, 0));
10050 #endif
10051 #ifdef TARGET_NR_ugetrlimit
10052     case TARGET_NR_ugetrlimit:
10053     {
10054         struct rlimit rlim;
10055         int resource = target_to_host_resource(arg1);
10056         ret = get_errno(getrlimit(resource, &rlim));
10057         if (!is_error(ret)) {
10058             struct target_rlimit *target_rlim;
10059             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10060                 return -TARGET_EFAULT;
10061             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10062             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10063             unlock_user_struct(target_rlim, arg2, 1);
10064         }
10065         return ret;
10066     }
10067 #endif
10068 #ifdef TARGET_NR_truncate64
10069     case TARGET_NR_truncate64:
10070         if (!(p = lock_user_string(arg1)))
10071             return -TARGET_EFAULT;
10072         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10073         unlock_user(p, arg1, 0);
10074         return ret;
10075 #endif
10076 #ifdef TARGET_NR_ftruncate64
10077     case TARGET_NR_ftruncate64:
10078         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10079 #endif
10080 #ifdef TARGET_NR_stat64
10081     case TARGET_NR_stat64:
10082         if (!(p = lock_user_string(arg1))) {
10083             return -TARGET_EFAULT;
10084         }
10085         ret = get_errno(stat(path(p), &st));
10086         unlock_user(p, arg1, 0);
10087         if (!is_error(ret))
10088             ret = host_to_target_stat64(cpu_env, arg2, &st);
10089         return ret;
10090 #endif
10091 #ifdef TARGET_NR_lstat64
10092     case TARGET_NR_lstat64:
10093         if (!(p = lock_user_string(arg1))) {
10094             return -TARGET_EFAULT;
10095         }
10096         ret = get_errno(lstat(path(p), &st));
10097         unlock_user(p, arg1, 0);
10098         if (!is_error(ret))
10099             ret = host_to_target_stat64(cpu_env, arg2, &st);
10100         return ret;
10101 #endif
10102 #ifdef TARGET_NR_fstat64
10103     case TARGET_NR_fstat64:
10104         ret = get_errno(fstat(arg1, &st));
10105         if (!is_error(ret))
10106             ret = host_to_target_stat64(cpu_env, arg2, &st);
10107         return ret;
10108 #endif
10109 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10110 #ifdef TARGET_NR_fstatat64
10111     case TARGET_NR_fstatat64:
10112 #endif
10113 #ifdef TARGET_NR_newfstatat
10114     case TARGET_NR_newfstatat:
10115 #endif
10116         if (!(p = lock_user_string(arg2))) {
10117             return -TARGET_EFAULT;
10118         }
10119         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10120         unlock_user(p, arg2, 0);
10121         if (!is_error(ret))
10122             ret = host_to_target_stat64(cpu_env, arg3, &st);
10123         return ret;
10124 #endif
10125 #ifdef TARGET_NR_lchown
10126     case TARGET_NR_lchown:
10127         if (!(p = lock_user_string(arg1)))
10128             return -TARGET_EFAULT;
10129         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10130         unlock_user(p, arg1, 0);
10131         return ret;
10132 #endif
10133 #ifdef TARGET_NR_getuid
10134     case TARGET_NR_getuid:
10135         return get_errno(high2lowuid(getuid()));
10136 #endif
10137 #ifdef TARGET_NR_getgid
10138     case TARGET_NR_getgid:
10139         return get_errno(high2lowgid(getgid()));
10140 #endif
10141 #ifdef TARGET_NR_geteuid
10142     case TARGET_NR_geteuid:
10143         return get_errno(high2lowuid(geteuid()));
10144 #endif
10145 #ifdef TARGET_NR_getegid
10146     case TARGET_NR_getegid:
10147         return get_errno(high2lowgid(getegid()));
10148 #endif
10149     case TARGET_NR_setreuid:
10150         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10151     case TARGET_NR_setregid:
10152         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10153     case TARGET_NR_getgroups:
10154         {
10155             int gidsetsize = arg1;
10156             target_id *target_grouplist;
10157             gid_t *grouplist;
10158             int i;
10159 
10160             grouplist = alloca(gidsetsize * sizeof(gid_t));
10161             ret = get_errno(getgroups(gidsetsize, grouplist));
10162             if (gidsetsize == 0)
10163                 return ret;
10164             if (!is_error(ret)) {
10165                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10166                 if (!target_grouplist)
10167                     return -TARGET_EFAULT;
10168                 for (i = 0; i < ret; i++)
10169                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10170                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10171             }
10172         }
10173         return ret;
10174     case TARGET_NR_setgroups:
10175         {
10176             int gidsetsize = arg1;
10177             target_id *target_grouplist;
10178             gid_t *grouplist = NULL;
10179             int i;
10180             if (gidsetsize) {
10181                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10182                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10183                 if (!target_grouplist) {
10184                     return -TARGET_EFAULT;
10185                 }
10186                 for (i = 0; i < gidsetsize; i++) {
10187                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10188                 }
10189                 unlock_user(target_grouplist, arg2, 0);
10190             }
10191             return get_errno(setgroups(gidsetsize, grouplist));
10192         }
10193     case TARGET_NR_fchown:
10194         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10195 #if defined(TARGET_NR_fchownat)
10196     case TARGET_NR_fchownat:
10197         if (!(p = lock_user_string(arg2)))
10198             return -TARGET_EFAULT;
10199         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10200                                  low2highgid(arg4), arg5));
10201         unlock_user(p, arg2, 0);
10202         return ret;
10203 #endif
10204 #ifdef TARGET_NR_setresuid
10205     case TARGET_NR_setresuid:
10206         return get_errno(sys_setresuid(low2highuid(arg1),
10207                                        low2highuid(arg2),
10208                                        low2highuid(arg3)));
10209 #endif
10210 #ifdef TARGET_NR_getresuid
10211     case TARGET_NR_getresuid:
10212         {
10213             uid_t ruid, euid, suid;
10214             ret = get_errno(getresuid(&ruid, &euid, &suid));
10215             if (!is_error(ret)) {
10216                 if (put_user_id(high2lowuid(ruid), arg1)
10217                     || put_user_id(high2lowuid(euid), arg2)
10218                     || put_user_id(high2lowuid(suid), arg3))
10219                     return -TARGET_EFAULT;
10220             }
10221         }
10222         return ret;
10223 #endif
10224 #ifdef TARGET_NR_getresgid
10225     case TARGET_NR_setresgid:
10226         return get_errno(sys_setresgid(low2highgid(arg1),
10227                                        low2highgid(arg2),
10228                                        low2highgid(arg3)));
10229 #endif
10230 #ifdef TARGET_NR_getresgid
10231     case TARGET_NR_getresgid:
10232         {
10233             gid_t rgid, egid, sgid;
10234             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10235             if (!is_error(ret)) {
10236                 if (put_user_id(high2lowgid(rgid), arg1)
10237                     || put_user_id(high2lowgid(egid), arg2)
10238                     || put_user_id(high2lowgid(sgid), arg3))
10239                     return -TARGET_EFAULT;
10240             }
10241         }
10242         return ret;
10243 #endif
10244 #ifdef TARGET_NR_chown
10245     case TARGET_NR_chown:
10246         if (!(p = lock_user_string(arg1)))
10247             return -TARGET_EFAULT;
10248         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10249         unlock_user(p, arg1, 0);
10250         return ret;
10251 #endif
10252     case TARGET_NR_setuid:
10253         return get_errno(sys_setuid(low2highuid(arg1)));
10254     case TARGET_NR_setgid:
10255         return get_errno(sys_setgid(low2highgid(arg1)));
10256     case TARGET_NR_setfsuid:
10257         return get_errno(setfsuid(arg1));
10258     case TARGET_NR_setfsgid:
10259         return get_errno(setfsgid(arg1));
10260 
10261 #ifdef TARGET_NR_lchown32
10262     case TARGET_NR_lchown32:
10263         if (!(p = lock_user_string(arg1)))
10264             return -TARGET_EFAULT;
10265         ret = get_errno(lchown(p, arg2, arg3));
10266         unlock_user(p, arg1, 0);
10267         return ret;
10268 #endif
10269 #ifdef TARGET_NR_getuid32
10270     case TARGET_NR_getuid32:
10271         return get_errno(getuid());
10272 #endif
10273 
10274 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10275    /* Alpha specific */
10276     case TARGET_NR_getxuid:
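        /* Alpha returns two values here: the real ID in v0 (the normal
         * syscall return) and the effective ID in register a4.
         */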
10277         {
10278             uid_t euid;
10279             euid = geteuid();
10280             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10281         }
10282         return get_errno(getuid());
10283 #endif
10284 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10285    /* Alpha specific */
10286     case TARGET_NR_getxgid:
10287         {
10288             gid_t egid;
10289             egid = getegid();
10290             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10291         }
10292         return get_errno(getgid());
10293 #endif
10294 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10295     /* Alpha specific */
10296     case TARGET_NR_osf_getsysinfo:
10297         ret = -TARGET_EOPNOTSUPP;
10298         switch (arg1) {
10299           case TARGET_GSI_IEEE_FP_CONTROL:
10300             {
10301                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10302                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10303 
10304                 swcr &= ~SWCR_STATUS_MASK;
10305                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10306 
10307                 if (put_user_u64(swcr, arg2))
10308                     return -TARGET_EFAULT;
10309                 ret = 0;
10310             }
10311             break;
10312 
10313           /* case GSI_IEEE_STATE_AT_SIGNAL:
10314              -- Not implemented in linux kernel.
10315              case GSI_UACPROC:
10316              -- Retrieves current unaligned access state; not much used.
10317              case GSI_PROC_TYPE:
10318              -- Retrieves implver information; surely not used.
10319              case GSI_GET_HWRPB:
10320              -- Grabs a copy of the HWRPB; surely not used.
10321           */
10322         }
10323         return ret;
10324 #endif
10325 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10326     /* Alpha specific */
10327     case TARGET_NR_osf_setsysinfo:
10328         ret = -TARGET_EOPNOTSUPP;
10329         switch (arg1) {
10330           case TARGET_SSI_IEEE_FP_CONTROL:
10331             {
10332                 uint64_t swcr, fpcr;
10333 
10334                 if (get_user_u64(swcr, arg2)) {
10335                     return -TARGET_EFAULT;
10336                 }
10337 
10338                 /*
10339                  * The kernel calls swcr_update_status to update the
10340                  * status bits from the fpcr at every point that it
10341                  * could be queried.  Therefore, we store the status
10342                  * bits only in FPCR.
10343                  */
10344                 ((CPUAlphaState *)cpu_env)->swcr
10345                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10346 
10347                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10348                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10349                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10350                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10351                 ret = 0;
10352             }
10353             break;
10354 
10355           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10356             {
10357                 uint64_t exc, fpcr, fex;
10358 
10359                 if (get_user_u64(exc, arg2)) {
10360                     return -TARGET_EFAULT;
10361                 }
10362                 exc &= SWCR_STATUS_MASK;
10363                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10364 
10365                 /* Old exceptions are not signaled.  */
10366                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10367                 fex = exc & ~fex;
10368                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10369                 fex &= ((CPUArchState *)cpu_env)->swcr;
10370 
10371                 /* Update the hardware fpcr.  */
10372                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10373                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10374 
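                /* Only exceptions that were not already pending and whose
                 * traps the guest has enabled result in a SIGFPE being
                 * queued.
                 */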
10375                 if (fex) {
10376                     int si_code = TARGET_FPE_FLTUNK;
10377                     target_siginfo_t info;
10378 
10379                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10380                         si_code = TARGET_FPE_FLTUND;
10381                     }
10382                     if (fex & SWCR_TRAP_ENABLE_INE) {
10383                         si_code = TARGET_FPE_FLTRES;
10384                     }
10385                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10386                         si_code = TARGET_FPE_FLTUND;
10387                     }
10388                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10389                         si_code = TARGET_FPE_FLTOVF;
10390                     }
10391                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10392                         si_code = TARGET_FPE_FLTDIV;
10393                     }
10394                     if (fex & SWCR_TRAP_ENABLE_INV) {
10395                         si_code = TARGET_FPE_FLTINV;
10396                     }
10397 
10398                     info.si_signo = SIGFPE;
10399                     info.si_errno = 0;
10400                     info.si_code = si_code;
10401                     info._sifields._sigfault._addr
10402                         = ((CPUArchState *)cpu_env)->pc;
10403                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10404                                  QEMU_SI_FAULT, &info);
10405                 }
10406                 ret = 0;
10407             }
10408             break;
10409 
10410           /* case SSI_NVPAIRS:
10411              -- Used with SSIN_UACPROC to enable unaligned accesses.
10412              case SSI_IEEE_STATE_AT_SIGNAL:
10413              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10414              -- Not implemented in linux kernel
10415           */
10416         }
10417         return ret;
10418 #endif
10419 #ifdef TARGET_NR_osf_sigprocmask
10420     /* Alpha specific.  */
10421     case TARGET_NR_osf_sigprocmask:
10422         {
10423             abi_ulong mask;
10424             int how;
10425             sigset_t set, oldset;
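            /* The OSF flavour of sigprocmask returns the old mask as the
             * syscall result instead of writing it through a pointer.
             */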
10426 
10427             switch (arg1) {
10428             case TARGET_SIG_BLOCK:
10429                 how = SIG_BLOCK;
10430                 break;
10431             case TARGET_SIG_UNBLOCK:
10432                 how = SIG_UNBLOCK;
10433                 break;
10434             case TARGET_SIG_SETMASK:
10435                 how = SIG_SETMASK;
10436                 break;
10437             default:
10438                 return -TARGET_EINVAL;
10439             }
10440             mask = arg2;
10441             target_to_host_old_sigset(&set, &mask);
10442             ret = do_sigprocmask(how, &set, &oldset);
10443             if (!ret) {
10444                 host_to_target_old_sigset(&mask, &oldset);
10445                 ret = mask;
10446             }
10447         }
10448         return ret;
10449 #endif
10450 
10451 #ifdef TARGET_NR_getgid32
10452     case TARGET_NR_getgid32:
10453         return get_errno(getgid());
10454 #endif
10455 #ifdef TARGET_NR_geteuid32
10456     case TARGET_NR_geteuid32:
10457         return get_errno(geteuid());
10458 #endif
10459 #ifdef TARGET_NR_getegid32
10460     case TARGET_NR_getegid32:
10461         return get_errno(getegid());
10462 #endif
10463 #ifdef TARGET_NR_setreuid32
10464     case TARGET_NR_setreuid32:
10465         return get_errno(setreuid(arg1, arg2));
10466 #endif
10467 #ifdef TARGET_NR_setregid32
10468     case TARGET_NR_setregid32:
10469         return get_errno(setregid(arg1, arg2));
10470 #endif
10471 #ifdef TARGET_NR_getgroups32
10472     case TARGET_NR_getgroups32:
10473         {
10474             int gidsetsize = arg1;
10475             uint32_t *target_grouplist;
10476             gid_t *grouplist;
10477             int i;
10478 
10479             grouplist = alloca(gidsetsize * sizeof(gid_t));
10480             ret = get_errno(getgroups(gidsetsize, grouplist));
10481             if (gidsetsize == 0)
10482                 return ret;
10483             if (!is_error(ret)) {
10484                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10485                 if (!target_grouplist) {
10486                     return -TARGET_EFAULT;
10487                 }
10488                 for (i = 0; i < ret; i++)
10489                     target_grouplist[i] = tswap32(grouplist[i]);
10490                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10491             }
10492         }
10493         return ret;
10494 #endif
10495 #ifdef TARGET_NR_setgroups32
10496     case TARGET_NR_setgroups32:
10497         {
10498             int gidsetsize = arg1;
10499             uint32_t *target_grouplist;
10500             gid_t *grouplist;
10501             int i;
10502 
10503             grouplist = alloca(gidsetsize * sizeof(gid_t));
10504             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10505             if (!target_grouplist) {
10506                 return -TARGET_EFAULT;
10507             }
10508             for (i = 0; i < gidsetsize; i++)
10509                 grouplist[i] = tswap32(target_grouplist[i]);
10510             unlock_user(target_grouplist, arg2, 0);
10511             return get_errno(setgroups(gidsetsize, grouplist));
10512         }
10513 #endif
10514 #ifdef TARGET_NR_fchown32
10515     case TARGET_NR_fchown32:
10516         return get_errno(fchown(arg1, arg2, arg3));
10517 #endif
10518 #ifdef TARGET_NR_setresuid32
10519     case TARGET_NR_setresuid32:
10520         return get_errno(sys_setresuid(arg1, arg2, arg3));
10521 #endif
10522 #ifdef TARGET_NR_getresuid32
10523     case TARGET_NR_getresuid32:
10524         {
10525             uid_t ruid, euid, suid;
10526             ret = get_errno(getresuid(&ruid, &euid, &suid));
10527             if (!is_error(ret)) {
10528                 if (put_user_u32(ruid, arg1)
10529                     || put_user_u32(euid, arg2)
10530                     || put_user_u32(suid, arg3))
10531                     return -TARGET_EFAULT;
10532             }
10533         }
10534         return ret;
10535 #endif
10536 #ifdef TARGET_NR_setresgid32
10537     case TARGET_NR_setresgid32:
10538         return get_errno(sys_setresgid(arg1, arg2, arg3));
10539 #endif
10540 #ifdef TARGET_NR_getresgid32
10541     case TARGET_NR_getresgid32:
10542         {
10543             gid_t rgid, egid, sgid;
10544             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10545             if (!is_error(ret)) {
10546                 if (put_user_u32(rgid, arg1)
10547                     || put_user_u32(egid, arg2)
10548                     || put_user_u32(sgid, arg3))
10549                     return -TARGET_EFAULT;
10550             }
10551         }
10552         return ret;
10553 #endif
10554 #ifdef TARGET_NR_chown32
10555     case TARGET_NR_chown32:
10556         if (!(p = lock_user_string(arg1)))
10557             return -TARGET_EFAULT;
10558         ret = get_errno(chown(p, arg2, arg3));
10559         unlock_user(p, arg1, 0);
10560         return ret;
10561 #endif
10562 #ifdef TARGET_NR_setuid32
10563     case TARGET_NR_setuid32:
10564         return get_errno(sys_setuid(arg1));
10565 #endif
10566 #ifdef TARGET_NR_setgid32
10567     case TARGET_NR_setgid32:
10568         return get_errno(sys_setgid(arg1));
10569 #endif
10570 #ifdef TARGET_NR_setfsuid32
10571     case TARGET_NR_setfsuid32:
10572         return get_errno(setfsuid(arg1));
10573 #endif
10574 #ifdef TARGET_NR_setfsgid32
10575     case TARGET_NR_setfsgid32:
10576         return get_errno(setfsgid(arg1));
10577 #endif
10578 #ifdef TARGET_NR_mincore
10579     case TARGET_NR_mincore:
10580         {
10581             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10582             if (!a) {
10583                 return -TARGET_ENOMEM;
10584             }
10585             p = lock_user_string(arg3);
10586             if (!p) {
10587                 ret = -TARGET_EFAULT;
10588             } else {
10589                 ret = get_errno(mincore(a, arg2, p));
10590                 unlock_user(p, arg3, ret);
10591             }
10592             unlock_user(a, arg1, 0);
10593         }
10594         return ret;
10595 #endif
10596 #ifdef TARGET_NR_arm_fadvise64_64
10597     case TARGET_NR_arm_fadvise64_64:
10598         /* arm_fadvise64_64 looks like fadvise64_64 but
10599          * with different argument order: fd, advice, offset, len
10600          * rather than the usual fd, offset, len, advice.
10601          * Note that offset and len are both 64-bit so appear as
10602          * pairs of 32-bit registers.
10603          */
10604         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10605                             target_offset64(arg5, arg6), arg2);
10606         return -host_to_target_errno(ret);
10607 #endif
10608 
10609 #if TARGET_ABI_BITS == 32
10610 
10611 #ifdef TARGET_NR_fadvise64_64
10612     case TARGET_NR_fadvise64_64:
10613 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10614         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10615         ret = arg2;
10616         arg2 = arg3;
10617         arg3 = arg4;
10618         arg4 = arg5;
10619         arg5 = arg6;
10620         arg6 = ret;
10621 #else
10622         /* 6 args: fd, offset (high, low), len (high, low), advice */
10623         if (regpairs_aligned(cpu_env, num)) {
10624             /* offset is in (3,4), len in (5,6) and advice in 7 */
10625             arg2 = arg3;
10626             arg3 = arg4;
10627             arg4 = arg5;
10628             arg5 = arg6;
10629             arg6 = arg7;
10630         }
10631 #endif
10632         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10633                             target_offset64(arg4, arg5), arg6);
10634         return -host_to_target_errno(ret);
10635 #endif
10636 
10637 #ifdef TARGET_NR_fadvise64
10638     case TARGET_NR_fadvise64:
10639         /* 5 args: fd, offset (high, low), len, advice */
10640         if (regpairs_aligned(cpu_env, num)) {
10641             /* offset is in (3,4), len in 5 and advice in 6 */
10642             arg2 = arg3;
10643             arg3 = arg4;
10644             arg4 = arg5;
10645             arg5 = arg6;
10646         }
10647         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10648         return -host_to_target_errno(ret);
10649 #endif
10650 
10651 #else /* not a 32-bit ABI */
10652 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10653 #ifdef TARGET_NR_fadvise64_64
10654     case TARGET_NR_fadvise64_64:
10655 #endif
10656 #ifdef TARGET_NR_fadvise64
10657     case TARGET_NR_fadvise64:
10658 #endif
10659 #ifdef TARGET_S390X
10660         switch (arg4) {
10661         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10662         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10663         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10664         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10665         default: break;
10666         }
10667 #endif
10668         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10669 #endif
10670 #endif /* end of 64-bit ABI fadvise handling */
10671 
10672 #ifdef TARGET_NR_madvise
10673     case TARGET_NR_madvise:
10674         /* A straight passthrough may not be safe because qemu sometimes
10675            turns private file-backed mappings into anonymous mappings.
10676            This will break MADV_DONTNEED.
10677            This is a hint, so ignoring and returning success is ok.  */
10678         return 0;
10679 #endif
10680 #if TARGET_ABI_BITS == 32
10681     case TARGET_NR_fcntl64:
10682     {
10683         int cmd;
10684         struct flock64 fl;
10685         from_flock64_fn *copyfrom = copy_from_user_flock64;
10686         to_flock64_fn *copyto = copy_to_user_flock64;
10687 
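              /* Old-ABI ARM lays out struct flock64 without the EABI's 64-bit
               * alignment padding, so OABI guests need different copy helpers. */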
10688 #ifdef TARGET_ARM
10689         if (!((CPUARMState *)cpu_env)->eabi) {
10690             copyfrom = copy_from_user_oabi_flock64;
10691             copyto = copy_to_user_oabi_flock64;
10692         }
10693 #endif
10694 
10695         cmd = target_to_host_fcntl_cmd(arg2);
10696         if (cmd == -TARGET_EINVAL) {
10697             return cmd;
10698         }
10699 
10700         switch (arg2) {
10701         case TARGET_F_GETLK64:
10702             ret = copyfrom(&fl, arg3);
10703             if (ret) {
10704                 break;
10705             }
10706             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10707             if (ret == 0) {
10708                 ret = copyto(arg3, &fl);
10709             }
10710             break;
10711 
10712         case TARGET_F_SETLK64:
10713         case TARGET_F_SETLKW64:
10714             ret = copyfrom(&fl, arg3);
10715             if (ret) {
10716                 break;
10717             }
10718             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10719             break;
10720         default:
10721             ret = do_fcntl(arg1, arg2, arg3);
10722             break;
10723         }
10724         return ret;
10725     }
10726 #endif
10727 #ifdef TARGET_NR_cacheflush
10728     case TARGET_NR_cacheflush:
10729         /* self-modifying code is handled automatically, so nothing needed */
10730         return 0;
10731 #endif
10732 #ifdef TARGET_NR_getpagesize
10733     case TARGET_NR_getpagesize:
10734         return TARGET_PAGE_SIZE;
10735 #endif
10736     case TARGET_NR_gettid:
10737         return get_errno(sys_gettid());
10738 #ifdef TARGET_NR_readahead
10739     case TARGET_NR_readahead:
10740 #if TARGET_ABI_BITS == 32
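              /* The 64-bit offset is passed as a pair of 32-bit registers; some
               * ABIs require the pair to start on an even register, leaving a
               * hole after the fd argument. */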
10741         if (regpairs_aligned(cpu_env, num)) {
10742             arg2 = arg3;
10743             arg3 = arg4;
10744             arg4 = arg5;
10745         }
10746         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10747 #else
10748         ret = get_errno(readahead(arg1, arg2, arg3));
10749 #endif
10750         return ret;
10751 #endif
10752 #ifdef CONFIG_ATTR
10753 #ifdef TARGET_NR_setxattr
10754     case TARGET_NR_listxattr:
10755     case TARGET_NR_llistxattr:
10756     {
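              /* A zero arg2 (NULL list) means the guest is only asking for the
               * required buffer size, so leave b as NULL. */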
10757         void *p, *b = 0;
10758         if (arg2) {
10759             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10760             if (!b) {
10761                 return -TARGET_EFAULT;
10762             }
10763         }
10764         p = lock_user_string(arg1);
10765         if (p) {
10766             if (num == TARGET_NR_listxattr) {
10767                 ret = get_errno(listxattr(p, b, arg3));
10768             } else {
10769                 ret = get_errno(llistxattr(p, b, arg3));
10770             }
10771         } else {
10772             ret = -TARGET_EFAULT;
10773         }
10774         unlock_user(p, arg1, 0);
10775         unlock_user(b, arg2, arg3);
10776         return ret;
10777     }
10778     case TARGET_NR_flistxattr:
10779     {
10780         void *b = 0;
10781         if (arg2) {
10782             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10783             if (!b) {
10784                 return -TARGET_EFAULT;
10785             }
10786         }
10787         ret = get_errno(flistxattr(arg1, b, arg3));
10788         unlock_user(b, arg2, arg3);
10789         return ret;
10790     }
10791     case TARGET_NR_setxattr:
10792     case TARGET_NR_lsetxattr:
10793         {
10794             void *p, *n, *v = 0;
10795             if (arg3) {
10796                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10797                 if (!v) {
10798                     return -TARGET_EFAULT;
10799                 }
10800             }
10801             p = lock_user_string(arg1);
10802             n = lock_user_string(arg2);
10803             if (p && n) {
10804                 if (num == TARGET_NR_setxattr) {
10805                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10806                 } else {
10807                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10808                 }
10809             } else {
10810                 ret = -TARGET_EFAULT;
10811             }
10812             unlock_user(p, arg1, 0);
10813             unlock_user(n, arg2, 0);
10814             unlock_user(v, arg3, 0);
10815         }
10816         return ret;
10817     case TARGET_NR_fsetxattr:
10818         {
10819             void *n, *v = 0;
10820             if (arg3) {
10821                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10822                 if (!v) {
10823                     return -TARGET_EFAULT;
10824                 }
10825             }
10826             n = lock_user_string(arg2);
10827             if (n) {
10828                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10829             } else {
10830                 ret = -TARGET_EFAULT;
10831             }
10832             unlock_user(n, arg2, 0);
10833             unlock_user(v, arg3, 0);
10834         }
10835         return ret;
10836     case TARGET_NR_getxattr:
10837     case TARGET_NR_lgetxattr:
10838         {
10839             void *p, *n, *v = 0;
10840             if (arg3) {
10841                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10842                 if (!v) {
10843                     return -TARGET_EFAULT;
10844                 }
10845             }
10846             p = lock_user_string(arg1);
10847             n = lock_user_string(arg2);
10848             if (p && n) {
10849                 if (num == TARGET_NR_getxattr) {
10850                     ret = get_errno(getxattr(p, n, v, arg4));
10851                 } else {
10852                     ret = get_errno(lgetxattr(p, n, v, arg4));
10853                 }
10854             } else {
10855                 ret = -TARGET_EFAULT;
10856             }
10857             unlock_user(p, arg1, 0);
10858             unlock_user(n, arg2, 0);
10859             unlock_user(v, arg3, arg4);
10860         }
10861         return ret;
10862     case TARGET_NR_fgetxattr:
10863         {
10864             void *n, *v = 0;
10865             if (arg3) {
10866                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10867                 if (!v) {
10868                     return -TARGET_EFAULT;
10869                 }
10870             }
10871             n = lock_user_string(arg2);
10872             if (n) {
10873                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10874             } else {
10875                 ret = -TARGET_EFAULT;
10876             }
10877             unlock_user(n, arg2, 0);
10878             unlock_user(v, arg3, arg4);
10879         }
10880         return ret;
10881     case TARGET_NR_removexattr:
10882     case TARGET_NR_lremovexattr:
10883         {
10884             void *p, *n;
10885             p = lock_user_string(arg1);
10886             n = lock_user_string(arg2);
10887             if (p && n) {
10888                 if (num == TARGET_NR_removexattr) {
10889                     ret = get_errno(removexattr(p, n));
10890                 } else {
10891                     ret = get_errno(lremovexattr(p, n));
10892                 }
10893             } else {
10894                 ret = -TARGET_EFAULT;
10895             }
10896             unlock_user(p, arg1, 0);
10897             unlock_user(n, arg2, 0);
10898         }
10899         return ret;
10900     case TARGET_NR_fremovexattr:
10901         {
10902             void *n;
10903             n = lock_user_string(arg2);
10904             if (n) {
10905                 ret = get_errno(fremovexattr(arg1, n));
10906             } else {
10907                 ret = -TARGET_EFAULT;
10908             }
10909             unlock_user(n, arg2, 0);
10910         }
10911         return ret;
10912 #endif
10913 #endif /* CONFIG_ATTR */
10914 #ifdef TARGET_NR_set_thread_area
10915     case TARGET_NR_set_thread_area:
10916 #if defined(TARGET_MIPS)
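            /* The MIPS TLS pointer lives in the UserLocal register, which the
             * guest reads with the rdhwr instruction. */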
10917       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10918       return 0;
10919 #elif defined(TARGET_CRIS)
10920       if (arg1 & 0xff)
10921           ret = -TARGET_EINVAL;
10922       else {
10923           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10924           ret = 0;
10925       }
10926       return ret;
10927 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10928       return do_set_thread_area(cpu_env, arg1);
10929 #elif defined(TARGET_M68K)
10930       {
10931           TaskState *ts = cpu->opaque;
10932           ts->tp_value = arg1;
10933           return 0;
10934       }
10935 #else
10936       return -TARGET_ENOSYS;
10937 #endif
10938 #endif
10939 #ifdef TARGET_NR_get_thread_area
10940     case TARGET_NR_get_thread_area:
10941 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10942         return do_get_thread_area(cpu_env, arg1);
10943 #elif defined(TARGET_M68K)
10944         {
10945             TaskState *ts = cpu->opaque;
10946             return ts->tp_value;
10947         }
10948 #else
10949         return -TARGET_ENOSYS;
10950 #endif
10951 #endif
10952 #ifdef TARGET_NR_getdomainname
10953     case TARGET_NR_getdomainname:
10954         return -TARGET_ENOSYS;
10955 #endif
10956 
10957 #ifdef TARGET_NR_clock_settime
10958     case TARGET_NR_clock_settime:
10959     {
10960         struct timespec ts;
10961 
10962         ret = target_to_host_timespec(&ts, arg2);
10963         if (!is_error(ret)) {
10964             ret = get_errno(clock_settime(arg1, &ts));
10965         }
10966         return ret;
10967     }
10968 #endif
10969 #ifdef TARGET_NR_clock_gettime
10970     case TARGET_NR_clock_gettime:
10971     {
10972         struct timespec ts;
10973         ret = get_errno(clock_gettime(arg1, &ts));
10974         if (!is_error(ret)) {
10975             ret = host_to_target_timespec(arg2, &ts);
10976         }
10977         return ret;
10978     }
10979 #endif
10980 #ifdef TARGET_NR_clock_getres
10981     case TARGET_NR_clock_getres:
10982     {
10983         struct timespec ts;
10984         ret = get_errno(clock_getres(arg1, &ts));
10985         if (!is_error(ret)) {
10986             host_to_target_timespec(arg2, &ts);
10987         }
10988         return ret;
10989     }
10990 #endif
10991 #ifdef TARGET_NR_clock_nanosleep
10992     case TARGET_NR_clock_nanosleep:
10993     {
10994         struct timespec ts;
10995         target_to_host_timespec(&ts, arg3);
10996         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10997                                              &ts, arg4 ? &ts : NULL));
10998         if (arg4)
10999             host_to_target_timespec(arg4, &ts);
11000 
11001 #if defined(TARGET_PPC)
11002         /* clock_nanosleep is odd in that it returns positive errno values.
11003          * On PPC, CR0 bit 3 should be set in such a situation. */
11004         if (ret && ret != -TARGET_ERESTARTSYS) {
11005             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11006         }
11007 #endif
11008         return ret;
11009     }
11010 #endif
11011 
11012 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11013     case TARGET_NR_set_tid_address:
11014         return get_errno(set_tid_address((int *)g2h(arg1)));
11015 #endif
11016 
11017     case TARGET_NR_tkill:
11018         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11019 
11020     case TARGET_NR_tgkill:
11021         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11022                          target_to_host_signal(arg3)));
11023 
11024 #ifdef TARGET_NR_set_robust_list
11025     case TARGET_NR_set_robust_list:
11026     case TARGET_NR_get_robust_list:
11027         /* The ABI for supporting robust futexes has userspace pass
11028          * the kernel a pointer to a linked list which is updated by
11029          * userspace after the syscall; the list is walked by the kernel
11030          * when the thread exits. Since the linked list in QEMU guest
11031          * memory isn't a valid linked list for the host and we have
11032          * no way to reliably intercept the thread-death event, we can't
11033          * support these. Silently return ENOSYS so that guest userspace
11034          * falls back to a non-robust futex implementation (which should
11035          * be OK except in the corner case of the guest crashing while
11036          * holding a mutex that is shared with another process via
11037          * shared memory).
11038          */
11039         return -TARGET_ENOSYS;
11040 #endif
11041 
11042 #if defined(TARGET_NR_utimensat)
11043     case TARGET_NR_utimensat:
11044         {
11045             struct timespec *tsp, ts[2];
11046             if (!arg3) {
11047                 tsp = NULL;
11048             } else {
11049                 target_to_host_timespec(ts, arg3);
11050                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11051                 tsp = ts;
11052             }
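                  /* A NULL guest pathname means the timestamps apply to the
                   * dirfd itself (futimens() behaviour). */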
11053             if (!arg2)
11054                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11055             else {
11056                 if (!(p = lock_user_string(arg2))) {
11057                     return -TARGET_EFAULT;
11058                 }
11059                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11060                 unlock_user(p, arg2, 0);
11061             }
11062         }
11063         return ret;
11064 #endif
11065     case TARGET_NR_futex:
11066         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11067 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11068     case TARGET_NR_inotify_init:
11069         ret = get_errno(sys_inotify_init());
11070         if (ret >= 0) {
11071             fd_trans_register(ret, &target_inotify_trans);
11072         }
11073         return ret;
11074 #endif
11075 #ifdef CONFIG_INOTIFY1
11076 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11077     case TARGET_NR_inotify_init1:
11078         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11079                                           fcntl_flags_tbl)));
11080         if (ret >= 0) {
11081             fd_trans_register(ret, &target_inotify_trans);
11082         }
11083         return ret;
11084 #endif
11085 #endif
11086 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11087     case TARGET_NR_inotify_add_watch:
11088         p = lock_user_string(arg2);
11089         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11090         unlock_user(p, arg2, 0);
11091         return ret;
11092 #endif
11093 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11094     case TARGET_NR_inotify_rm_watch:
11095         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11096 #endif
11097 
11098 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11099     case TARGET_NR_mq_open:
11100         {
11101             struct mq_attr posix_mq_attr;
11102             struct mq_attr *pposix_mq_attr;
11103             int host_flags;
11104 
11105             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11106             pposix_mq_attr = NULL;
11107             if (arg4) {
11108                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11109                     return -TARGET_EFAULT;
11110                 }
11111                 pposix_mq_attr = &posix_mq_attr;
11112             }
11113             p = lock_user_string(arg1 - 1);
11114             if (!p) {
11115                 return -TARGET_EFAULT;
11116             }
11117             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11118             unlock_user(p, arg1, 0);
11119         }
11120         return ret;
11121 
11122     case TARGET_NR_mq_unlink:
11123         p = lock_user_string(arg1 - 1);
11124         if (!p) {
11125             return -TARGET_EFAULT;
11126         }
11127         ret = get_errno(mq_unlink(p));
11128         unlock_user(p, arg1, 0);
11129         return ret;
11130 
11131     case TARGET_NR_mq_timedsend:
11132         {
11133             struct timespec ts;
11134 
11135             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11136             if (arg5 != 0) {
11137                 target_to_host_timespec(&ts, arg5);
11138                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11139                 host_to_target_timespec(arg5, &ts);
11140             } else {
11141                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11142             }
11143             unlock_user(p, arg2, arg3);
11144         }
11145         return ret;
11146 
11147     case TARGET_NR_mq_timedreceive:
11148         {
11149             struct timespec ts;
11150             unsigned int prio;
11151 
11152             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11153             if (arg5 != 0) {
11154                 target_to_host_timespec(&ts, arg5);
11155                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11156                                                      &prio, &ts));
11157                 host_to_target_timespec(arg5, &ts);
11158             } else {
11159                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11160                                                      &prio, NULL));
11161             }
11162             unlock_user(p, arg2, arg3);
11163             if (arg4 != 0)
11164                 put_user_u32(prio, arg4);
11165         }
11166         return ret;
11167 
11168     /* Not implemented for now... */
11169 /*     case TARGET_NR_mq_notify: */
11170 /*         break; */
11171 
11172     case TARGET_NR_mq_getsetattr:
11173         {
11174             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11175             ret = 0;
11176             if (arg2 != 0) {
11177                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11178                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11179                                            &posix_mq_attr_out));
11180             } else if (arg3 != 0) {
11181                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11182             }
11183             if (ret == 0 && arg3 != 0) {
11184                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11185             }
11186         }
11187         return ret;
11188 #endif
11189 
11190 #ifdef CONFIG_SPLICE
11191 #ifdef TARGET_NR_tee
11192     case TARGET_NR_tee:
11193         {
11194             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11195         }
11196         return ret;
11197 #endif
11198 #ifdef TARGET_NR_splice
11199     case TARGET_NR_splice:
11200         {
11201             loff_t loff_in, loff_out;
11202             loff_t *ploff_in = NULL, *ploff_out = NULL;
11203             if (arg2) {
11204                 if (get_user_u64(loff_in, arg2)) {
11205                     return -TARGET_EFAULT;
11206                 }
11207                 ploff_in = &loff_in;
11208             }
11209             if (arg4) {
11210                 if (get_user_u64(loff_out, arg4)) {
11211                     return -TARGET_EFAULT;
11212                 }
11213                 ploff_out = &loff_out;
11214             }
11215             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11216             if (arg2) {
11217                 if (put_user_u64(loff_in, arg2)) {
11218                     return -TARGET_EFAULT;
11219                 }
11220             }
11221             if (arg4) {
11222                 if (put_user_u64(loff_out, arg4)) {
11223                     return -TARGET_EFAULT;
11224                 }
11225             }
11226         }
11227         return ret;
11228 #endif
11229 #ifdef TARGET_NR_vmsplice
11230     case TARGET_NR_vmsplice:
11231         {
11232             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11233             if (vec != NULL) {
11234                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11235                 unlock_iovec(vec, arg2, arg3, 0);
11236             } else {
11237                 ret = -host_to_target_errno(errno);
11238             }
11239         }
11240         return ret;
11241 #endif
11242 #endif /* CONFIG_SPLICE */
11243 #ifdef CONFIG_EVENTFD
11244 #if defined(TARGET_NR_eventfd)
11245     case TARGET_NR_eventfd:
11246         ret = get_errno(eventfd(arg1, 0));
11247         if (ret >= 0) {
11248             fd_trans_register(ret, &target_eventfd_trans);
11249         }
11250         return ret;
11251 #endif
11252 #if defined(TARGET_NR_eventfd2)
11253     case TARGET_NR_eventfd2:
11254     {
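              /* EFD_NONBLOCK and EFD_CLOEXEC share their values with the
               * corresponding O_* flags, so translate them via TARGET_O_*. */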
11255         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11256         if (arg2 & TARGET_O_NONBLOCK) {
11257             host_flags |= O_NONBLOCK;
11258         }
11259         if (arg2 & TARGET_O_CLOEXEC) {
11260             host_flags |= O_CLOEXEC;
11261         }
11262         ret = get_errno(eventfd(arg1, host_flags));
11263         if (ret >= 0) {
11264             fd_trans_register(ret, &target_eventfd_trans);
11265         }
11266         return ret;
11267     }
11268 #endif
11269 #endif /* CONFIG_EVENTFD  */
11270 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11271     case TARGET_NR_fallocate:
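              /* On 32-bit ABIs the 64-bit offset and len each arrive as a pair
               * of 32-bit registers. */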
11272 #if TARGET_ABI_BITS == 32
11273         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11274                                   target_offset64(arg5, arg6)));
11275 #else
11276         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11277 #endif
11278         return ret;
11279 #endif
11280 #if defined(CONFIG_SYNC_FILE_RANGE)
11281 #if defined(TARGET_NR_sync_file_range)
11282     case TARGET_NR_sync_file_range:
11283 #if TARGET_ABI_BITS == 32
11284 #if defined(TARGET_MIPS)
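              /* MIPS o32 aligns 64-bit register pairs, so a padding argument
               * follows the fd and the offset pair starts at arg3. */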
11285         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11286                                         target_offset64(arg5, arg6), arg7));
11287 #else
11288         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11289                                         target_offset64(arg4, arg5), arg6));
11290 #endif /* !TARGET_MIPS */
11291 #else
11292         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11293 #endif
11294         return ret;
11295 #endif
11296 #if defined(TARGET_NR_sync_file_range2)
11297     case TARGET_NR_sync_file_range2:
11298         /* This is like sync_file_range but the arguments are reordered */
11299 #if TARGET_ABI_BITS == 32
11300         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11301                                         target_offset64(arg5, arg6), arg2));
11302 #else
11303         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11304 #endif
11305         return ret;
11306 #endif
11307 #endif
11308 #if defined(TARGET_NR_signalfd4)
11309     case TARGET_NR_signalfd4:
11310         return do_signalfd4(arg1, arg2, arg4);
11311 #endif
11312 #if defined(TARGET_NR_signalfd)
11313     case TARGET_NR_signalfd:
11314         return do_signalfd4(arg1, arg2, 0);
11315 #endif
11316 #if defined(CONFIG_EPOLL)
11317 #if defined(TARGET_NR_epoll_create)
11318     case TARGET_NR_epoll_create:
11319         return get_errno(epoll_create(arg1));
11320 #endif
11321 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11322     case TARGET_NR_epoll_create1:
11323         return get_errno(epoll_create1(arg1));
11324 #endif
11325 #if defined(TARGET_NR_epoll_ctl)
11326     case TARGET_NR_epoll_ctl:
11327     {
11328         struct epoll_event ep;
11329         struct epoll_event *epp = 0;
11330         if (arg4) {
11331             struct target_epoll_event *target_ep;
11332             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11333                 return -TARGET_EFAULT;
11334             }
11335             ep.events = tswap32(target_ep->events);
11336             /* The epoll_data_t union is just opaque data to the kernel,
11337              * so we transfer all 64 bits across and need not worry what
11338              * actual data type it is.
11339              */
11340             ep.data.u64 = tswap64(target_ep->data.u64);
11341             unlock_user_struct(target_ep, arg4, 0);
11342             epp = &ep;
11343         }
11344         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11345     }
11346 #endif
11347 
11348 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11349 #if defined(TARGET_NR_epoll_wait)
11350     case TARGET_NR_epoll_wait:
11351 #endif
11352 #if defined(TARGET_NR_epoll_pwait)
11353     case TARGET_NR_epoll_pwait:
11354 #endif
11355     {
11356         struct target_epoll_event *target_ep;
11357         struct epoll_event *ep;
11358         int epfd = arg1;
11359         int maxevents = arg3;
11360         int timeout = arg4;
11361 
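              /* Bound maxevents so the guest cannot make us allocate an
               * arbitrarily large host event buffer. */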
11362         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11363             return -TARGET_EINVAL;
11364         }
11365 
11366         target_ep = lock_user(VERIFY_WRITE, arg2,
11367                               maxevents * sizeof(struct target_epoll_event), 1);
11368         if (!target_ep) {
11369             return -TARGET_EFAULT;
11370         }
11371 
11372         ep = g_try_new(struct epoll_event, maxevents);
11373         if (!ep) {
11374             unlock_user(target_ep, arg2, 0);
11375             return -TARGET_ENOMEM;
11376         }
11377 
11378         switch (num) {
11379 #if defined(TARGET_NR_epoll_pwait)
11380         case TARGET_NR_epoll_pwait:
11381         {
11382             target_sigset_t *target_set;
11383             sigset_t _set, *set = &_set;
11384 
11385             if (arg5) {
11386                 if (arg6 != sizeof(target_sigset_t)) {
11387                     ret = -TARGET_EINVAL;
11388                     break;
11389                 }
11390 
11391                 target_set = lock_user(VERIFY_READ, arg5,
11392                                        sizeof(target_sigset_t), 1);
11393                 if (!target_set) {
11394                     ret = -TARGET_EFAULT;
11395                     break;
11396                 }
11397                 target_to_host_sigset(set, target_set);
11398                 unlock_user(target_set, arg5, 0);
11399             } else {
11400                 set = NULL;
11401             }
11402 
11403             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11404                                              set, SIGSET_T_SIZE));
11405             break;
11406         }
11407 #endif
11408 #if defined(TARGET_NR_epoll_wait)
11409         case TARGET_NR_epoll_wait:
11410             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11411                                              NULL, 0));
11412             break;
11413 #endif
11414         default:
11415             ret = -TARGET_ENOSYS;
11416         }
11417         if (!is_error(ret)) {
11418             int i;
11419             for (i = 0; i < ret; i++) {
11420                 target_ep[i].events = tswap32(ep[i].events);
11421                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11422             }
11423             unlock_user(target_ep, arg2,
11424                         ret * sizeof(struct target_epoll_event));
11425         } else {
11426             unlock_user(target_ep, arg2, 0);
11427         }
11428         g_free(ep);
11429         return ret;
11430     }
11431 #endif
11432 #endif
11433 #ifdef TARGET_NR_prlimit64
11434     case TARGET_NR_prlimit64:
11435     {
11436         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11437         struct target_rlimit64 *target_rnew, *target_rold;
11438         struct host_rlimit64 rnew, rold, *rnewp = 0;
11439         int resource = target_to_host_resource(arg2);
11440         if (arg3) {
11441             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11442                 return -TARGET_EFAULT;
11443             }
11444             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11445             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11446             unlock_user_struct(target_rnew, arg3, 0);
11447             rnewp = &rnew;
11448         }
11449 
11450         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11451         if (!is_error(ret) && arg4) {
11452             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11453                 return -TARGET_EFAULT;
11454             }
11455             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11456             target_rold->rlim_max = tswap64(rold.rlim_max);
11457             unlock_user_struct(target_rold, arg4, 1);
11458         }
11459         return ret;
11460     }
11461 #endif
11462 #ifdef TARGET_NR_gethostname
11463     case TARGET_NR_gethostname:
11464     {
11465         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11466         if (name) {
11467             ret = get_errno(gethostname(name, arg2));
11468             unlock_user(name, arg1, arg2);
11469         } else {
11470             ret = -TARGET_EFAULT;
11471         }
11472         return ret;
11473     }
11474 #endif
11475 #ifdef TARGET_NR_atomic_cmpxchg_32
11476     case TARGET_NR_atomic_cmpxchg_32:
11477     {
11478         /* should use start_exclusive from main.c */
11479         abi_ulong mem_value;
11480         if (get_user_u32(mem_value, arg6)) {
11481             target_siginfo_t info;
11482             info.si_signo = SIGSEGV;
11483             info.si_errno = 0;
11484             info.si_code = TARGET_SEGV_MAPERR;
11485             info._sifields._sigfault._addr = arg6;
11486             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11487                          QEMU_SI_FAULT, &info);
11488             ret = 0xdeadbeef;
11489             return ret;
11490         }
11491         if (mem_value == arg2)
11492             put_user_u32(arg1, arg6);
11493         return mem_value;
11494     }
11495 #endif
11496 #ifdef TARGET_NR_atomic_barrier
11497     case TARGET_NR_atomic_barrier:
11498         /* Like the kernel implementation and the qemu arm barrier,
11499            this can safely be treated as a no-op. */
11500         return 0;
11501 #endif
11502 
11503 #ifdef TARGET_NR_timer_create
11504     case TARGET_NR_timer_create:
11505     {
11506         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11507 
11508         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11509 
11510         int clkid = arg1;
11511         int timer_index = next_free_host_timer();
11512 
11513         if (timer_index < 0) {
11514             ret = -TARGET_EAGAIN;
11515         } else {
11516             timer_t *phtimer = g_posix_timers + timer_index;
11517 
11518             if (arg2) {
11519                 phost_sevp = &host_sevp;
11520                 ret = target_to_host_sigevent(phost_sevp, arg2);
11521                 if (ret != 0) {
11522                     return ret;
11523                 }
11524             }
11525 
11526             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11527             if (ret) {
11528                 phtimer = NULL;
11529             } else {
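                      /* Hand the guest an id encoding TIMER_MAGIC plus the index
                       * into g_posix_timers; get_timer_id() reverses this. */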
11530                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11531                     return -TARGET_EFAULT;
11532                 }
11533             }
11534         }
11535         return ret;
11536     }
11537 #endif
11538 
11539 #ifdef TARGET_NR_timer_settime
11540     case TARGET_NR_timer_settime:
11541     {
11542         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11543          * struct itimerspec * old_value */
11544         target_timer_t timerid = get_timer_id(arg1);
11545 
11546         if (timerid < 0) {
11547             ret = timerid;
11548         } else if (arg3 == 0) {
11549             ret = -TARGET_EINVAL;
11550         } else {
11551             timer_t htimer = g_posix_timers[timerid];
11552             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11553 
11554             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11555                 return -TARGET_EFAULT;
11556             }
11557             ret = get_errno(
11558                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11559             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11560                 return -TARGET_EFAULT;
11561             }
11562         }
11563         return ret;
11564     }
11565 #endif
11566 
11567 #ifdef TARGET_NR_timer_gettime
11568     case TARGET_NR_timer_gettime:
11569     {
11570         /* args: timer_t timerid, struct itimerspec *curr_value */
11571         target_timer_t timerid = get_timer_id(arg1);
11572 
11573         if (timerid < 0) {
11574             ret = timerid;
11575         } else if (!arg2) {
11576             ret = -TARGET_EFAULT;
11577         } else {
11578             timer_t htimer = g_posix_timers[timerid];
11579             struct itimerspec hspec;
11580             ret = get_errno(timer_gettime(htimer, &hspec));
11581 
11582             if (host_to_target_itimerspec(arg2, &hspec)) {
11583                 ret = -TARGET_EFAULT;
11584             }
11585         }
11586         return ret;
11587     }
11588 #endif
11589 
11590 #ifdef TARGET_NR_timer_getoverrun
11591     case TARGET_NR_timer_getoverrun:
11592     {
11593         /* args: timer_t timerid */
11594         target_timer_t timerid = get_timer_id(arg1);
11595 
11596         if (timerid < 0) {
11597             ret = timerid;
11598         } else {
11599             timer_t htimer = g_posix_timers[timerid];
11600             ret = get_errno(timer_getoverrun(htimer));
11601         }
11602         fd_trans_unregister(ret);
11603         return ret;
11604     }
11605 #endif
11606 
11607 #ifdef TARGET_NR_timer_delete
11608     case TARGET_NR_timer_delete:
11609     {
11610         /* args: timer_t timerid */
11611         target_timer_t timerid = get_timer_id(arg1);
11612 
11613         if (timerid < 0) {
11614             ret = timerid;
11615         } else {
11616             timer_t htimer = g_posix_timers[timerid];
11617             ret = get_errno(timer_delete(htimer));
11618             g_posix_timers[timerid] = 0;
11619         }
11620         return ret;
11621     }
11622 #endif
11623 
11624 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11625     case TARGET_NR_timerfd_create:
11626         return get_errno(timerfd_create(arg1,
11627                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11628 #endif
11629 
11630 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11631     case TARGET_NR_timerfd_gettime:
11632         {
11633             struct itimerspec its_curr;
11634 
11635             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11636 
11637             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11638                 return -TARGET_EFAULT;
11639             }
11640         }
11641         return ret;
11642 #endif
11643 
11644 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11645     case TARGET_NR_timerfd_settime:
11646         {
11647             struct itimerspec its_new, its_old, *p_new;
11648 
11649             if (arg3) {
11650                 if (target_to_host_itimerspec(&its_new, arg3)) {
11651                     return -TARGET_EFAULT;
11652                 }
11653                 p_new = &its_new;
11654             } else {
11655                 p_new = NULL;
11656             }
11657 
11658             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11659 
11660             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11661                 return -TARGET_EFAULT;
11662             }
11663         }
11664         return ret;
11665 #endif
11666 
11667 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11668     case TARGET_NR_ioprio_get:
11669         return get_errno(ioprio_get(arg1, arg2));
11670 #endif
11671 
11672 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11673     case TARGET_NR_ioprio_set:
11674         return get_errno(ioprio_set(arg1, arg2, arg3));
11675 #endif
11676 
11677 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11678     case TARGET_NR_setns:
11679         return get_errno(setns(arg1, arg2));
11680 #endif
11681 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11682     case TARGET_NR_unshare:
11683         return get_errno(unshare(arg1));
11684 #endif
11685 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11686     case TARGET_NR_kcmp:
11687         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11688 #endif
11689 #ifdef TARGET_NR_swapcontext
11690     case TARGET_NR_swapcontext:
11691         /* PowerPC specific.  */
11692         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11693 #endif
11694 
11695     default:
11696         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11697         return -TARGET_ENOSYS;
11698     }
11699     return ret;
11700 }
11701 
11702 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11703                     abi_long arg2, abi_long arg3, abi_long arg4,
11704                     abi_long arg5, abi_long arg6, abi_long arg7,
11705                     abi_long arg8)
11706 {
11707     CPUState *cpu = env_cpu(cpu_env);
11708     abi_long ret;
11709 
11710 #ifdef DEBUG_ERESTARTSYS
11711     /* Debug-only code for exercising the syscall-restart code paths
11712      * in the per-architecture cpu main loops: restart every syscall
11713      * the guest makes once before letting it through.
11714      */
11715     {
11716         static bool flag;
11717         flag = !flag;
11718         if (flag) {
11719             return -TARGET_ERESTARTSYS;
11720         }
11721     }
11722 #endif
11723 
11724     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11725                              arg5, arg6, arg7, arg8);
11726 
11727     if (unlikely(do_strace)) {
11728         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11729         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11730                           arg5, arg6, arg7, arg8);
11731         print_syscall_ret(num, ret);
11732     } else {
11733         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11734                           arg5, arg6, arg7, arg8);
11735     }
11736 
11737     trace_guest_user_syscall_ret(cpu, num, ret);
11738     return ret;
11739 }
11740